Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.profile | 14
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 10
-rw-r--r--  drivers/gpu/drm/i915/TODO.txt | 41
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 1432
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.h | 30
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 616
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.h | 19
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 196
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 1248
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 88
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 290
-rw-r--r--  drivers/gpu/drm/i915/display/intel_csr.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 2785
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 1394
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 100
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3863
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 209
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 283
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 179
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 1673
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 115
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 55
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 519
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 117
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 962
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 54
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 68
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 144
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 54
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 714
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 653
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 1714
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vga.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 556
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 2218
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.h | 35
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 79
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 67
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 72
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 335
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_fence.c | 95
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 44
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 122
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 109
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 116
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 41
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 918
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_engine_cs.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/gen9_renderstate.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_param.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 73
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 146
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 102
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 55
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.h | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 105
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 330
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 429
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline_types.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 105
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_execlists.c | 77
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c | 180
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 101
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 107
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 78
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 261
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 207
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 258
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 168
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 117
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_priolist_types.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_switcheroo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 548
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_step.c | 106
-rw-r--r--  drivers/gpu/drm/i915/intel_step.h | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_buddy.c | 48
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 97
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 211
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 219
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 136
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 95
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_region.c | 4
299 files changed, 17330 insertions, 14735 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 1e1cb245fca7..69f57ca9c68d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,6 +20,7 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
+ select IO_MAPPING
select SYNC_FILE
select IOSF_MBI
select CRC32
@@ -101,6 +102,7 @@ config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
+ depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
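+	# A hedged reading of the line above: VFIO_MDEV must either be
+	# built in, or be modular in lockstep with i915, so that the mdev
+	# symbols stay reachable from this driver.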
default n
help
Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 35bbe2b80596..39328567c200 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -1,3 +1,17 @@
+config DRM_I915_REQUEST_TIMEOUT
+ int "Default timeout for requests (ms)"
+ default 20000 # milliseconds
+ help
+ Configures the default timeout after which any user submissions will
+ be forcefully terminated.
+
+	  Beware of setting this value lower than, or close to, three times
+	  the heartbeat interval rounded to whole seconds, as that can allow
+	  misbehaving applications to cause total rendering failure in
+	  unrelated clients.
+
+ May be 0 to disable the timeout.
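+	  For example, assuming the default 2500 ms heartbeat interval:
+	  rounded to whole seconds that is 3 s, and times three gives 9 s,
+	  so keep this timeout comfortably above 9000 ms.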
+
config DRM_I915_FENCE_TIMEOUT
int "Timeout for unsignaled foreign fences (ms, jiffy granularity)"
default 10000 # milliseconds
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2385a7505f5d..d0d936d9137b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -52,6 +52,7 @@ i915-y += i915_drv.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
+ intel_step.o \
intel_uncore.o \
intel_wakeref.o \
vlv_suspend.o
@@ -139,7 +140,6 @@ gem-y += \
gem/i915_gem_dmabuf.o \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
- gem/i915_gem_fence.o \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
@@ -209,6 +209,7 @@ i915-y += \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dsb.o \
+ display/intel_fb.o \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
@@ -223,7 +224,9 @@ i915-y += \
display/intel_sprite.o \
display/intel_tc.o \
display/intel_vga.o \
- display/i9xx_plane.o
+ display/i9xx_plane.o \
+ display/skl_scaler.o \
+ display/skl_universal_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -238,9 +241,12 @@ i915-y += \
display/dvo_ns2501.o \
display/dvo_sil164.o \
display/dvo_tfp410.o \
+ display/g4x_dp.o \
+ display/g4x_hdmi.o \
display/icl_dsi.o \
display/intel_crt.o \
display/intel_ddi.o \
+ display/intel_ddi_buf_trans.o \
display/intel_dp.o \
display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
diff --git a/drivers/gpu/drm/i915/TODO.txt b/drivers/gpu/drm/i915/TODO.txt
new file mode 100644
index 000000000000..81a82c9c203f
--- /dev/null
+++ b/drivers/gpu/drm/i915/TODO.txt
@@ -0,0 +1,41 @@
+gem/gt TODO items
+-----------------
+
+- For the discrete memory manager, merge enough dg1 to be able to refactor it to
+  TTM. Then land pci ids (just in case that turns up an uapi problem). TTM has
+  improved a lot over the past 2 years; there's no reason not to use it anymore.
+
+- Come up with a plan for what to do with drm/scheduler and how to get there.
+
+- Roll out dma_fence critical section annotations.
+
+- A lot of complexity has been added over the past few years to make relocations
+  faster. That doesn't make sense given that hw and gpu apis moved away from this
+  model years ago:
+  1. Land a modern pre-bound uapi like VM_BIND
+  2. Any complexity added in this area over the past few years which can't be
+  justified by VM_BIND-using userspace should be removed. Looking at amdgpu,
+  dma_resv on the bo and vm plus some lru locks is all that's needed. No complex
+  rcu, refcounts, caching, ... on everything.
+ This is the matching task on the vm side compared to ttm/dma_resv on the
+ backing storage side.
+
+- i915_sw_fence seems to be the main structure for the i915-gem dma_fence model.
+  How to do dma_fence is a core question, and drivers really shouldn't build their
+  own world here, treating everything else as a fixed platform. i915_sw_fence
+  concepts should be moved to dma_fence, drm/scheduler or atomic commit helpers,
+  or removed if dri-devel consensus is that they're not a good idea. Once that's
+  done, maybe even remove i915_sw_fence itself if there's nothing left.
+
+Smaller things:
+- i915_utils.h needs to be moved to the right places.
+
+- dma_fence_work should be in drivers/dma-buf
+
+- i915_mm.c should be moved to the right places. Some of the helpers also look a
+ bit fishy:
+
+ https://lore.kernel.org/linux-mm/20210301083320.943079-1-hch@lst.de/
+
+- tasklet helpers in i915_gem.h also look a bit misplaced and should
+ probably be moved to tasklet headers.
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
new file mode 100644
index 000000000000..dfe3cf328d13
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -0,0 +1,1432 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * DisplayPort support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
+ */
+
+#include "g4x_dp.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dpio_phy.h"
+#include "intel_fifo_underrun.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_panel.h"
+#include "intel_pps.h"
+#include "intel_sideband.h"
+
+struct dp_link_dpll {
+ int clock;
+ struct dpll dpll;
+};
+
+static const struct dp_link_dpll g4x_dpll[] = {
+ { 162000,
+ { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+ { 270000,
+ { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+ { 162000,
+ { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+ { 270000,
+ { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+ { 162000,
+ { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+ { 270000,
+ { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
+/*
+ * CHV supports eDP 1.4, which has more link rates.
+ * Below we only provide the fixed rates and exclude the variable rates.
+ */
+static const struct dp_link_dpll chv_dpll[] = {
+ /*
+	 * CHV requires programming fractional division for m2.
+	 * m2 is stored in fixed-point format using the formula below:
+ * (m2_int << 22) | m2_fraction
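+	 * e.g. 0x819999a = (32 << 22) | 1677722, i.e. m2 = 32 + 1677722/2^22 ≈ 32.4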
+ */
+ { 162000, /* m2_int = 32, m2_fraction = 1677722 */
+ { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+ { 270000, /* m2_int = 27, m2_fraction = 0 */
+ { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+};
+
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
+}
+
+void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct dp_link_dpll *divisor = NULL;
+ int i, count = 0;
+
+ if (IS_G4X(dev_priv)) {
+ divisor = g4x_dpll;
+ count = ARRAY_SIZE(g4x_dpll);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ divisor = pch_dpll;
+ count = ARRAY_SIZE(pch_dpll);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ divisor = chv_dpll;
+ count = ARRAY_SIZE(chv_dpll);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ divisor = vlv_dpll;
+ count = ARRAY_SIZE(vlv_dpll);
+ }
+
+ if (divisor && count) {
+ for (i = 0; i < count; i++) {
+ if (pipe_config->port_clock == divisor[i].clock) {
+ pipe_config->dpll = divisor[i].dpll;
+ pipe_config->clock_set = true;
+ break;
+ }
+ }
+ }
+}
+
+static void intel_dp_prepare(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+
+ intel_dp_set_link_params(intel_dp,
+ pipe_config->port_clock,
+ pipe_config->lane_count);
+
+ /*
+ * There are four kinds of DP registers:
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ilk_pch_enable
+ */
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+
+ /* Handle DP bits in common between all three register formats */
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
+
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ u32 trans_dp;
+
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ trans_dp |= TRANS_DP_ENH_FRAMING;
+ else
+ trans_dp &= ~TRANS_DP_ENH_FRAMING;
+ intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
+ } else {
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
+ intel_dp->DP |= DP_COLOR_RANGE_16_235;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+ else
+ intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
+ }
+}
+
+static void assert_dp_port(struct intel_dp *intel_dp, bool state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
+
+ I915_STATE_WARN(cur_state != state,
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ onoff(state), onoff(cur_state));
+}
+#define assert_dp_port_disabled(d) assert_dp_port((d), false)
+
+static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
+{
+ bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
+
+ I915_STATE_WARN(cur_state != state,
+ "eDP PLL state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
+#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
+
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_dp_port_disabled(intel_dp);
+ assert_edp_pll_disabled(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ pipe_config->port_clock);
+
+ intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+
+ if (pipe_config->port_clock == 162000)
+ intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
+ udelay(500);
+
+ /*
+ * [DevILK] Work around required when enabling DP PLL
+ * while a pipe is enabled going to FDI:
+ * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
+ * 2. Program DP PLL enable
+ */
+ if (IS_IRONLAKE(dev_priv))
+ intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
+
+ intel_dp->DP |= DP_PLL_ENABLE;
+
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
+ udelay(200);
+}
+
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_dp_port_disabled(intel_dp);
+ assert_edp_pll_enabled(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
+
+ intel_dp->DP &= ~DP_PLL_ENABLE;
+
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
+ udelay(200);
+}
+
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+ enum port port, enum pipe *pipe)
+{
+ enum pipe p;
+
+ for_each_pipe(dev_priv, p) {
+ u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
+
+ if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+ *pipe = p;
+ return true;
+ }
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ port_name(port));
+
+ /* must initialize pipe to something for the asserts */
+ *pipe = PIPE_A;
+
+ return false;
+}
+
+bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe)
+{
+ bool ret;
+ u32 val;
+
+ val = intel_de_read(dev_priv, dp_reg);
+
+ ret = val & DP_PORT_EN;
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+ return ret;
+}
+
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, pipe);
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+static void intel_dp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 tmp, flags = 0;
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+
+ tmp = intel_de_read(dev_priv, intel_dp->output_reg);
+
+ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
+
+ if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ u32 trans_dp = intel_de_read(dev_priv,
+ TRANS_DP_CTL(crtc->pipe));
+
+ if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ } else {
+ if (tmp & DP_SYNC_HS_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & DP_SYNC_VS_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ pipe_config->lane_count =
+ ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ if (port == PORT_A) {
+ if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ pipe_config->port_clock = 162000;
+ else
+ pipe_config->port_clock = 270000;
+ }
+
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
+ }
+}
+
+static void
+intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ enum port port = encoder->port;
+ u32 DP = intel_dp->DP;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, intel_dp->output_reg) &
+ DP_PORT_EN) == 0))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ DP |= DP_LINK_TRAIN_PAT_IDLE;
+ }
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ /*
+ * HW workaround for IBX, we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching HDMI port to be enabled on transcoder A.
+ */
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ /*
+ * We get CPU/PCH FIFO underruns on the other pipe when
+ * doing the workaround. Sweep them under the rug.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
+ /* always enable with pattern 1 (as per spec) */
+ DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ DP_LINK_TRAIN_PAT_1;
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ DP &= ~DP_PORT_EN;
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ }
+
+ msleep(intel_dp->pps.panel_power_down_delay);
+
+ intel_dp->DP = DP;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = INVALID_PIPE;
+ }
+}
+
+static void intel_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp->link_trained = false;
+
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
+
+ /*
+ * Make sure the panel is off before trying to change the mode.
+ * But also ensure that we have vdd while we switch off the panel.
+ */
+ intel_pps_vdd_on(intel_dp);
+ intel_edp_backlight_off(old_conn_state);
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
+ intel_pps_off(intel_dp);
+}
+
+static void g4x_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void vlv_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void g4x_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = encoder->port;
+
+ /*
+ * Bspec does not list a specific disable sequence for g4x DP.
+ * Follow the ilk+ sequence (disable pipe before the port) for
+ * g4x DP as it does not suffer from underruns like the normal
+ * g4x modeset sequence (disable pipe after the port).
+ */
+ intel_dp_link_down(encoder, old_crtc_state);
+
+ /* Only ilk+ has port A */
+ if (port == PORT_A)
+ ilk_edp_pll_off(intel_dp, old_crtc_state);
+}
+
+static void vlv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_dp_link_down(encoder, old_crtc_state);
+}
+
+static void chv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ intel_dp_link_down(encoder, old_crtc_state);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
+
+ vlv_dpio_put(dev_priv);
+}
+
+static void
+cpt_set_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 *DP = &intel_dp->DP;
+
+ *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ default:
+ MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
+ return;
+ }
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+static void
+g4x_set_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 *DP = &intel_dp->DP;
+
+ *DP &= ~DP_LINK_TRAIN_MASK;
+
+ switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ *DP |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ *DP |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ *DP |= DP_LINK_TRAIN_PAT_2;
+ break;
+ default:
+ MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
+ return;
+ }
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* enable with pattern 1 (as per spec) */
+
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_TRAINING_PATTERN_1);
+
+ /*
+ * Magic for VLV/CHV. We _must_ first set up the register
+ * without actually enabling the port, and then do another
+ * write to enable the port. Otherwise link training will
+ * fail when the power sequencer is freshly used for this port.
+ */
+ intel_dp->DP |= DP_PORT_EN;
+ if (crtc_state->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+static void intel_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+
+ if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
+ return;
+
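+	/*
+	 * Panel bring-up under the PPS mutex: force VDD on so AUX is
+	 * usable, switch full panel power on, then drop the VDD override
+	 * now that the panel powers itself.
+	 */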
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_pps_init(encoder, pipe_config);
+
+ intel_dp_enable_port(intel_dp, pipe_config);
+
+ intel_pps_vdd_on_unlocked(intel_dp);
+ intel_pps_on_unlocked(intel_dp);
+ intel_pps_vdd_off_unlocked(intel_dp, true);
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ unsigned int lane_mask = 0x0;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
+
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
+ lane_mask);
+ }
+
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
+ intel_dp_start_link_train(intel_dp, pipe_config);
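+	/* ends the training pattern, switching the link back to normal output */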
+ intel_dp_stop_link_train(intel_dp, pipe_config);
+
+ if (pipe_config->has_audio) {
+ drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
+ pipe_name(pipe));
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+ }
+}
+
+static void g4x_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
+ intel_edp_backlight_on(pipe_config, conn_state);
+}
+
+static void vlv_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_edp_backlight_on(pipe_config, conn_state);
+}
+
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = encoder->port;
+
+ intel_dp_prepare(encoder, pipe_config);
+
+ /* Only ilk+ has port A */
+ if (port == PORT_A)
+ ilk_edp_pll_on(intel_dp, pipe_config);
+}
+
+static void vlv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
+}
+
+static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_dp_prepare(encoder, pipe_config);
+
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
+
+ /* Second common lane will stay alive on its own now */
+ chv_phy_release_cl2_override(encoder);
+}
+
+static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_dp_prepare(encoder, pipe_config);
+
+ chv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
+}
+
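+/*
+ * Platform caps for DPCD signal levels: link training clamps the sink's
+ * requested voltage swing / pre-emphasis to these maxima.
+ */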
+static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+}
+
+static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+}
+
+static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+}
+
+static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+}
+
+static void vlv_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ unsigned long demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value;
+ u8 train_set = intel_dp->train_set[0];
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ preemph_reg_value = 0x0004000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x552AB83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5548B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ demph_reg_value = 0x2B245555;
+ uniqtranscale_reg_value = 0x5560B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x5598DA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ preemph_reg_value = 0x0002000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5552B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ demph_reg_value = 0x2B404848;
+ uniqtranscale_reg_value = 0x5580B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ preemph_reg_value = 0x0000000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x2B305555;
+ uniqtranscale_reg_value = 0x5570B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ demph_reg_value = 0x2B2B4040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ preemph_reg_value = 0x0006000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x1B405555;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
+ vlv_set_phy_signal_level(encoder, crtc_state,
+ demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value, 0);
+}
+
+static void chv_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ u32 deemph_reg_value, margin_reg_value;
+ bool uniq_trans_scale = false;
+ u8 train_set = intel_dp->train_set[0];
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 128;
+ margin_reg_value = 52;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ deemph_reg_value = 128;
+ margin_reg_value = 77;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ deemph_reg_value = 128;
+ margin_reg_value = 102;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ deemph_reg_value = 128;
+ margin_reg_value = 154;
+ uniq_trans_scale = true;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 85;
+ margin_reg_value = 78;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ deemph_reg_value = 85;
+ margin_reg_value = 116;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ deemph_reg_value = 85;
+ margin_reg_value = 154;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 64;
+ margin_reg_value = 104;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ deemph_reg_value = 64;
+ margin_reg_value = 154;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 43;
+ margin_reg_value = 154;
+ break;
+ default:
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
+ chv_set_phy_signal_level(encoder, crtc_state,
+ deemph_reg_value, margin_reg_value,
+ uniq_trans_scale);
+}
+
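+/* e.g. SWING_LEVEL_1 | PRE_EMPH_LEVEL_2 -> DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6 */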
+static u32 g4x_signal_levels(u8 train_set)
+{
+ u32 signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+static void
+g4x_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = g4x_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+/* SNB CPU eDP voltage swing and pre-emphasis control */
+static u32 snb_cpu_edp_signal_levels(u8 train_set)
+{
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+ default:
+ MISSING_CASE(signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ }
+}
+
+static void
+snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+/* IVB CPU eDP voltage swing and pre-emphasis control */
+static u32 ivb_cpu_edp_signal_levels(u8 train_set)
+{
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ MISSING_CASE(signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
+static void
+ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+/*
+ * If the display is now connected, check link status; there have been
+ * known issues of link loss triggering a long pulse.
+ *
+ * Some sinks (e.g. ASUS PB287Q) seem to perform some weird HPD
+ * ping-pong during modesets. So we can apparently end up with HPD
+ * going low during a modeset, and then going back up soon after.
+ * Once that happens we must retrain the link to get a picture,
+ * in case no userspace component reacted to the intermittent
+ * HPD dip.
+ */
+static enum intel_hotplug_state
+intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_modeset_acquire_ctx ctx;
+ enum intel_hotplug_state state;
+ int ret;
+
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
+ state = intel_encoder_hotplug(encoder, connector);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
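+	/*
+	 * Usual drm_modeset_lock dance: on -EDEADLK, drop the contended
+	 * locks via drm_modeset_backoff() and retry from the top.
+	 */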
+ for (;;) {
+ ret = intel_dp_retrain_link(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
+
+ /*
+ * Keeping it consistent with intel_ddi_hotplug() and
+ * intel_hdmi_hotplug().
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
+static bool ibx_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, SDEISR) & bit;
+}
+
+static bool g4x_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit;
+
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case HPD_PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case HPD_PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ MISSING_CASE(encoder->hpd_pin);
+ return false;
+ }
+
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool gm45_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit;
+
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ case HPD_PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ case HPD_PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ default:
+ MISSING_CASE(encoder->hpd_pin);
+ return false;
+ }
+
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool ilk_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, DEISR) & bit;
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_dp_encoder_flush_work(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(enc_to_dig_port(to_intel_encoder(encoder)));
+}
+
+enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
+
+ if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
+
+ return INVALID_PIPE;
+}
+
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
+
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
+
+ intel_dp->reset_link_params = true;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
+ }
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
+};
+
+bool g4x_dp_init(struct drm_i915_private *dev_priv,
+ i915_reg_t output_reg, enum port port)
+{
+ struct intel_digital_port *dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return false;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector)
+ goto err_connector_alloc;
+
+ intel_encoder = &dig_port->base;
+ encoder = &intel_encoder->base;
+
+ mutex_init(&dig_port->hdcp_mutex);
+
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "DP %c", port_name(port)))
+ goto err_encoder_init;
+
+ intel_encoder->hotplug = intel_dp_hotplug;
+ intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_encoder->get_config = intel_dp_get_config;
+ intel_encoder->sync_state = intel_dp_sync_state;
+ intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
+ intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
+ intel_encoder->shutdown = intel_dp_encoder_shutdown;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
+ intel_encoder->pre_enable = chv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
+ intel_encoder->post_disable = chv_post_disable_dp;
+ intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
+ intel_encoder->post_disable = vlv_post_disable_dp;
+ } else {
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->disable = g4x_disable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
+ }
+
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ dig_port->dp.set_link_train = cpt_set_link_train;
+ else
+ dig_port->dp.set_link_train = g4x_set_link_train;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A)
+ dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ else
+ dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
+ (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ dig_port->dp.preemph_max = intel_dp_preemph_max_3;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+ } else {
+ dig_port->dp.preemph_max = intel_dp_preemph_max_2;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+ }
+
+ dig_port->dp.output_reg = output_reg;
+ dig_port->max_lanes = 4;
+
+ intel_encoder->type = INTEL_OUTPUT_DP;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
+ if (IS_CHERRYVIEW(dev_priv)) {
+ if (port == PORT_D)
+ intel_encoder->pipe_mask = BIT(PIPE_C);
+ else
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
+ } else {
+ intel_encoder->pipe_mask = ~0;
+ }
+ intel_encoder->cloneable = 0;
+ intel_encoder->port = port;
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_GM45(dev_priv))
+ dig_port->connected = gm45_digital_port_connected;
+ else
+ dig_port->connected = g4x_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ dig_port->connected = ilk_digital_port_connected;
+ else
+ dig_port->connected = ibx_digital_port_connected;
+ }
+
+ if (port != PORT_A)
+ intel_infoframe_init(dig_port);
+
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ if (!intel_dp_init_connector(dig_port, intel_connector))
+ goto err_init_connector;
+
+ return true;
+
+err_init_connector:
+ drm_encoder_cleanup(encoder);
+err_encoder_init:
+ kfree(intel_connector);
+err_connector_alloc:
+ kfree(dig_port);
+ return false;
+}
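For context, this entry point is meant to be called from the platform
output setup code once per detected port; a sketch, where the DP_B/PORT_B
pairing and the DP_DETECTED strap check follow the pattern used by
intel_setup_outputs():

	if (intel_de_read(dev_priv, DP_B) & DP_DETECTED)
		g4x_dp_init(dev_priv, DP_B, PORT_B);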
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
new file mode 100644
index 000000000000..e1f50263a725
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _G4X_DP_H_
+#define _G4X_DP_H_
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+enum pipe vlv_active_pipe(struct intel_dp *intel_dp);
+void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
+bool g4x_dp_init(struct drm_i915_private *dev_priv,
+ i915_reg_t output_reg, enum port port);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
new file mode 100644
index 000000000000..78f93506ffaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * HDMI support for G4x, ILK, SNB, IVB, VLV, CHV (HSW+ handled by the DDI code).
+ */
+
+#include "g4x_hdmi.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_display_types.h"
+#include "intel_dpio_phy.h"
+#include "intel_fifo_underrun.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_sideband.h"
+#include "intel_sdvo.h"
+
+static void intel_hdmi_prepare(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u32 hdmi_val;
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+
+ hdmi_val = SDVO_ENCODING_HDMI;
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ hdmi_val |= HDMI_COLOR_RANGE_16_235;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
+
+ if (crtc_state->pipe_bpp > 24)
+ hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
+ else
+ hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
+
+ if (crtc_state->has_hdmi_sink)
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
+
+ if (HAS_PCH_CPT(dev_priv))
+ hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
+ else
+ hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+}
+
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+static void intel_hdmi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 tmp, flags = 0;
+ int dotclock;
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+
+ tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ if (tmp & HDMI_MODE_SELECT_HDMI)
+ pipe_config->has_hdmi_sink = true;
+
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
+ pipe_config->has_infoframe = true;
+
+ if (tmp & HDMI_AUDIO_ENABLE)
+ pipe_config->has_audio = true;
+
+ if (!HAS_PCH_SPLIT(dev_priv) &&
+ tmp & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+ dotclock = pipe_config->port_clock * 2 / 3;
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
+
+ pipe_config->lane_count = 4;
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
+}
+
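The dotclock readout above follows from HDMI deep color: at 12bpc the TMDS
link runs at 1.5x the pixel rate, so the pixel clock is recovered as
port_clock * 2 / 3. For example, a 148.5 MHz mode at 12bpc drives the link
at 222.75 MHz, and the readout computes 222750 * 2 / 3 = 148500 kHz.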
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
+ drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+}
+
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (pipe_config->has_audio)
+ temp |= HDMI_AUDIO_ENABLE;
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
+}
+
+static void ibx_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (pipe_config->has_audio)
+ temp |= HDMI_AUDIO_ENABLE;
+
+ /*
+ * HW workaround: the register needs to be written twice, as
+ * the first write may otherwise get masked.
+ */
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ /*
+ * HW workaround: the enable bit needs to be toggled off and
+ * on for 12bpc with pixel repeat.
+ *
+ * FIXME: BSpec says this should be done at the end of
+ * the modeset sequence, so this may be too soon.
+ */
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ temp & ~SDVO_ENABLE);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ /*
+ * HW workaround: the register needs to be written twice, as
+ * the first write may otherwise get masked.
+ */
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ }
+
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
+}
+
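The masked-first-write workaround repeats the same write/posting-read pair
in several places in this file; a hypothetical helper factoring it out
(not part of the patch) could read:

	static void intel_hdmi_write_twice(struct drm_i915_private *i915,
					   i915_reg_t reg, u32 val)
	{
		intel_de_write(i915, reg, val);
		intel_de_posting_read(i915, reg);
		intel_de_write(i915, reg, val);
		intel_de_posting_read(i915, reg);
	}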
+static void cpt_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (pipe_config->has_audio)
+ temp |= HDMI_AUDIO_ENABLE;
+
+ /*
+ * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+ *
+ * The procedure for 12bpc is as follows:
+ * 1. disable HDMI clock gating
+ * 2. enable HDMI with 8bpc
+ * 3. enable HDMI with 12bpc
+ * 4. enable HDMI clock gating
+ */
+
+ if (pipe_config->pipe_bpp > 24) {
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= SDVO_COLOR_FORMAT_8bpc;
+ }
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ if (pipe_config->pipe_bpp > 24) {
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= HDMI_COLOR_FORMAT_12bpc;
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ }
+
+ if (pipe_config->has_audio)
+ intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
+}
+
+static void vlv_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+}
+
+static void intel_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ /*
+ * HW workaround for IBX: we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching DP port to be enabled on transcoder A.
+ */
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ /*
+ * We get CPU/PCH FIFO underruns on the other pipe when
+ * doing the workaround. Sweep them under the rug.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
+ /*
+ * HW workaround: the register needs to be written twice, as
+ * the first write may otherwise get masked.
+ */
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp &= ~SDVO_ENABLE;
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ }
+
+ dig_port->set_infoframes(encoder,
+ false,
+ old_crtc_state, old_conn_state);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void g4x_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
+
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void pch_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
+}
+
+static void pch_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_hdmi_prepare(encoder, pipe_config);
+
+ dig_port->set_infoframes(encoder,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
+}
+
+static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ /* HDMI 1.0V-2dB */
+ vlv_set_phy_signal_level(encoder, pipe_config,
+ 0x2b245f5f, 0x00002000,
+ 0x5578b83a, 0x2b247878);
+
+ dig_port->set_infoframes(encoder,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
+
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
+}
+
+static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_hdmi_prepare(encoder, pipe_config);
+
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_hdmi_prepare(encoder, pipe_config);
+
+ chv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
+}
+
+static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ /* Reset lanes to avoid HDMI flicker (VLV w/a) */
+ vlv_phy_reset_lanes(encoder, old_crtc_state);
+}
+
+static void chv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
+
+ vlv_dpio_put(dev_priv);
+}
+
+static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ /* FIXME: Program the supported xxx V-dB values */
+ /* Use 800mV-0dB */
+ chv_set_phy_signal_level(encoder, pipe_config, 128, 102, false);
+
+ dig_port->set_infoframes(encoder,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
+
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
+
+ /* Second common lane will stay alive on its own now */
+ chv_phy_release_cl2_override(encoder);
+}
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static enum intel_hotplug_state
+intel_hdmi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ enum intel_hotplug_state state;
+
+ state = intel_encoder_hotplug(encoder, connector);
+
+ /*
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required lengths of the DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this, schedule another detection cycle if we
+ * didn't detect any change in the sink's connection status this time
+ * around.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
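A caller acting on INTEL_HOTPLUG_RETRY is expected to re-run detection for
that pin after a delay. Minimal sketch only: retry_mask, retry_work and the
1000 ms delay are illustrative bookkeeping, not the exact intel_hotplug.c
implementation:

	if (encoder->hotplug(encoder, connector) == INTEL_HOTPLUG_RETRY)
		retry_mask |= BIT(encoder->hpd_pin);

	if (retry_mask)
		mod_delayed_work(system_wq, &retry_work,
				 msecs_to_jiffies(1000));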
+void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ i915_reg_t hdmi_reg, enum port port)
+{
+ struct intel_digital_port *dig_port;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(dig_port);
+ return;
+ }
+
+ intel_encoder = &dig_port->base;
+
+ mutex_init(&dig_port->hdcp_mutex);
+
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port));
+
+ intel_encoder->hotplug = intel_hdmi_hotplug;
+ intel_encoder->compute_config = intel_hdmi_compute_config;
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_encoder->disable = pch_disable_hdmi;
+ intel_encoder->post_disable = pch_post_disable_hdmi;
+ } else {
+ intel_encoder->disable = g4x_disable_hdmi;
+ }
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ intel_encoder->get_config = intel_hdmi_get_config;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = chv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = chv_hdmi_post_disable;
+ intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = vlv_hdmi_post_disable;
+ } else {
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ if (HAS_PCH_CPT(dev_priv))
+ intel_encoder->enable = cpt_enable_hdmi;
+ else if (HAS_PCH_IBX(dev_priv))
+ intel_encoder->enable = ibx_enable_hdmi;
+ else
+ intel_encoder->enable = g4x_enable_hdmi;
+ }
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->port = port;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ if (port == PORT_D)
+ intel_encoder->pipe_mask = BIT(PIPE_C);
+ else
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
+ } else {
+ intel_encoder->pipe_mask = ~0;
+ }
+ intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ /*
+ * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
+ * to work on real hardware. And since g4x can send infoframes to
+ * only one port anyway, nothing is lost by allowing it.
+ */
+ if (IS_G4X(dev_priv))
+ intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
+
+ dig_port->hdmi.hdmi_reg = hdmi_reg;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = 4;
+
+ intel_infoframe_init(dig_port);
+
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ intel_hdmi_init_connector(dig_port, intel_connector);
+}
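As with g4x_dp_init(), this is expected to be invoked from the output setup
path for each port strapped as present; a sketch, with the HDMIB/PORT_B
register and port pairing assumed for illustration:

	if (intel_de_read(dev_priv, HDMIB) & SDVO_DETECTED)
		g4x_hdmi_init(dev_priv, HDMIB, PORT_B);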
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
new file mode 100644
index 000000000000..7aca14b602c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _G4X_HDMI_H_
+#define _G4X_HDMI_H_
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+enum port;
+struct drm_i915_private;
+
+void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ i915_reg_t hdmi_reg, enum port port);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index e3e69e6cef65..456374ddf37a 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -11,6 +11,7 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_sprite.h"
#include "i9xx_plane.h"
@@ -128,7 +129,7 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
else if (IS_IVYBRIDGE(dev_priv))
return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
i9xx_plane == PLANE_C;
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (DISPLAY_VER(dev_priv) >= 4)
return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
else
return i9xx_plane == PLANE_A;
@@ -141,9 +142,9 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane)
if (IS_CHERRYVIEW(dev_priv))
return i9xx_plane == PLANE_B;
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
return false;
- else if (IS_GEN(dev_priv, 4))
+ else if (IS_DISPLAY_VER(dev_priv, 4))
return i9xx_plane == PLANE_C;
else
return i9xx_plane == PLANE_B ||
@@ -161,8 +162,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
dspcntr = DISPLAY_PLANE_ENABLE;
- if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
- IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) ||
+ IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -210,7 +211,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 4 &&
+ if (DISPLAY_VER(dev_priv) >= 4 &&
fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
@@ -249,7 +250,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
plane_state, 0);
else
@@ -266,11 +267,11 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Linear surfaces seem to work just fine, even on hsw/bdw
* despite them not using the linear offset anymore.
*/
- if (INTEL_GEN(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
u32 alignment = intel_surf_alignment(fb, 0);
int cpp = fb->format->cpp[0];
- while ((src_x + src_w) * cpp > plane_state->color_plane[0].stride) {
+ while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].stride) {
if (offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
@@ -305,14 +306,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
- } else if (INTEL_GEN(dev_priv) >= 4 &&
+ } else if (DISPLAY_VER(dev_priv) >= 4 &&
fb->modifier == I915_FORMAT_MOD_X_TILED) {
drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
}
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
+ plane_state->view.color_plane[0].offset = offset;
+ plane_state->view.color_plane[0].x = src_x;
+ plane_state->view.color_plane[0].y = src_y;
return 0;
}
@@ -363,7 +364,7 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable)
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 5)
+ if (DISPLAY_VER(dev_priv) < 5)
dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
return dspcntr;
@@ -423,8 +424,8 @@ static void i9xx_update_plane(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
- int x = plane_state->color_plane[0].x;
- int y = plane_state->color_plane[0].y;
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
@@ -437,17 +438,17 @@ static void i9xx_update_plane(struct intel_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_GEN(dev_priv) >= 4)
- dspaddr_offset = plane_state->color_plane[0].offset;
+ if (DISPLAY_VER(dev_priv) >= 4)
+ dspaddr_offset = plane_state->view.color_plane[0].offset;
else
dspaddr_offset = linear_offset;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
- plane_state->color_plane[0].stride);
+ plane_state->view.color_plane[0].stride);
- if (INTEL_GEN(dev_priv) < 4) {
+ if (DISPLAY_VER(dev_priv) < 4) {
/*
* PLANE_A doesn't actually have a full window
* generator but let's assume we still need to
@@ -468,7 +469,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
(y << 16) | x);
- } else if (INTEL_GEN(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
linear_offset);
intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
@@ -481,7 +482,7 @@ static void i9xx_update_plane(struct intel_plane *plane,
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
else
@@ -514,7 +515,7 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
else
intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
@@ -530,7 +531,7 @@ g4x_primary_async_flip(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
- u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
unsigned long irqflags;
@@ -551,7 +552,7 @@ vlv_primary_async_flip(struct intel_plane *plane,
bool async_flip)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- u32 dspaddr_offset = plane_state->color_plane[0].offset;
+ u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
unsigned long irqflags;
@@ -669,7 +670,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
ret = val & DISPLAY_PLANE_ENABLE;
- if (INTEL_GEN(dev_priv) >= 5)
+ if (DISPLAY_VER(dev_priv) >= 5)
*pipe = plane->pipe;
else
*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -729,7 +730,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- if (INTEL_GEN(dev_priv) >= 3) {
+ if (DISPLAY_VER(dev_priv) >= 3) {
if (modifier == I915_FORMAT_MOD_X_TILED)
return 8*1024;
else
@@ -770,10 +771,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
int num_formats;
int ret, zpos;
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_PRIMARY);
-
plane = intel_plane_alloc();
if (IS_ERR(plane))
return plane;
@@ -783,7 +780,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
+ if (HAS_FBC(dev_priv) && DISPLAY_VER(dev_priv) < 4 &&
INTEL_NUM_PIPES(dev_priv) == 2)
plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
@@ -801,7 +798,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
formats = vlv_primary_formats;
num_formats = ARRAY_SIZE(vlv_primary_formats);
- } else if (INTEL_GEN(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
/*
* WaFP16GammaEnabling:ivb
* "Workaround : When using the 64-bit format, the plane
@@ -827,7 +824,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i8xx_primary_formats);
}
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
plane_funcs = &i965_plane_funcs;
else
plane_funcs = &i8xx_plane_funcs;
@@ -842,7 +839,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->min_cdclk = i9xx_plane_min_cdclk;
if (HAS_GMCH(dev_priv)) {
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
plane->max_stride = i965_plane_max_stride;
else
plane->max_stride = i9xx_plane_max_stride;
@@ -867,17 +864,17 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->async_flip = g4x_primary_async_flip;
plane->enable_flip_done = bdw_primary_enable_flip_done;
plane->disable_flip_done = bdw_primary_disable_flip_done;
- } else if (INTEL_GEN(dev_priv) >= 7) {
+ } else if (DISPLAY_VER(dev_priv) >= 7) {
plane->async_flip = g4x_primary_async_flip;
plane->enable_flip_done = ivb_primary_enable_flip_done;
plane->disable_flip_done = ivb_primary_disable_flip_done;
- } else if (INTEL_GEN(dev_priv) >= 5) {
+ } else if (DISPLAY_VER(dev_priv) >= 5) {
plane->async_flip = g4x_primary_async_flip;
plane->enable_flip_done = ilk_primary_enable_flip_done;
plane->disable_flip_done = ilk_primary_disable_flip_done;
}
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
@@ -899,14 +896,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
} else {
supported_rotations = DRM_MODE_ROTATE_0;
}
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
@@ -924,3 +921,122 @@ fail:
return ERR_PTR(ret);
}
+static int i9xx_format_to_fourcc(int format)
+{
+ switch (format) {
+ case DISPPLANE_8BPP:
+ return DRM_FORMAT_C8;
+ case DISPPLANE_BGRA555:
+ return DRM_FORMAT_ARGB1555;
+ case DISPPLANE_BGRX555:
+ return DRM_FORMAT_XRGB1555;
+ case DISPPLANE_BGRX565:
+ return DRM_FORMAT_RGB565;
+ default:
+ case DISPPLANE_BGRX888:
+ return DRM_FORMAT_XRGB8888;
+ case DISPPLANE_RGBX888:
+ return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRA888:
+ return DRM_FORMAT_ARGB8888;
+ case DISPPLANE_RGBA888:
+ return DRM_FORMAT_ABGR8888;
+ case DISPPLANE_BGRX101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DISPPLANE_RGBX101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_BGRA101010:
+ return DRM_FORMAT_ARGB2101010;
+ case DISPPLANE_RGBA101010:
+ return DRM_FORMAT_ABGR2101010;
+ case DISPPLANE_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
+ }
+}
+
+void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe;
+ u32 val, base, offset;
+ int fourcc, pixel_format;
+ unsigned int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return;
+
+ drm_WARN_ON(dev, pipe != crtc->pipe);
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ fb->dev = dev;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ if (val & DISPPLANE_TILED) {
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
+ }
+
+ if (val & DISPPLANE_ROTATE_180)
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ }
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+ val & DISPPLANE_MIRROR)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+ pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->format = drm_format_info(fourcc);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
+ if (plane_config->tiling)
+ offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i9xx_plane));
+ else
+ offset = intel_de_read(dev_priv,
+ DSPLINOFF(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
+ } else {
+ base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
+ }
+ plane_config->base = base;
+
+ val = intel_de_read(dev_priv, PIPESRC(pipe));
+ fb->width = ((val >> 16) & 0xfff) + 1;
+ fb->height = ((val >> 0) & 0xfff) + 1;
+
+ val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
+ fb->pitches[0] = val & 0xffffffc0;
+
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+ plane_config->size = fb->pitches[0] * aligned_height;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
+
+ plane_config->fb = intel_fb;
+}
+
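The readout ends with a simple size computation: plane_config->size =
fb->pitches[0] * aligned_height. With illustrative numbers, a 1920x1080
XRGB8888 BIOS framebuffer whose stride reads back as 7680 bytes and whose
height is already tile-aligned yields 7680 * 1080 = 8294400 bytes
(roughly 7.9 MiB) to be preserved from stolen memory.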
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index ca963c2a8457..027b66053984 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -10,6 +10,8 @@
enum pipe;
struct drm_i915_private;
+struct intel_crtc;
+struct intel_initial_plane_config;
struct intel_plane;
struct intel_plane_state;
@@ -21,4 +23,6 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config);
#endif
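The recurring INTEL_GEN()/IS_GEN() to DISPLAY_VER()/IS_DISPLAY_VER()
conversions running through i9xx_plane.c above and icl_dsi.c,
intel_atomic.c and intel_audio.c below switch the display code from the
overall graphics generation to the display-specific version number. A
sketch of the helpers, assuming the shape of the i915_drv.h definitions:

	#define DISPLAY_VER(i915)	(INTEL_INFO(i915)->display.ver)
	#define IS_DISPLAY_VER(i915, v)	(DISPLAY_VER(i915) == (v))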
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 9d245a689323..9282978060b0 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -35,6 +35,8 @@
#include "intel_dsi.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
static int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -455,7 +457,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
- if (IS_JSL_EHL(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
+ if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
tmp = intel_de_read(dev_priv,
ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~LATENCY_OPTIM_MASK;
@@ -590,7 +592,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
- if (IS_GEN(dev_priv, 11)) {
+ if (IS_DISPLAY_VER(dev_priv, 11)) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = intel_de_read(dev_priv,
@@ -653,6 +655,24 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpll.lock);
}
+static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ bool clock_enabled = false;
+ enum phy phy;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)))
+ clock_enabled = true;
+ }
+
+ return clock_enabled;
+}
+
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -672,7 +692,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
else
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
@@ -754,7 +774,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
}
}
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
if (is_vid_mode(intel_dsi))
tmp |= BLANKING_PACKET_ENABLE;
}
@@ -1000,7 +1020,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
intel_de_write(dev_priv, VBLANK(dsi_trans),
@@ -1138,7 +1158,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
- if (IS_GEN(dev_priv, 11))
+ if (IS_DISPLAY_VER(dev_priv, 11))
gen11_dsi_gate_clocks(encoder);
}
@@ -1488,14 +1508,10 @@ static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock = intel_dpll_get_freq(i915,
- pipe_config->shared_dpll,
- &pipe_config->dpll_hw_state);
+ intel_ddi_get_clock(encoder, pipe_config, icl_ddi_combo_get_pll(encoder));
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1518,7 +1534,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 12 : 10;
+ int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10;
bool use_dsc;
int ret;
@@ -1940,6 +1956,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
+ encoder->disable_clock = gen11_dsi_gate_clocks;
+ encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;
/* register DSI connector with DRM subsystem */
drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index e00fdc47c0eb..4fa389fce8cb 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -40,7 +40,7 @@
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
+#include "skl_universal_plane.h"
/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
@@ -332,8 +332,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
plane_state->hw.fb->format->is_yuv &&
plane_state->hw.fb->format->num_planes > 1) {
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- if (IS_GEN(dev_priv, 9) &&
- !IS_GEMINILAKE(dev_priv)) {
+ if (IS_DISPLAY_VER(dev_priv, 9)) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
@@ -351,7 +350,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (linked)
mode |= PS_PLANE_Y_SEL(linked->id);
}
- } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
mode = PS_SCALER_MODE_NORMAL;
} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
/*
@@ -460,7 +459,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
* isn't necessary to change between HQ and dyn mode
* on those platforms.
*/
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
continue;
plane = drm_plane_from_index(&dev_priv->drm, i);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index f7de55707746..9671c8f6e892 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -248,7 +248,7 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
- if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ if (DISPLAY_VER(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
i = ARRAY_SIZE(hdmi_audio_clock);
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
@@ -586,14 +586,14 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
unsigned int hblank_early_prog, samples_room;
unsigned int val;
- if (INTEL_GEN(i915) < 11)
+ if (DISPLAY_VER(i915) < 11)
return;
val = intel_de_read(i915, AUD_CONFIG_BE);
- if (INTEL_GEN(i915) == 11)
+ if (IS_DISPLAY_VER(i915, 11))
val |= HBLANK_EARLY_ENABLE_ICL(pipe);
- else if (INTEL_GEN(i915) >= 12)
+ else if (DISPLAY_VER(i915) >= 12)
val |= HBLANK_EARLY_ENABLE_TGL(pipe);
if (crtc_state->dsc.compression_enable &&
@@ -933,7 +933,7 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
- } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
+ } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -1010,7 +1010,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
if (dev_priv->audio_power_refcount++ == 0) {
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
dev_priv->audio_freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
@@ -1022,7 +1022,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
(intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
}
@@ -1050,7 +1050,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
unsigned long cookie;
u32 tmp;
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
return;
cookie = i915_audio_component_get_power(kdev);
@@ -1266,6 +1266,15 @@ static const struct component_ops i915_audio_component_bind_ops = {
.unbind = i915_audio_component_unbind,
};
+#define AUD_FREQ_TMODE_SHIFT 14
+#define AUD_FREQ_4T 0
+#define AUD_FREQ_8T (2 << AUD_FREQ_TMODE_SHIFT)
+#define AUD_FREQ_PULLCLKS(x) (((x) & 0x3) << 11)
+#define AUD_FREQ_BCLK_96M BIT(4)
+
+#define AUD_FREQ_GEN12 (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(0) | AUD_FREQ_BCLK_96M)
+#define AUD_FREQ_TGL_BROKEN (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(2) | AUD_FREQ_BCLK_96M)
+
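Expanding the macros above is straightforward arithmetic: AUD_FREQ_8T =
2 << 14 = 0x8000, AUD_FREQ_PULLCLKS(2) = 2 << 11 = 0x1000 and
AUD_FREQ_BCLK_96M = 0x10, so AUD_FREQ_GEN12 evaluates to 0x8010 and
AUD_FREQ_TGL_BROKEN to 0x9010; the "known bad value" check below is a
plain compare against the latter.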
/**
* i915_audio_component_init - initialize and register the audio component
* @dev_priv: i915 device instance
@@ -1284,6 +1293,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
*/
static void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
+ u32 aud_freq, aud_freq_init;
int ret;
ret = component_add_typed(dev_priv->drm.dev,
@@ -1296,12 +1306,22 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
- if (INTEL_GEN(dev_priv) >= 9) {
- dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
- AUD_FREQ_CNTRL);
- drm_dbg_kms(&dev_priv->drm,
- "init value of AUD_FREQ_CNTRL of 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ aud_freq_init = intel_de_read(dev_priv, AUD_FREQ_CNTRL);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ aud_freq = AUD_FREQ_GEN12;
+ else
+ aud_freq = aud_freq_init;
+
+ /* use the BIOS-provided value for TGL unless it is a known bad value */
+ if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN)
+ aud_freq = aud_freq_init;
+
+ drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
+ aud_freq, aud_freq_init);
+
+ dev_priv->audio_freq_cntrl = aud_freq;
}
dev_priv->audio_component_registered = true;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 987cf509337f..3d0c035b5e38 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -59,7 +59,9 @@
*/
/* Wrapper for VBT child device config */
-struct display_device_data {
+struct intel_bios_encoder_data {
+ struct drm_i915_private *i915;
+
struct child_device_config child;
struct dsc_compression_parameters_entry *dsc;
struct list_head node;
@@ -211,7 +213,7 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
/* Parse general panel options */
static void
-parse_panel_options(struct drm_i915_private *dev_priv,
+parse_panel_options(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_lvds_options *lvds_options;
@@ -223,27 +225,27 @@ parse_panel_options(struct drm_i915_private *dev_priv,
if (!lvds_options)
return;
- dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
+ i915->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(dev_priv);
+ ret = intel_opregion_get_panel_type(i915);
if (ret >= 0) {
- drm_WARN_ON(&dev_priv->drm, ret > 0xf);
+ drm_WARN_ON(&i915->drm, ret > 0xf);
panel_type = ret;
- drm_dbg_kms(&dev_priv->drm, "Panel type: %d (OpRegion)\n",
+ drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n",
panel_type);
} else {
if (lvds_options->panel_type > 0xf) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Invalid VBT panel type 0x%x\n",
lvds_options->panel_type);
return;
}
panel_type = lvds_options->panel_type;
- drm_dbg_kms(&dev_priv->drm, "Panel type: %d (VBT)\n",
+ drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n",
panel_type);
}
- dev_priv->vbt.panel_type = panel_type;
+ i915->vbt.panel_type = panel_type;
drrs_mode = (lvds_options->dps_panel_type_bits
>> (panel_type * 2)) & MODE_MASK;
@@ -254,17 +256,17 @@ parse_panel_options(struct drm_i915_private *dev_priv,
*/
switch (drrs_mode) {
case 0:
- dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
- drm_dbg_kms(&dev_priv->drm, "DRRS supported mode is static\n");
+ i915->vbt.drrs_type = STATIC_DRRS_SUPPORT;
+ drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
break;
case 2:
- dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
- drm_dbg_kms(&dev_priv->drm,
+ i915->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
+ drm_dbg_kms(&i915->drm,
"DRRS supported mode is seamless\n");
break;
default:
- dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- drm_dbg_kms(&dev_priv->drm,
+ i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ drm_dbg_kms(&i915->drm,
"DRRS not supported (VBT input)\n");
break;
}
@@ -272,7 +274,7 @@ parse_panel_options(struct drm_i915_private *dev_priv,
/* Try to find integrated panel timing data */
static void
-parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
+parse_lfp_panel_dtd(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -280,7 +282,7 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
- int panel_type = dev_priv->vbt.panel_type;
+ int panel_type = i915->vbt.panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
@@ -300,9 +302,9 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
- dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
@@ -313,16 +315,16 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
- dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- drm_dbg_kms(&dev_priv->drm,
+ i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ drm_dbg_kms(&i915->drm,
"VBT initial LVDS value %x\n",
- dev_priv->vbt.bios_lvds_val);
+ i915->vbt.bios_lvds_val);
}
}
}
static void
-parse_generic_dtd(struct drm_i915_private *dev_priv,
+parse_generic_dtd(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_generic_dtd *generic_dtd;
@@ -335,26 +337,26 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
return;
if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
- drm_err(&dev_priv->drm, "GDTD size %u is too small.\n",
+ drm_err(&i915->drm, "GDTD size %u is too small.\n",
generic_dtd->gdtd_size);
return;
} else if (generic_dtd->gdtd_size !=
sizeof(struct generic_dtd_entry)) {
- drm_err(&dev_priv->drm, "Unexpected GDTD size %u\n",
+ drm_err(&i915->drm, "Unexpected GDTD size %u\n",
generic_dtd->gdtd_size);
/* DTD has unknown fields, but keep going */
}
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
- if (dev_priv->vbt.panel_type >= num_dtd) {
- drm_err(&dev_priv->drm,
+ if (i915->vbt.panel_type >= num_dtd) {
+ drm_err(&i915->drm,
"Panel type %d not found in table of %d DTD's\n",
- dev_priv->vbt.panel_type, num_dtd);
+ i915->vbt.panel_type, num_dtd);
return;
}
- dtd = &generic_dtd->dtd[dev_priv->vbt.panel_type];
+ dtd = &generic_dtd->dtd[i915->vbt.panel_type];
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
@@ -393,15 +395,15 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Found panel mode in BIOS VBT generic dtd table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
}
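
The num_dtd computation above is the usual VBT pattern of sizing a trailing flexible array from the block size the BIOS reported, then bounds-checking panel_type against it. A self-contained sketch with invented sizes; the struct below is illustrative, not the real bdb_generic_dtd layout:

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-in for a VBT block header with a trailing entry array. */
    struct toy_header {
        unsigned short id;
        unsigned short size;
        unsigned char entry_size;
    };

    int main(void)
    {
        struct toy_header hdr = { .entry_size = 24 };
        /* Pretend the block holds the header plus three 24-byte entries. */
        size_t block_size = sizeof(struct toy_header) + 3 * 24;
        size_t num = (block_size - sizeof(struct toy_header)) / hdr.entry_size;

        printf("%zu entries\n", num); /* prints: 3 entries */
        return 0;
    }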
static void
-parse_panel_dtd(struct drm_i915_private *dev_priv,
+parse_panel_dtd(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
/*
@@ -413,18 +415,18 @@ parse_panel_dtd(struct drm_i915_private *dev_priv,
* back to trying the old LFP block if that fails.
*/
if (bdb->version >= 229)
- parse_generic_dtd(dev_priv, bdb);
- if (!dev_priv->vbt.lfp_lvds_vbt_mode)
- parse_lfp_panel_dtd(dev_priv, bdb);
+ parse_generic_dtd(i915, bdb);
+ if (!i915->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(i915, bdb);
}
static void
-parse_lfp_backlight(struct drm_i915_private *dev_priv,
+parse_lfp_backlight(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct lfp_backlight_data_entry *entry;
- int panel_type = dev_priv->vbt.panel_type;
+ int panel_type = i915->vbt.panel_type;
u16 level;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
@@ -432,7 +434,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Unsupported backlight data entry size %u\n",
backlight_data->entry_size);
return;
@@ -440,26 +442,26 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
entry = &backlight_data->data[panel_type];
- dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
- if (!dev_priv->vbt.backlight.present) {
- drm_dbg_kms(&dev_priv->drm,
+ i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+ if (!i915->vbt.backlight.present) {
+ drm_dbg_kms(&i915->drm,
"PWM backlight not present in VBT (type %u)\n",
entry->type);
return;
}
- dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (bdb->version >= 191 &&
get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
- dev_priv->vbt.backlight.type = method->type;
- dev_priv->vbt.backlight.controller = method->controller;
+ i915->vbt.backlight.type = method->type;
+ i915->vbt.backlight.controller = method->controller;
}
- dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
- dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
if (bdb->version >= 234) {
u16 min_level;
@@ -477,37 +479,37 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
min_level = min_level / 255;
if (min_level > 255) {
- drm_warn(&dev_priv->drm, "Brightness min level > 255\n");
+ drm_warn(&i915->drm, "Brightness min level > 255\n");
level = 255;
}
- dev_priv->vbt.backlight.min_brightness = min_level;
+ i915->vbt.backlight.min_brightness = min_level;
} else {
level = backlight_data->level[panel_type];
- dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
+ i915->vbt.backlight.min_brightness = entry->min_brightness;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
- dev_priv->vbt.backlight.pwm_freq_hz,
- dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- dev_priv->vbt.backlight.min_brightness,
+ i915->vbt.backlight.pwm_freq_hz,
+ i915->vbt.backlight.active_low_pwm ? "low" : "high",
+ i915->vbt.backlight.min_brightness,
level,
- dev_priv->vbt.backlight.controller);
+ i915->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
static void
-parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+parse_sdvo_panel_data(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_sdvo_panel_dtds *dtds;
struct drm_display_mode *panel_fixed_mode;
int index;
- index = dev_priv->params.vbt_sdvo_panel_type;
+ index = i915->params.vbt_sdvo_panel_type;
if (index == -2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
}
@@ -532,17 +534,17 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
- dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+ i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
}
-static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
+static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
bool alternate)
{
- switch (INTEL_GEN(dev_priv)) {
+ switch (DISPLAY_VER(i915)) {
case 2:
return alternate ? 66667 : 48000;
case 3:
@@ -554,7 +556,7 @@ static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
}
static void
-parse_general_features(struct drm_i915_private *dev_priv,
+parse_general_features(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_general_features *general;
@@ -563,31 +565,31 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (!general)
return;
- dev_priv->vbt.int_tv_support = general->int_tv_support;
+ i915->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
if (bdb->version >= 155 &&
- (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv)))
- dev_priv->vbt.int_crt_support = general->int_crt_support;
- dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
- dev_priv->vbt.lvds_ssc_freq =
- intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
- dev_priv->vbt.display_clock_mode = general->display_clock_mode;
- dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ (HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
+ i915->vbt.int_crt_support = general->int_crt_support;
+ i915->vbt.lvds_use_ssc = general->enable_ssc;
+ i915->vbt.lvds_ssc_freq =
+ intel_bios_ssc_frequency(i915, general->ssc_freq);
+ i915->vbt.display_clock_mode = general->display_clock_mode;
+ i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
if (bdb->version >= 181) {
- dev_priv->vbt.orientation = general->rotate_180 ?
+ i915->vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
- dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- dev_priv->vbt.int_tv_support,
- dev_priv->vbt.int_crt_support,
- dev_priv->vbt.lvds_use_ssc,
- dev_priv->vbt.lvds_ssc_freq,
- dev_priv->vbt.display_clock_mode,
- dev_priv->vbt.fdi_rx_polarity_inverted);
+ i915->vbt.int_tv_support,
+ i915->vbt.int_crt_support,
+ i915->vbt.lvds_use_ssc,
+ i915->vbt.lvds_ssc_freq,
+ i915->vbt.display_clock_mode,
+ i915->vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -597,10 +599,10 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
}
static void
-parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
+parse_sdvo_device_mapping(struct drm_i915_private *i915)
{
struct sdvo_device_mapping *mapping;
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
int count = 0;
@@ -608,12 +610,12 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
- if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
- drm_dbg_kms(&dev_priv->drm, "Skipping SDVO device mapping\n");
+ if (!IS_DISPLAY_RANGE(i915, 3, 7)) {
+ drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n");
return;
}
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
@@ -627,17 +629,17 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Incorrect SDVO port. Skip it\n");
continue;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"the SDVO device with slave addr %2x is found on"
" %s port\n",
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
+ mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
mapping->slave_addr = child->slave_addr;
@@ -645,20 +647,20 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
mapping->dvo_port, mapping->slave_addr,
mapping->dvo_wiring, mapping->ddc_pin,
mapping->i2c_pin);
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
if (child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"there exists the slave2_addr. Maybe this"
" is a SDVO device with multiple inputs.\n");
}
@@ -667,13 +669,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
if (!count) {
/* No SDVO device info is found */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No SDVO device info is found in VBT\n");
}
}
static void
-parse_driver_features(struct drm_i915_private *dev_priv,
+parse_driver_features(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_driver_features *driver;
@@ -682,14 +684,14 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (DISPLAY_VER(i915) >= 5) {
/*
* Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
* to mean "eDP". The VBT spec doesn't agree with that
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
- dev_priv->vbt.int_lvds_support = 0;
+ i915->vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
@@ -705,11 +707,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (bdb->version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
- dev_priv->vbt.int_lvds_support = 0;
+ i915->vbt.int_lvds_support = 0;
}
if (bdb->version < 228) {
- drm_dbg_kms(&dev_priv->drm, "DRRS State Enabled:%d\n",
+ drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
@@ -718,18 +720,18 @@ parse_driver_features(struct drm_i915_private *dev_priv,
* driver->drrs_enabled=false
*/
if (!driver->drrs_enabled)
- dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- dev_priv->vbt.psr.enable = driver->psr_enabled;
+ i915->vbt.psr.enable = driver->psr_enabled;
}
}
static void
-parse_power_conservation_features(struct drm_i915_private *dev_priv,
+parse_power_conservation_features(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_lfp_power *power;
- u8 panel_type = dev_priv->vbt.panel_type;
+ u8 panel_type = i915->vbt.panel_type;
if (bdb->version < 228)
return;
@@ -738,7 +740,7 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
if (!power)
return;
- dev_priv->vbt.psr.enable = power->psr & BIT(panel_type);
+ i915->vbt.psr.enable = power->psr & BIT(panel_type);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
@@ -747,19 +749,19 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
* power->drrs & BIT(panel_type)=false
*/
if (!(power->drrs & BIT(panel_type)))
- dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ i915->vbt.drrs_type = DRRS_NOT_SUPPORTED;
if (bdb->version >= 232)
- dev_priv->vbt.edp.hobl = power->hobl & BIT(panel_type);
+ i915->vbt.edp.hobl = power->hobl & BIT(panel_type);
}
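
parse_power_conservation_features() reads per-panel booleans out of bitmasks: bit N of power->psr, power->drrs and power->hobl belongs to panel type N. A standalone sketch of that decode with hypothetical mask values:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical per-panel feature masks; bit N = panel type N. */
        unsigned int psr = 0x0001, drrs = 0xfffe, hobl = 0x0004;
        int panel_type = 0; /* assumed panel index */

        printf("psr:%d drrs:%d hobl:%d\n",
               !!(psr & (1u << panel_type)),
               !!(drrs & (1u << panel_type)),
               !!(hobl & (1u << panel_type)));
        return 0;
    }

For panel type 0 this prints psr:1 drrs:0 hobl:0, i.e. PSR on, DRRS forced off, HOBL off.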
static void
-parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_edp(struct drm_i915_private *i915, const struct bdb_header *bdb)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
- int panel_type = dev_priv->vbt.panel_type;
+ int panel_type = i915->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
if (!edp)
@@ -767,13 +769,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
- dev_priv->vbt.edp.bpp = 18;
+ i915->vbt.edp.bpp = 18;
break;
case EDP_24BPP:
- dev_priv->vbt.edp.bpp = 24;
+ i915->vbt.edp.bpp = 24;
break;
case EDP_30BPP:
- dev_priv->vbt.edp.bpp = 30;
+ i915->vbt.edp.bpp = 30;
break;
}
@@ -781,17 +783,17 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
- dev_priv->vbt.edp.pps = *edp_pps;
+ i915->vbt.edp.pps = *edp_pps;
switch (edp_link_params->rate) {
case EDP_RATE_1_62:
- dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
+ i915->vbt.edp.rate = DP_LINK_BW_1_62;
break;
case EDP_RATE_2_7:
- dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
+ i915->vbt.edp.rate = DP_LINK_BW_2_7;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT has unknown eDP link rate value %u\n",
edp_link_params->rate);
break;
@@ -799,16 +801,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->lanes) {
case EDP_LANE_1:
- dev_priv->vbt.edp.lanes = 1;
+ i915->vbt.edp.lanes = 1;
break;
case EDP_LANE_2:
- dev_priv->vbt.edp.lanes = 2;
+ i915->vbt.edp.lanes = 2;
break;
case EDP_LANE_4:
- dev_priv->vbt.edp.lanes = 4;
+ i915->vbt.edp.lanes = 4;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT has unknown eDP lane count value %u\n",
edp_link_params->lanes);
break;
@@ -816,19 +818,19 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE:
- dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case EDP_PREEMPHASIS_3_5dB:
- dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case EDP_PREEMPHASIS_6dB:
- dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case EDP_PREEMPHASIS_9_5dB:
- dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT has unknown eDP pre-emphasis value %u\n",
edp_link_params->preemphasis);
break;
@@ -836,19 +838,19 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V:
- dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case EDP_VSWING_0_6V:
- dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case EDP_VSWING_0_8V:
- dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case EDP_VSWING_1_2V:
- dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT has unknown eDP voltage swing value %u\n",
edp_link_params->vswing);
break;
@@ -858,53 +860,53 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (dev_priv->params.edp_vswing) {
- dev_priv->vbt.edp.low_vswing =
- dev_priv->params.edp_vswing == 1;
+ if (i915->params.edp_vswing) {
+ i915->vbt.edp.low_vswing =
+ i915->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- dev_priv->vbt.edp.low_vswing = vswing == 0;
+ i915->vbt.edp.low_vswing = vswing == 0;
}
}
}
static void
-parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
- int panel_type = dev_priv->vbt.panel_type;
+ int panel_type = i915->vbt.panel_type;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
- drm_dbg_kms(&dev_priv->drm, "No PSR BDB found.\n");
+ drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
return;
}
psr_table = &psr->psr_table[panel_type];
- dev_priv->vbt.psr.full_link = psr_table->full_link;
- dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+ i915->vbt.psr.full_link = psr_table->full_link;
+ i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
/* Allowed VBT values go from 0 to 15 */
- dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
switch (psr_table->lines_to_wait) {
case 0:
- dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+ i915->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
break;
case 1:
- dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+ i915->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
break;
case 2:
- dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+ i915->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
break;
case 3:
- dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+ i915->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT has unknown PSR lines to wait %u\n",
psr_table->lines_to_wait);
break;
@@ -915,50 +917,49 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
* Old decimal value is wake up time in multiples of 100 us.
*/
if (bdb->version >= 205 &&
- (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10)) {
+ (IS_GEN9_BC(i915) || DISPLAY_VER(i915) >= 10)) {
switch (psr_table->tp1_wakeup_time) {
case 0:
- dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+ i915->vbt.psr.tp1_wakeup_time_us = 500;
break;
case 1:
- dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+ i915->vbt.psr.tp1_wakeup_time_us = 100;
break;
case 3:
- dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+ i915->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp1_wakeup_time);
fallthrough;
case 2:
- dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+ i915->vbt.psr.tp1_wakeup_time_us = 2500;
break;
}
switch (psr_table->tp2_tp3_wakeup_time) {
case 0:
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ i915->vbt.psr.tp2_tp3_wakeup_time_us = 500;
break;
case 1:
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ i915->vbt.psr.tp2_tp3_wakeup_time_us = 100;
break;
case 3:
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ i915->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp2_tp3_wakeup_time);
fallthrough;
case 2:
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
break;
}
} else {
- dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
if (bdb->version >= 226) {
@@ -980,74 +981,74 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
wakeup_time = 2500;
break;
}
- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
} else {
/* Reusing PSR1 wakeup time for PSR2 in older VBTs */
- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+ i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us;
}
}
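
To make the two wakeup-time encodings concrete: from VBT 205 on (for the platforms listed above) the field is an enum where 0 means 500us, 1 means 100us, 2 means 2500us and 3 means 0us, while older VBTs store the time directly in multiples of 100us. A standalone sketch of that split, mirroring the decode in parse_psr():

    #include <stdio.h>

    static int tp_wakeup_time_us(int bdb_version, int raw)
    {
        if (bdb_version >= 205) {
            switch (raw) {
            case 0: return 500;
            case 1: return 100;
            case 3: return 0;
            default: /* out-of-range values fall back to the max */
            case 2: return 2500;
            }
        }
        return raw * 100; /* old encoding: multiples of 100 us */
    }

    int main(void)
    {
        /* Same raw value, opposite meanings: prints "0 300". */
        printf("%d %d\n", tp_wakeup_time_us(210, 3), tp_wakeup_time_us(200, 3));
        return 0;
    }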
-static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
u16 version, enum port port)
{
- if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
- dev_priv->vbt.dsi.bl_ports = BIT(port);
- if (dev_priv->vbt.dsi.config->cabc_supported)
- dev_priv->vbt.dsi.cabc_ports = BIT(port);
+ if (!i915->vbt.dsi.config->dual_link || version < 197) {
+ i915->vbt.dsi.bl_ports = BIT(port);
+ if (i915->vbt.dsi.config->cabc_supported)
+ i915->vbt.dsi.cabc_ports = BIT(port);
return;
}
- switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+ switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) {
case DL_DCS_PORT_A:
- dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+ i915->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+ i915->vbt.dsi.bl_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
- dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
- if (!dev_priv->vbt.dsi.config->cabc_supported)
+ if (!i915->vbt.dsi.config->cabc_supported)
return;
- switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+ switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) {
case DL_DCS_PORT_A:
- dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+ i915->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+ i915->vbt.dsi.cabc_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
- dev_priv->vbt.dsi.cabc_ports =
+ i915->vbt.dsi.cabc_ports =
BIT(PORT_A) | BIT(PORT_C);
break;
}
}
static void
-parse_mipi_config(struct drm_i915_private *dev_priv,
+parse_mipi_config(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
- int panel_type = dev_priv->vbt.panel_type;
+ int panel_type = i915->vbt.panel_type;
enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!intel_bios_is_dsi_present(dev_priv, &port))
+ if (!intel_bios_is_dsi_present(i915, &port))
return;
/* Initialize this to undefined indicating no generic MIPI support */
- dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+ i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
/* Block #40 is already parsed and panel_fixed_mode is
- * stored in dev_priv->lfp_lvds_vbt_mode
+ * stored in i915->lfp_lvds_vbt_mode
* reuse this when needed
*/
@@ -1056,11 +1057,11 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
*/
start = find_section(bdb, BDB_MIPI_CONFIG);
if (!start) {
- drm_dbg_kms(&dev_priv->drm, "No MIPI config BDB found");
+ drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
return;
}
- drm_dbg(&dev_priv->drm, "Found MIPI Config block, panel index = %d\n",
+ drm_dbg(&i915->drm, "Found MIPI Config block, panel index = %d\n",
panel_type);
/*
@@ -1071,17 +1072,17 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
pps = &start->pps[panel_type];
/* store as of now full data. Trim when we realise all is not needed */
- dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
- if (!dev_priv->vbt.dsi.config)
+ i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+ if (!i915->vbt.dsi.config)
return;
- dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
- if (!dev_priv->vbt.dsi.pps) {
- kfree(dev_priv->vbt.dsi.config);
+ i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+ if (!i915->vbt.dsi.pps) {
+ kfree(i915->vbt.dsi.config);
return;
}
- parse_dsi_backlight_ports(dev_priv, bdb->version, port);
+ parse_dsi_backlight_ports(i915, bdb->version, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
@@ -1090,25 +1091,25 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
* Most (all?) VBTs claim 0 degrees despite having
* an upside down panel, thus we do not trust this.
*/
- dev_priv->vbt.dsi.orientation =
+ i915->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
break;
case ENABLE_ROTATION_90:
- dev_priv->vbt.dsi.orientation =
+ i915->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
break;
case ENABLE_ROTATION_180:
- dev_priv->vbt.dsi.orientation =
+ i915->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
break;
case ENABLE_ROTATION_270:
- dev_priv->vbt.dsi.orientation =
+ i915->vbt.dsi.orientation =
DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
break;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */
- dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+ i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
/* Find the sequence block and size for the given panel. */
@@ -1271,13 +1272,13 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
* Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
* skip all delay + gpio operands and stop at the first DSI packet op.
*/
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
{
- const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
- if (drm_WARN_ON(&dev_priv->drm,
- !data || dev_priv->vbt.dsi.seq_version != 1))
+ if (drm_WARN_ON(&i915->drm,
+ !data || i915->vbt.dsi.seq_version != 1))
return 0;
/* index = 1 to skip sequence byte */
@@ -1305,55 +1306,55 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+static void fixup_mipi_sequences(struct drm_i915_private *i915)
{
u8 *init_otp;
int len;
/* Limit this to VLV for now. */
- if (!IS_VALLEYVIEW(dev_priv))
+ if (!IS_VALLEYVIEW(i915))
return;
/* Limit this to v1 vid-mode sequences */
- if (dev_priv->vbt.dsi.config->is_cmd_mode ||
- dev_priv->vbt.dsi.seq_version != 1)
+ if (i915->vbt.dsi.config->is_cmd_mode ||
+ i915->vbt.dsi.seq_version != 1)
return;
/* Only do this if there are otp and assert seqs and no deassert seq */
- if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
- !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
- dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
return;
/* The deassert-sequence ends at the first DSI packet */
- len = get_init_otp_deassert_fragment_len(dev_priv);
+ len = get_init_otp_deassert_fragment_len(i915);
if (!len)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
- init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
- dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
- if (!dev_priv->vbt.dsi.deassert_seq)
+ init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!i915->vbt.dsi.deassert_seq)
return;
- dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
- dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
/* Use the copy for deassert */
- dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
- dev_priv->vbt.dsi.deassert_seq;
+ i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ i915->vbt.dsi.deassert_seq;
/* Replace the last byte of the fragment with init OTP seq byte */
init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
/* And make MIPI_MIPI_SEQ_INIT_OTP point to it */
- dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+ i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
static void
-parse_mipi_sequence(struct drm_i915_private *dev_priv,
+parse_mipi_sequence(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
- int panel_type = dev_priv->vbt.panel_type;
+ int panel_type = i915->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
@@ -1361,25 +1362,25 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
int index = 0;
/* Only our generic panel driver uses the sequence block. */
- if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No MIPI Sequence found, parsing complete\n");
return;
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Unable to parse MIPI Sequence Block v%u\n",
sequence->version);
return;
}
- drm_dbg(&dev_priv->drm, "Found MIPI sequence block v%u\n",
+ drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
sequence->version);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
@@ -1397,41 +1398,41 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
break;
if (seq_id >= MIPI_SEQ_MAX) {
- drm_err(&dev_priv->drm, "Unknown sequence %u\n",
+ drm_err(&i915->drm, "Unknown sequence %u\n",
seq_id);
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Unsupported sequence %u\n", seq_id);
- dev_priv->vbt.dsi.sequence[seq_id] = data + index;
+ i915->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
- drm_err(&dev_priv->drm, "Invalid sequence %u\n",
+ drm_err(&i915->drm, "Invalid sequence %u\n",
seq_id);
goto err;
}
}
- dev_priv->vbt.dsi.data = data;
- dev_priv->vbt.dsi.size = seq_size;
- dev_priv->vbt.dsi.seq_version = sequence->version;
+ i915->vbt.dsi.data = data;
+ i915->vbt.dsi.size = seq_size;
+ i915->vbt.dsi.seq_version = sequence->version;
- fixup_mipi_sequences(dev_priv);
+ fixup_mipi_sequences(i915);
- drm_dbg(&dev_priv->drm, "MIPI related VBT parsing complete\n");
+ drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
return;
err:
kfree(data);
- memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
+ memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence));
}
static void
@@ -1439,7 +1440,7 @@ parse_compression_parameters(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_compression_parameters *params;
- struct display_device_data *devdata;
+ struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
u16 block_size;
int index;
@@ -1505,51 +1506,52 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
const struct ddi_vbt_port_info *info;
enum port port;
+ if (!ddc_pin)
+ return PORT_NONE;
+
for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
- if (info->child && ddc_pin == info->alternate_ddc_pin)
+ if (info->devdata && ddc_pin == info->alternate_ddc_pin)
return port;
}
return PORT_NONE;
}
-static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+static void sanitize_ddc_pin(struct drm_i915_private *i915,
enum port port)
{
- struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+ struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
+ struct child_device_config *child;
enum port p;
- if (!info->alternate_ddc_pin)
+ p = get_port_by_ddc_pin(i915, info->alternate_ddc_pin);
+ if (p == PORT_NONE)
return;
- p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
- if (p != PORT_NONE) {
- drm_dbg_kms(&dev_priv->drm,
- "port %c trying to use the same DDC pin (0x%x) as port %c, "
- "disabling port %c DVI/HDMI support\n",
- port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(p));
+ drm_dbg_kms(&i915->drm,
+ "port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(port), info->alternate_ddc_pin,
+ port_name(p), port_name(p));
- /*
- * If we have multiple ports supposedly sharing the
- * pin, then dvi/hdmi couldn't exist on the shared
- * port. Otherwise they share the same ddc bin and
- * system couldn't communicate with them separately.
- *
- * Give inverse child device order the priority,
- * last one wins. Yes, there are real machines
- * (eg. Asrock B250M-HDV) where VBT has both
- * port A and port E with the same AUX ch and
- * we must pick port E :(
- */
- info = &dev_priv->vbt.ddi_port_info[p];
+ /*
+ * If we have multiple ports supposedly sharing the pin, then dvi/hdmi
+ * couldn't exist on the shared port. Otherwise they share the same ddc
+ * pin and system couldn't communicate with them separately.
+ *
+ * Give inverse child device order the priority, last one wins. Yes,
+ * there are real machines (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and we must pick port E :(
+ */
+ info = &i915->vbt.ddi_port_info[p];
+ child = &info->devdata->child;
- info->supports_dvi = false;
- info->supports_hdmi = false;
- info->alternate_ddc_pin = 0;
- }
+ child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
+
+ info->alternate_ddc_pin = 0;
}
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
@@ -1557,50 +1559,50 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
const struct ddi_vbt_port_info *info;
enum port port;
+ if (!aux_ch)
+ return PORT_NONE;
+
for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
- if (info->child && aux_ch == info->alternate_aux_channel)
+ if (info->devdata && aux_ch == info->alternate_aux_channel)
return port;
}
return PORT_NONE;
}
-static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+static void sanitize_aux_ch(struct drm_i915_private *i915,
enum port port)
{
- struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+ struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
+ struct child_device_config *child;
enum port p;
- if (!info->alternate_aux_channel)
+ p = get_port_by_aux_ch(i915, info->alternate_aux_channel);
+ if (p == PORT_NONE)
return;
- p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
- if (p != PORT_NONE) {
- drm_dbg_kms(&dev_priv->drm,
- "port %c trying to use the same AUX CH (0x%x) as port %c, "
- "disabling port %c DP support\n",
- port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(p));
+ drm_dbg_kms(&i915->drm,
+ "port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(port), info->alternate_aux_channel,
+ port_name(p), port_name(p));
- /*
- * If we have multiple ports supposedlt sharing the
- * aux channel, then DP couldn't exist on the shared
- * port. Otherwise they share the same aux channel
- * and system couldn't communicate with them separately.
- *
- * Give inverse child device order the priority,
- * last one wins. Yes, there are real machines
- * (eg. Asrock B250M-HDV) where VBT has both
- * port A and port E with the same AUX ch and
- * we must pick port E :(
- */
- info = &dev_priv->vbt.ddi_port_info[p];
+ /*
+ * If we have multiple ports supposedly sharing the aux channel, then DP
+ * couldn't exist on the shared port. Otherwise they share the same aux
+ * channel and system couldn't communicate with them separately.
+ *
+ * Give inverse child device order the priority, last one wins. Yes,
+ * there are real machines (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and we must pick port E :(
+ */
+ info = &i915->vbt.ddi_port_info[p];
+ child = &info->devdata->child;
- info->supports_dp = false;
- info->alternate_aux_channel = 0;
- }
+ child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ info->alternate_aux_channel = 0;
}
static const u8 cnp_ddc_pin_map[] = {
@@ -1630,20 +1632,40 @@ static const u8 rkl_pch_tgp_ddc_pin_map[] = {
[RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
};
-static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+static const u8 adls_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static const u8 gen9bc_tgp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
+ if (HAS_PCH_ADP(i915)) {
+ ddc_pin_map = adls_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adls_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
return vbt_pin;
- } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
+ } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
+ } else if (HAS_PCH_TGP(i915) && IS_GEN9_BC(i915)) {
+ ddc_pin_map = gen9bc_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(dev_priv)) {
+ } else if (HAS_PCH_CNP(i915)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
@@ -1654,7 +1676,7 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
return ddc_pin_map[vbt_pin];
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
vbt_pin);
return 0;
@@ -1679,7 +1701,7 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo,
return PORT_NONE;
}
-static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
+static enum port dvo_port_to_port(struct drm_i915_private *i915,
u8 dvo_port)
{
/*
@@ -1708,8 +1730,26 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
[PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
[PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
};
+ /*
+ * Alderlake S ports used in the driver are PORT_A, PORT_D, PORT_E,
+ * PORT_F and PORT_G, which we need to map to the correct VBT sections.
+ */
+ static const int adls_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { -1 },
+ [PORT_C] = { -1 },
+ [PORT_TC1] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_TC2] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
+ };
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ if (IS_ALDERLAKE_S(i915))
+ return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping),
+ ARRAY_SIZE(adls_port_mapping[0]),
+ adls_port_mapping,
+ dvo_port);
+ else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
ARRAY_SIZE(rkl_port_mapping[0]),
rkl_port_mapping,
@@ -1721,69 +1761,146 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
dvo_port);
}
-static void parse_ddi_port(struct drm_i915_private *dev_priv,
- struct display_device_data *devdata,
- u8 bdb_version)
+static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
+{
+ switch (vbt_max_link_rate) {
+ default:
+ case BDB_230_VBT_DP_MAX_LINK_RATE_DEF:
+ return 0;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20:
+ return 2000000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5:
+ return 1350000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10:
+ return 1000000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR3:
+ return 810000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR2:
+ return 540000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR:
+ return 270000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_LBR:
+ return 162000;
+ }
+}
+
+static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
+{
+ switch (vbt_max_link_rate) {
+ default:
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR3:
+ return 810000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR2:
+ return 540000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR:
+ return 270000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_LBR:
+ return 162000;
+ }
+}
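+
Both helpers translate the raw VBT field into a link rate in kHz, with unknown values mapping to 0 so the driver falls back to its own maximum. A standalone sketch of the 230+ variant; the enum values below are local stand-ins, since the real BDB_230_* constants live in intel_vbt_defs.h and are not shown in this diff:

    #include <stdio.h>

    /* Local stand-in values only; not the real BDB_230_* definitions. */
    enum { DEF, UHBR20, UHBR13P5, UHBR10, HBR3, HBR2, HBR, LBR };

    static int rate_khz(int raw)
    {
        switch (raw) {
        case UHBR20:   return 2000000;
        case UHBR13P5: return 1350000;
        case UHBR10:   return 1000000;
        case HBR3:     return 810000;
        case HBR2:     return 540000;
        case HBR:      return 270000;
        case LBR:      return 162000;
        default:       return 0; /* DEF: let the driver decide */
        }
    }

    int main(void)
    {
        printf("%d kHz\n", rate_khz(HBR3)); /* prints: 810000 kHz */
        return 0;
    }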
+
+static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ bool is_hdmi;
+
+ if (port != PORT_A || DISPLAY_VER(i915) >= 12)
+ return;
+
+ if (!(devdata->child.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING))
+ return;
+
+ is_hdmi = !(devdata->child.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT);
+
+ drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+
+ devdata->child.device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
+}
+
+static bool
+intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+}
+
+bool
+intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+}
+
+bool
+intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata)
+{
+ return intel_bios_encoder_supports_dvi(devdata) &&
+ (devdata->child.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+}
+
+bool
+intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+}
+
+static bool
+intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
+{
+ return intel_bios_encoder_supports_dp(devdata) &&
+ devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
+}
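+
The helpers above encode two derived facts: HDMI requires TMDS/DVI signaling without the not-HDMI bit set, and eDP is DP on an internal connector. A standalone check of that logic with illustrative flag values (the real DEVICE_TYPE_* bits are defined in intel_vbt_defs.h):

    #include <stdio.h>

    /* Illustrative flag values only, not the real DEVICE_TYPE_* bits. */
    #define TMDS_DVI  (1u << 0)
    #define NOT_HDMI  (1u << 1)
    #define DP_OUTPUT (1u << 2)
    #define INTERNAL  (1u << 3)

    int main(void)
    {
        unsigned int device_type = TMDS_DVI | DP_OUTPUT | INTERNAL;

        printf("dvi:%d hdmi:%d dp:%d edp:%d\n",
               !!(device_type & TMDS_DVI),
               !!(device_type & TMDS_DVI) && !(device_type & NOT_HDMI),
               !!(device_type & DP_OUTPUT),
               !!(device_type & DP_OUTPUT) && !!(device_type & INTERNAL));
        return 0;
    }

This combination prints dvi:1 hdmi:1 dp:1 edp:1.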
+
+static void parse_ddi_port(struct drm_i915_private *i915,
+ struct intel_bios_encoder_data *devdata)
{
const struct child_device_config *child = &devdata->child;
struct ddi_vbt_port_info *info;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
+ int dp_boost_level, hdmi_boost_level;
enum port port;
- port = dvo_port_to_port(dev_priv, child->dvo_port);
+ port = dvo_port_to_port(i915, child->dvo_port);
if (port == PORT_NONE)
return;
- info = &dev_priv->vbt.ddi_port_info[port];
+ info = &i915->vbt.ddi_port_info[port];
- if (info->child) {
- drm_dbg_kms(&dev_priv->drm,
+ if (info->devdata) {
+ drm_dbg_kms(&i915->drm,
"More than one child device for port %c in VBT, using the first.\n",
port_name(port));
return;
}
- is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
- is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
- is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
- is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
-
- if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
- drm_dbg_kms(&dev_priv->drm,
- "VBT claims port A supports DVI%s, ignoring\n",
- is_hdmi ? "/HDMI" : "");
- is_dvi = false;
- is_hdmi = false;
- }
+ sanitize_device_type(devdata, port);
- info->supports_dvi = is_dvi;
- info->supports_hdmi = is_hdmi;
- info->supports_dp = is_dp;
- info->supports_edp = is_edp;
+ is_dvi = intel_bios_encoder_supports_dvi(devdata);
+ is_dp = intel_bios_encoder_supports_dp(devdata);
+ is_crt = intel_bios_encoder_supports_crt(devdata);
+ is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
+ is_edp = intel_bios_encoder_supports_edp(devdata);
- if (bdb_version >= 195)
- info->supports_typec_usb = child->dp_usb_type_c;
+ supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
+ supports_tbt = intel_bios_encoder_supports_tbt(devdata);
- if (bdb_version >= 209)
- info->supports_tbt = child->tbt;
-
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
- HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt,
+ HAS_LSPCON(i915) && child->lspcon,
+ supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
- ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
- if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+ ddc_pin = map_ddc_pin(i915, child->ddc_pin);
+ if (intel_gmbus_is_valid_pin(i915, ddc_pin)) {
info->alternate_ddc_pin = ddc_pin;
- sanitize_ddc_pin(dev_priv, port);
+ sanitize_ddc_pin(i915, port);
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Port %c has invalid DDC pin %d, "
"sticking to defaults\n",
port_name(port), ddc_pin);
@@ -1793,21 +1910,21 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (is_dp) {
info->alternate_aux_channel = child->aux_channel;
- sanitize_aux_ch(dev_priv, port);
+ sanitize_aux_ch(i915, port);
}
- if (bdb_version >= 158) {
+ if (i915->vbt.version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
u8 hdmi_level_shift = child->hdmi_level_shifter_value;
- drm_dbg_kms(&dev_priv->drm,
- "VBT HDMI level shift for port %c: %d\n",
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI level shift: %d\n",
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
info->hdmi_level_shift_set = true;
}
- if (bdb_version >= 204) {
+ if (i915->vbt.version >= 204) {
int max_tmds_clock;
switch (child->hdmi_max_data_rate) {
@@ -1826,69 +1943,60 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
}
if (max_tmds_clock)
- drm_dbg_kms(&dev_priv->drm,
- "VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
- /* Parse the I_boost config for SKL and above */
- if (bdb_version >= 196 && child->iboost) {
- info->dp_boost_level = translate_iboost(child->dp_iboost_level);
- drm_dbg_kms(&dev_priv->drm,
- "VBT (e)DP boost level for port %c: %d\n",
- port_name(port), info->dp_boost_level);
- info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
- drm_dbg_kms(&dev_priv->drm,
- "VBT HDMI boost level for port %c: %d\n",
- port_name(port), info->hdmi_boost_level);
- }
+ /* I_boost config for SKL and above */
+ dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
+ if (dp_boost_level)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT (e)DP boost level: %d\n",
+ port_name(port), dp_boost_level);
+
+ hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata);
+ if (hdmi_boost_level)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI boost level: %d\n",
+ port_name(port), hdmi_boost_level);
/* DP max link rate for CNL+ */
- if (bdb_version >= 216) {
- switch (child->dp_max_link_rate) {
- default:
- case VBT_DP_MAX_LINK_RATE_HBR3:
- info->dp_max_link_rate = 810000;
- break;
- case VBT_DP_MAX_LINK_RATE_HBR2:
- info->dp_max_link_rate = 540000;
- break;
- case VBT_DP_MAX_LINK_RATE_HBR:
- info->dp_max_link_rate = 270000;
- break;
- case VBT_DP_MAX_LINK_RATE_LBR:
- info->dp_max_link_rate = 162000;
- break;
- }
- drm_dbg_kms(&dev_priv->drm,
- "VBT DP max link rate for port %c: %d\n",
+ if (i915->vbt.version >= 216) {
+ if (i915->vbt.version >= 230)
+ info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate);
+ else
+ info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate);
+
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT DP max link rate: %d\n",
port_name(port), info->dp_max_link_rate);
}
- info->child = child;
+ info->devdata = devdata;
}
-static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
+static void parse_ddi_ports(struct drm_i915_private *i915)
{
- struct display_device_data *devdata;
+ struct intel_bios_encoder_data *devdata;
- if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
return;
- if (bdb_version < 155)
+ if (i915->vbt.version < 155)
return;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node)
- parse_ddi_port(dev_priv, devdata, bdb_version);
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node)
+ parse_ddi_port(i915, devdata);
}
static void
-parse_general_definitions(struct drm_i915_private *dev_priv,
+parse_general_definitions(struct drm_i915_private *i915,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *defs;
- struct display_device_data *devdata;
+ struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
int i, child_device_num;
u8 expected_size;
@@ -1897,23 +2005,23 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No general definition block is found, no devices defined.\n");
return;
}
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"General definitions block too small (%u)\n",
block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
- drm_dbg_kms(&dev_priv->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
- if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
- dev_priv->vbt.crt_ddc_pin = bus_pin;
+ drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
+ if (intel_gmbus_is_valid_pin(i915, bus_pin))
+ i915->vbt.crt_ddc_pin = bus_pin;
if (bdb->version < 106) {
expected_size = 22;
@@ -1930,20 +2038,20 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
- drm_dbg(&dev_priv->drm,
+ drm_dbg(&i915->drm,
"Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
- drm_err(&dev_priv->drm,
+ drm_err(&i915->drm,
"Unexpected child device config size %u (expected %u for VBT version %u)\n",
defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Child device config size %u is too small.\n",
defs->child_dev_size);
return;
@@ -1957,7 +2065,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Found VBT child device with type 0x%x\n",
child->device_type);
@@ -1965,6 +2073,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!devdata)
break;
+ devdata->i915 = i915;
+
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device config. Accessing the
@@ -1973,71 +2083,103 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- list_add_tail(&devdata->node, &dev_priv->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->vbt.display_devices);
}
- if (list_empty(&dev_priv->vbt.display_devices))
- drm_dbg_kms(&dev_priv->drm,
+ if (list_empty(&i915->vbt.display_devices))
+ drm_dbg_kms(&i915->drm,
"no child dev is parsed from VBT\n");
}
/* Common defaults which may be overridden by VBT. */
static void
-init_vbt_defaults(struct drm_i915_private *dev_priv)
+init_vbt_defaults(struct drm_i915_private *i915)
{
- dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+ i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* Default to having backlight */
- dev_priv->vbt.backlight.present = true;
+ i915->vbt.backlight.present = true;
/* LFP panel data */
- dev_priv->vbt.lvds_dither = 1;
+ i915->vbt.lvds_dither = 1;
/* SDVO panel data */
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ i915->vbt.sdvo_lvds_vbt_mode = NULL;
/* general features */
- dev_priv->vbt.int_tv_support = 1;
- dev_priv->vbt.int_crt_support = 1;
+ i915->vbt.int_tv_support = 1;
+ i915->vbt.int_crt_support = 1;
/* driver features */
- dev_priv->vbt.int_lvds_support = 1;
+ i915->vbt.int_lvds_support = 1;
/* Default to using SSC */
- dev_priv->vbt.lvds_use_ssc = 1;
+ i915->vbt.lvds_use_ssc = 1;
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
- dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
- !HAS_PCH_SPLIT(dev_priv));
- drm_dbg_kms(&dev_priv->drm, "Set default to SSC at %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
+ i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
+ !HAS_PCH_SPLIT(i915));
+ drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
+ i915->vbt.lvds_ssc_freq);
}
/* Defaults to initialize only if there is no VBT. */
static void
-init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
+init_vbt_missing_defaults(struct drm_i915_private *i915)
{
enum port port;
+ int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+ BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
- for_each_port(port) {
- struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
+ return;
+
+ for_each_port_masked(port, ports) {
+ struct intel_bios_encoder_data *devdata;
+ struct child_device_config *child;
+ enum phy phy = intel_port_to_phy(i915, port);
/*
* VBT has the TypeC mode (native,TBT/USB) and we don't want
* to detect it.
*/
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(i915, phy))
continue;
- info->supports_dvi = (port != PORT_A && port != PORT_E);
- info->supports_hdmi = info->supports_dvi;
- info->supports_dp = (port != PORT_E);
- info->supports_edp = (port == PORT_A);
+ /* Create fake child device config */
+ devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+ if (!devdata)
+ break;
+
+ devdata->i915 = i915;
+ child = &devdata->child;
+
+ if (port == PORT_F)
+ child->dvo_port = DVO_PORT_HDMIF;
+ else if (port == PORT_E)
+ child->dvo_port = DVO_PORT_HDMIE;
+ else
+ child->dvo_port = DVO_PORT_HDMIA + port;
+
+ if (port != PORT_A && port != PORT_E)
+ child->device_type |= DEVICE_TYPE_TMDS_DVI_SIGNALING;
+
+ if (port != PORT_E)
+ child->device_type |= DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+
+ if (port == PORT_A)
+ child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
+
+ list_add_tail(&devdata->node, &i915->vbt.display_devices);
+
+ drm_dbg_kms(&i915->drm,
+ "Generating default VBT child device with type 0x04%x on port %c\n",
+ child->device_type, port_name(port));
}
+
+ /* Bypass some minimum baseline VBT version checks */
+ i915->vbt.version = 155;
}
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
@@ -2096,9 +2238,9 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return vbt;
}
-static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
void __iomem *p = NULL, *oprom;
struct vbt_header *vbt;
u16 vbt_size;
@@ -2122,13 +2264,13 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
goto err_unmap_oprom;
if (sizeof(struct vbt_header) > size) {
- drm_dbg(&dev_priv->drm, "VBT header incomplete\n");
+ drm_dbg(&i915->drm, "VBT header incomplete\n");
goto err_unmap_oprom;
}
vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
if (vbt_size > size) {
- drm_dbg(&dev_priv->drm,
+ drm_dbg(&i915->drm,
"VBT incomplete (vbt_size overflows)\n");
goto err_unmap_oprom;
}
@@ -2157,123 +2299,124 @@ err_unmap_oprom:
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
* was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
* initialize some defaults if the VBT is not present at all.
*/
-void intel_bios_init(struct drm_i915_private *dev_priv)
+void intel_bios_init(struct drm_i915_private *i915)
{
- const struct vbt_header *vbt = dev_priv->opregion.vbt;
+ const struct vbt_header *vbt = i915->opregion.vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
+ INIT_LIST_HEAD(&i915->vbt.display_devices);
- if (!HAS_DISPLAY(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!HAS_DISPLAY(i915)) {
+ drm_dbg_kms(&i915->drm,
"Skipping VBT init due to disabled display.\n");
return;
}
- init_vbt_defaults(dev_priv);
+ init_vbt_defaults(i915);
/* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) {
- oprom_vbt = oprom_get_vbt(dev_priv);
+ oprom_vbt = oprom_get_vbt(i915);
if (!oprom_vbt)
goto out;
vbt = oprom_vbt;
- drm_dbg_kms(&dev_priv->drm, "Found valid VBT in PCI ROM\n");
+ drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
}
bdb = get_bdb_header(vbt);
+ i915->vbt.version = bdb->version;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT signature \"%.*s\", BDB version %d\n",
(int)sizeof(vbt->signature), vbt->signature, bdb->version);
/* Grab useful general definitions */
- parse_general_features(dev_priv, bdb);
- parse_general_definitions(dev_priv, bdb);
- parse_panel_options(dev_priv, bdb);
- parse_panel_dtd(dev_priv, bdb);
- parse_lfp_backlight(dev_priv, bdb);
- parse_sdvo_panel_data(dev_priv, bdb);
- parse_driver_features(dev_priv, bdb);
- parse_power_conservation_features(dev_priv, bdb);
- parse_edp(dev_priv, bdb);
- parse_psr(dev_priv, bdb);
- parse_mipi_config(dev_priv, bdb);
- parse_mipi_sequence(dev_priv, bdb);
+ parse_general_features(i915, bdb);
+ parse_general_definitions(i915, bdb);
+ parse_panel_options(i915, bdb);
+ parse_panel_dtd(i915, bdb);
+ parse_lfp_backlight(i915, bdb);
+ parse_sdvo_panel_data(i915, bdb);
+ parse_driver_features(i915, bdb);
+ parse_power_conservation_features(i915, bdb);
+ parse_edp(i915, bdb);
+ parse_psr(i915, bdb);
+ parse_mipi_config(i915, bdb);
+ parse_mipi_sequence(i915, bdb);
/* Depends on child device list */
- parse_compression_parameters(dev_priv, bdb);
-
- /* Further processing on pre-parsed data */
- parse_sdvo_device_mapping(dev_priv, bdb->version);
- parse_ddi_ports(dev_priv, bdb->version);
+ parse_compression_parameters(i915, bdb);
out:
if (!vbt) {
- drm_info(&dev_priv->drm,
+ drm_info(&i915->drm,
"Failed to find VBIOS tables (VBT)\n");
- init_vbt_missing_defaults(dev_priv);
+ init_vbt_missing_defaults(i915);
}
+ /* Further processing on pre-parsed or generated child device data */
+ parse_sdvo_device_mapping(i915);
+ parse_ddi_ports(i915);
+
kfree(oprom_vbt);
}
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*/
-void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
+void intel_bios_driver_remove(struct drm_i915_private *i915)
{
- struct display_device_data *devdata, *n;
+ struct intel_bios_encoder_data *devdata, *n;
- list_for_each_entry_safe(devdata, n, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, n, &i915->vbt.display_devices, node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
- kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.dsi.data);
- dev_priv->vbt.dsi.data = NULL;
- kfree(dev_priv->vbt.dsi.pps);
- dev_priv->vbt.dsi.pps = NULL;
- kfree(dev_priv->vbt.dsi.config);
- dev_priv->vbt.dsi.config = NULL;
- kfree(dev_priv->vbt.dsi.deassert_seq);
- dev_priv->vbt.dsi.deassert_seq = NULL;
+ kfree(i915->vbt.sdvo_lvds_vbt_mode);
+ i915->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(i915->vbt.lfp_lvds_vbt_mode);
+ i915->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(i915->vbt.dsi.data);
+ i915->vbt.dsi.data = NULL;
+ kfree(i915->vbt.dsi.pps);
+ i915->vbt.dsi.pps = NULL;
+ kfree(i915->vbt.dsi.config);
+ i915->vbt.dsi.config = NULL;
+ kfree(i915->vbt.dsi.deassert_seq);
+ i915->vbt.dsi.deassert_seq = NULL;
}
/**
* intel_bios_is_tv_present - is integrated TV present in VBT
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Return true if TV is present. If no child devices were parsed from VBT,
* assume TV is present.
*/
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+bool intel_bios_is_tv_present(struct drm_i915_private *i915)
{
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (!dev_priv->vbt.int_tv_support)
+ if (!i915->vbt.int_tv_support)
return false;
- if (list_empty(&dev_priv->vbt.display_devices))
+ if (list_empty(&i915->vbt.display_devices))
return true;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
child = &devdata->child;
/*
@@ -2299,21 +2442,21 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
/**
* intel_bios_is_lvds_present - is LVDS present in VBT
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @i2c_pin: i2c pin for LVDS if present
*
* Return true if LVDS is present. If no child devices were parsed from VBT,
* assume LVDS is present.
*/
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
{
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (list_empty(&dev_priv->vbt.display_devices))
+ if (list_empty(&i915->vbt.display_devices))
return true;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
child = &devdata->child;
/* If the device type is not LFP, continue.
@@ -2324,7 +2467,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+ if (intel_gmbus_is_valid_pin(i915, child->i2c_pin))
*i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
@@ -2340,7 +2483,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (dev_priv->opregion.vbt)
+ if (i915->opregion.vbt)
return true;
}
@@ -2349,14 +2492,14 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
/**
* intel_bios_is_port_present - is the specified digital port present
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port to check
*
* Return true if the device in %port is present.
*/
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
@@ -2368,19 +2511,19 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
- if (HAS_DDI(dev_priv)) {
+ if (HAS_DDI(i915)) {
const struct ddi_vbt_port_info *port_info =
- &dev_priv->vbt.ddi_port_info[port];
+ &i915->vbt.ddi_port_info[port];
- return port_info->child;
+ return port_info->devdata;
}
/* FIXME maybe deal with port A as well? */
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(&i915->drm,
port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
child = &devdata->child;
if ((child->dvo_port == port_mapping[port].dp ||
@@ -2395,14 +2538,14 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
/**
* intel_bios_is_port_edp - is the device in given port eDP
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port to check
*
* Return true if the device in %port is eDP.
*/
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
{
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
@@ -2412,10 +2555,15 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
[PORT_F] = DVO_PORT_DPF,
};
- if (HAS_DDI(dev_priv))
- return dev_priv->vbt.ddi_port_info[port].supports_edp;
+ if (HAS_DDI(i915)) {
+ const struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ devdata = intel_bios_encoder_data_lookup(i915, port);
+
+ return devdata && intel_bios_encoder_supports_edp(devdata);
+ }
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
child = &devdata->child;
if (child->dvo_port == port_mapping[port] &&
@@ -2462,12 +2610,12 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
return false;
}
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
enum port port)
{
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
if (child_dev_is_dp_dual_mode(&devdata->child, port))
return true;
}
@@ -2477,19 +2625,19 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
/**
* intel_bios_is_dsi_present - is DSI present in VBT
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port for DSI if present
*
* Return true if DSI is present, and return the port in %port.
*/
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
+bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
enum port *port)
{
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
u8 dvo_port;
- list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -2498,15 +2646,15 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
+ (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
+ (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
} else if (dvo_port == DVO_PORT_MIPIB ||
dvo_port == DVO_PORT_MIPIC ||
dvo_port == DVO_PORT_MIPID) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
}
@@ -2585,7 +2733,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
int dsc_max_bpc)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct display_device_data *devdata;
+ const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
@@ -2619,13 +2767,13 @@ bool
intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct child_device_config *child =
- i915->vbt.ddi_port_info[port].child;
+ const struct intel_bios_encoder_data *devdata =
+ i915->vbt.ddi_port_info[port].devdata;
if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915)))
return false;
- return child && child->hpd_invert;
+ return devdata && devdata->child.hpd_invert;
}
/**
@@ -2639,49 +2787,83 @@ bool
intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port)
{
- const struct child_device_config *child =
- i915->vbt.ddi_port_info[port].child;
+ const struct intel_bios_encoder_data *devdata =
+ i915->vbt.ddi_port_info[port].devdata;
- return HAS_LSPCON(i915) && child && child->lspcon;
+ return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
}
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+/**
+ * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if port requires lane reversal
+ */
+bool
+intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ i915->vbt.ddi_port_info[port].devdata;
+
+ return devdata && devdata->child.lane_reversal;
+}
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
enum port port)
{
const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ &i915->vbt.ddi_port_info[port];
enum aux_ch aux_ch;
if (!info->alternate_aux_channel) {
aux_ch = (enum aux_ch)port;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"using AUX %c for port %c (platform default)\n",
aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
+ /*
+ * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
+ * map to DDI A,B,TC1,TC2 respectively.
+ *
+ * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
+ * map to DDI A,TC1,TC2,TC3,TC4 respectively.
+ */
switch (info->alternate_aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
case DP_AUX_B:
- aux_ch = AUX_CH_B;
+ if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
- /*
- * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
- * map to DDI A,B,TC1,TC2 respectively.
- */
- aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
- AUX_CH_USBC1 : AUX_CH_C;
+ if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC2;
+ else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
- AUX_CH_USBC2 : AUX_CH_D;
+ if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC3;
+ else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
+ aux_ch = AUX_CH_USBC2;
+ else
+ aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
- aux_ch = AUX_CH_E;
+ if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC4;
+ else
+ aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
@@ -2701,7 +2883,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
break;
}
- drm_dbg_kms(&dev_priv->drm, "using AUX %c for port %c (VBT)\n",
+ drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n",
aux_ch_name(aux_ch), port_name(port));
return aux_ch;
@@ -2723,18 +2905,20 @@ int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
}
-int intel_bios_dp_boost_level(struct intel_encoder *encoder)
+int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ return 0;
- return i915->vbt.ddi_port_info[encoder->port].dp_boost_level;
+ return translate_iboost(devdata->child.dp_iboost_level);
}
-int intel_bios_hdmi_boost_level(struct intel_encoder *encoder)
+int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ return 0;
- return i915->vbt.ddi_port_info[encoder->port].hdmi_boost_level;
+ return translate_iboost(devdata->child.hdmi_iboost_level);
}
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
@@ -2751,28 +2935,18 @@ int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
}
-bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port)
-{
- return i915->vbt.ddi_port_info[port].supports_dvi;
-}
-
-bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port)
-{
- return i915->vbt.ddi_port_info[port].supports_hdmi;
-}
-
-bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port)
+bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
{
- return i915->vbt.ddi_port_info[port].supports_dp;
+ return devdata->i915->vbt.version >= 195 && devdata->child.dp_usb_type_c;
}
-bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915,
- enum port port)
+bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
{
- return i915->vbt.ddi_port_info[port].supports_typec_usb;
+ return devdata->i915->vbt.version >= 209 && devdata->child.tbt;
}
-bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port)
+const struct intel_bios_encoder_data *
+intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->vbt.ddi_port_info[port].supports_tbt;
+ return i915->vbt.ddi_port_info[port].devdata;
}
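
The query helpers above share one guard pattern: a child-device capability bit only counts if the VBT is new enough to define that field (version 195 for dp_usb_type_c, 196 for iboost, 209 for tbt). A minimal stand-alone sketch of the pattern, using simplified stand-in types rather than the driver's real structs:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the driver types; the version thresholds
 * follow the checks visible in the diff above.
 */
struct fake_child_config {
	bool dp_usb_type_c;
	bool tbt;
};

struct fake_encoder_data {
	int vbt_version;
	struct fake_child_config child;
};

/* A capability only counts if the VBT is new enough to define the bit. */
static bool supports_typec_usb(const struct fake_encoder_data *devdata)
{
	return devdata->vbt_version >= 195 && devdata->child.dp_usb_type_c;
}

static bool supports_tbt(const struct fake_encoder_data *devdata)
{
	return devdata->vbt_version >= 209 && devdata->child.tbt;
}

int main(void)
{
	/* An old VBT may set the bit, but the field predates its definition. */
	struct fake_encoder_data old = { .vbt_version = 190,
					 .child = { .dp_usb_type_c = true } };
	struct fake_encoder_data new = { .vbt_version = 209,
					 .child = { .tbt = true } };

	printf("old VBT type-C: %d\n", supports_typec_usb(&old)); /* 0 */
	printf("new VBT TBT:    %d\n", supports_tbt(&new));       /* 1 */
	return 0;
}

Stashing the i915 backpointer in each devdata (as the parse loop above now does) is what lets these predicates take only the encoder data, instead of an encoder plus a device pointer.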
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e29e79faa01b..4709c4d29805 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -33,6 +33,7 @@
#include <linux/types.h>
struct drm_i915_private;
+struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
enum port;
@@ -241,20 +242,28 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port);
bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
+bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
+ enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
-int intel_bios_dp_boost_level(struct intel_encoder *encoder);
-int intel_bios_hdmi_boost_level(struct intel_encoder *encoder);
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
-bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port);
-bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port);
-bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port);
-bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
-bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
+const struct intel_bios_encoder_data *
+intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
+
+bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
+int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+
#endif /* _INTEL_BIOS_H_ */
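
From a caller's perspective the reworked header boils down to a lookup-then-query shape: fetch the opaque per-port encoder data once, treat NULL as "no device on this port", and ask capability questions of the handle. A toy model of that shape (names and table contents are illustrative only, not the driver's API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-port capability record standing in for devdata. */
struct encoder_data { bool dvi, dp, edp; };

static const struct encoder_data port_table[4] = {
	[0] = { .dp = true, .edp = true },  /* "port A": eDP panel */
	[1] = { .dvi = true, .dp = true },  /* "port B": DP/DVI */
};

static const struct encoder_data *lookup(int port)
{
	if (port < 0 || port >= 4)
		return NULL;
	/* In the driver, absent ports simply have no devdata attached. */
	if (!port_table[port].dvi && !port_table[port].dp)
		return NULL;
	return &port_table[port];
}

static bool supports_edp(const struct encoder_data *d) { return d->edp; }

int main(void)
{
	const struct encoder_data *d = lookup(0);

	printf("port A eDP: %d\n", d && supports_edp(d));   /* 1 */
	printf("port C present: %d\n", lookup(2) != NULL);  /* 0 */
	return 0;
}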
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 4b5a30ac84bc..584ab5ce4106 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -77,9 +77,19 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
- if (IS_GEN(dev_priv, 12))
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
- else if (IS_GEN(dev_priv, 11))
+ if (IS_DISPLAY_VER(dev_priv, 12))
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = 4;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = 8;
+ break;
+ default:
+ qi->t_bl = 16;
+ break;
+ }
+ else if (IS_DISPLAY_VER(dev_priv, 11))
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
if (drm_WARN_ON(&dev_priv->drm,
@@ -142,6 +152,12 @@ static const struct intel_sa_info rkl_sa_info = {
.displayrtids = 128,
};
+static const struct intel_sa_info adls_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -251,11 +267,13 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ROCKETLAKE(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ icl_get_bw_info(dev_priv, &adls_sa_info);
+ else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (IS_GEN(dev_priv, 12))
+ else if (IS_DISPLAY_VER(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (IS_GEN(dev_priv, 11))
+ else if (IS_DISPLAY_VER(dev_priv, 11))
icl_get_bw_info(dev_priv, &icl_sa_info);
}
@@ -515,7 +533,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
u32 mask = (1 << num_qgv_points) - 1;
/* FIXME earlier gens need some checks too */
- if (INTEL_GEN(dev_priv) < 11)
+ if (DISPLAY_VER(dev_priv) < 11)
return 0;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
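
The display-version-12 branch above replaces a two-way DDR4-or-not ternary with an explicit per-DRAM-type burst length, so DDR5 (t_bl = 8) no longer falls into the 16-cycle default. The mapping in isolation, as a compilable sketch with a made-up enum:

#include <stdio.h>

enum dram_type { DRAM_DDR4, DRAM_DDR5, DRAM_OTHER /* illustrative */ };

/*
 * Burst-length selection mirroring the display-version-12 branch in
 * the diff: DDR4 bursts 4, DDR5 bursts 8, everything else 16.
 */
static int qgv_t_bl(enum dram_type type)
{
	switch (type) {
	case DRAM_DDR4:
		return 4;
	case DRAM_DDR5:
		return 8;
	default:
		return 16;
	}
}

int main(void)
{
	printf("DDR5 t_bl = %d\n", qgv_t_bl(DRAM_DDR5)); /* 8, not 16 */
	return 0;
}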
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2e878cc274b7..3f43ad4d7362 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -96,7 +96,7 @@ static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 hpllcc = 0;
/*
@@ -138,7 +138,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -162,7 +162,7 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -256,7 +256,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
static void g33_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -305,7 +305,7 @@ fail:
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -339,7 +339,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
static const u8 div_3200[] = { 16, 10, 8 };
static const u8 div_4000[] = { 20, 12, 10 };
static const u8 div_5333[] = { 24, 16, 14 };
@@ -384,7 +384,7 @@ fail:
static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
unsigned int cdclk_sel;
u16 tmp = 0;
@@ -1375,7 +1375,7 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
{
u32 val, ratio;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_readout_refclk(dev_priv, cdclk_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_readout_refclk(dev_priv, cdclk_config);
@@ -1397,7 +1397,7 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
* CNL+ have the ratio directly in the PLL enable register, gen9lp had
* it in a separate PLL control register.
*/
- if (INTEL_GEN(dev_priv) >= 10)
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
else
ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
@@ -1413,9 +1413,9 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_readout(dev_priv, cdclk_config);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
cdclk_config->bypass = cdclk_config->ref / 2;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11)
cdclk_config->bypass = 50000;
else
cdclk_config->bypass = cdclk_config->ref;
@@ -1433,7 +1433,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
drm_WARN(&dev_priv->drm,
- IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ DISPLAY_VER(dev_priv) >= 10,
"Unsupported divider\n");
div = 3;
break;
@@ -1441,7 +1441,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ drm_WARN(&dev_priv->drm,
+ DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv),
"Unsupported divider\n");
div = 8;
break;
@@ -1530,12 +1531,12 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
if (pipe == INVALID_PIPE)
return TGL_CDCLK_CD2X_PIPE_NONE;
else
return TGL_CDCLK_CD2X_PIPE(pipe);
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
if (pipe == INVALID_PIPE)
return ICL_CDCLK_CD2X_PIPE_NONE;
else
@@ -1558,7 +1559,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
int ret;
/* Inform power controller of upcoming frequency change. */
- if (INTEL_GEN(dev_priv) >= 10)
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
@@ -1591,7 +1592,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
break;
case 3:
drm_WARN(&dev_priv->drm,
- IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ DISPLAY_VER(dev_priv) >= 10,
"Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
@@ -1599,13 +1600,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 8:
- drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ drm_WARN(&dev_priv->drm,
+ DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv),
"Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
- if (INTEL_GEN(dev_priv) >= 10) {
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
if (dev_priv->cdclk.hw.vco != 0 &&
dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_disable(dev_priv);
@@ -1636,7 +1638,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
- if (INTEL_GEN(dev_priv) >= 10) {
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
} else {
@@ -1661,7 +1663,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
intel_update_cdclk(dev_priv);
- if (INTEL_GEN(dev_priv) >= 10)
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
/*
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
@@ -1795,7 +1797,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
*/
void intel_cdclk_init_hw(struct drm_i915_private *i915)
{
- if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ if (IS_GEN9_LP(i915) || DISPLAY_VER(i915) >= 10)
bxt_cdclk_init_hw(i915);
else if (IS_GEN9_BC(i915))
skl_cdclk_init_hw(i915);
@@ -1810,7 +1812,7 @@ void intel_cdclk_init_hw(struct drm_i915_private *i915)
*/
void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
+ if (DISPLAY_VER(i915) >= 10 || IS_GEN9_LP(i915))
bxt_cdclk_uninit_hw(i915);
else if (IS_GEN9_BC(i915))
skl_cdclk_uninit_hw(i915);
@@ -1850,7 +1852,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *b)
{
/* Older hw doesn't have the capability */
- if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
return false;
return a->cdclk != b->cdclk &&
@@ -1998,9 +2000,9 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int pixel_rate = crtc_state->pixel_rate;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEN(dev_priv, 9) ||
+ else if (IS_DISPLAY_VER(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2048,10 +2050,10 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
crtc_state->lane_count == 4) {
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+ if (IS_DISPLAY_VER(dev_priv, 10)) {
/* Display WA #1145: glk,cnl */
min_cdclk = max(316800, min_cdclk);
- } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(432000, min_cdclk);
}
@@ -2061,7 +2063,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
*/
- if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+ if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
/*
@@ -2145,10 +2147,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (IS_ERR(bw_state))
return PTR_ERR(bw_state);
- if (cdclk_state->min_cdclk[i] == min_cdclk)
+ if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
continue;
- cdclk_state->min_cdclk[i] = min_cdclk;
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
@@ -2199,10 +2201,10 @@ static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
else
min_voltage_level = 0;
- if (cdclk_state->min_voltage_level[i] == min_voltage_level)
+ if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level)
continue;
- cdclk_state->min_voltage_level[i] = min_voltage_level;
+ cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level;
ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
@@ -2588,14 +2590,14 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
return 2 * max_cdclk_freq;
- else if (IS_GEN(dev_priv, 9) ||
+ else if (IS_DISPLAY_VER(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
- else if (INTEL_GEN(dev_priv) < 4)
+ else if (DISPLAY_VER(dev_priv) < 4)
return 2*max_cdclk_freq*90/100;
else
return max_cdclk_freq*90/100;
@@ -2616,7 +2618,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->max_cdclk_freq = 552000;
else
dev_priv->max_cdclk_freq = 556800;
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
@@ -2831,7 +2833,7 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
freq = vlv_hrawclk(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 3)
+ else if (DISPLAY_VER(dev_priv) >= 3)
freq = i9xx_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
@@ -2852,7 +2854,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
dev_priv->cdclk.table = rkl_cdclk_table;
- } else if (INTEL_GEN(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
@@ -2864,7 +2866,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
@@ -2906,7 +2908,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
@@ -2916,9 +2918,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk = hsw_get_cdclk;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display.get_cdclk = vlv_get_cdclk;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_GEN(dev_priv, 5))
+ else if (IS_IRONLAKE(dev_priv))
dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
else if (IS_GM45(dev_priv))
dev_priv->display.get_cdclk = gm45_get_cdclk;
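
Most of the churn in this file is mechanical INTEL_GEN()-to-DISPLAY_VER() conversion, but the two are not interchangeable: Geminilake is gen 9 hardware whose display block reports version 10, so an old "INTEL_GEN >= 10" check splits into "DISPLAY_VER >= 11 || IS_CANNONLAKE" when it meant CNL-and-newer, and collapses to plain "DISPLAY_VER >= 10" when it meant GLK/CNL-and-newer. A throwaway program (the platform values here are illustrative, not the driver's device tables) checking that the two rewrites agree:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct plat {
	const char *name;
	int gen;         /* old INTEL_GEN() value */
	int display_ver; /* new DISPLAY_VER() value */
	bool is_cnl;
	bool is_glk;
};

static const struct plat plats[] = {
	{ "SKL", 9,  9,  false, false },
	{ "BXT", 9,  9,  false, false },
	{ "GLK", 9,  10, false, true  }, /* gen 9, but display version 10 */
	{ "CNL", 10, 10, true,  false },
	{ "ICL", 11, 11, false, false },
	{ "TGL", 12, 12, false, false },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(plats) / sizeof(plats[0]); i++) {
		const struct plat *p = &plats[i];

		/* "CNL and newer" checks must now name CNL explicitly... */
		bool old_cnl_up = p->gen >= 10;
		bool new_cnl_up = p->display_ver >= 11 || p->is_cnl;
		assert(old_cnl_up == new_cnl_up);

		/* ...while "GLK/CNL and newer" checks lose the GLK special case. */
		bool old_glk_up = p->gen >= 10 || p->is_glk;
		bool new_glk_up = p->display_ver >= 10;
		assert(old_glk_up == new_glk_up);

		printf("%s: ok\n", p->name);
	}
	return 0;
}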
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index ff7dcb7088bf..c75d7124d57a 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -173,7 +173,7 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
coeff[6] << 16 | coeff[7]);
intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (DISPLAY_VER(dev_priv) >= 7) {
intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
postoff[0]);
intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
@@ -225,7 +225,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
*/
return crtc_state->limited_color_range &&
(IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN_RANGE(dev_priv, 9, 10));
+ IS_DISPLAY_RANGE(dev_priv, 9, 10));
}
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
@@ -530,7 +530,7 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_load_csc_matrix(crtc_state);
else
ilk_load_csc_matrix(crtc_state);
@@ -737,7 +737,7 @@ static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
* ToDo: Extend the ABI to be able to program values
* from 3.0 to 7.0
*/
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 10) {
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0),
1 << 16);
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1),
@@ -1222,7 +1222,7 @@ static bool need_plane_update(struct intel_plane *plane,
* We have to reconfigure that even if the plane is inactive.
*/
return crtc_state->active_planes & BIT(plane->id) ||
- (INTEL_GEN(dev_priv) < 9 &&
+ (DISPLAY_VER(dev_priv) < 9 &&
plane->id == PLANE_PRIMARY);
}
@@ -1709,9 +1709,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat
else
return i9xx_gamma_precision(crtc_state);
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
return icl_gamma_precision(crtc_state);
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ else if (IS_DISPLAY_VER(dev_priv, 10))
return glk_gamma_precision(crtc_state);
else if (IS_IRONLAKE(dev_priv))
return ilk_gamma_precision(crtc_state);
@@ -2105,7 +2105,7 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = chv_load_luts;
dev_priv->display.read_luts = chv_read_luts;
- } else if (INTEL_GEN(dev_priv) >= 4) {
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
@@ -2117,31 +2117,31 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.read_luts = i9xx_read_luts;
}
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
dev_priv->display.color_check = icl_color_check;
- else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ else if (DISPLAY_VER(dev_priv) >= 10)
dev_priv->display.color_check = glk_color_check;
- else if (INTEL_GEN(dev_priv) >= 7)
+ else if (DISPLAY_VER(dev_priv) >= 7)
dev_priv->display.color_check = ivb_color_check;
else
dev_priv->display.color_check = ilk_color_check;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
dev_priv->display.color_commit = skl_color_commit;
else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
dev_priv->display.color_commit = hsw_color_commit;
else
dev_priv->display.color_commit = ilk_color_commit;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
dev_priv->display.read_luts = icl_read_luts;
- } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 10)) {
dev_priv->display.load_luts = glk_load_luts;
dev_priv->display.read_luts = glk_read_luts;
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (DISPLAY_VER(dev_priv) >= 8) {
dev_priv->display.load_luts = bdw_load_luts;
- } else if (INTEL_GEN(dev_priv) >= 7) {
+ } else if (DISPLAY_VER(dev_priv) >= 7) {
dev_priv->display.load_luts = ivb_load_luts;
} else {
dev_priv->display.load_luts = ilk_load_luts;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 996ae0608a62..5df57d16a401 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -187,10 +187,16 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
* Some platforms only expect PHY_MISC to be programmed for PHY-A and
* PHY-B and may not even have instances of the register for the
* other combo PHY's.
+ *
+ * ADL-S technically has three instances of PHY_MISC, but only requires
+ * that we program it for PHY A.
*/
- if (IS_JSL_EHL(i915) ||
- IS_ROCKETLAKE(i915) ||
- IS_DG1(i915))
+
+ if (IS_ALDERLAKE_S(i915))
+ return phy == PHY_A;
+ else if (IS_JSL_EHL(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_DG1(i915))
return phy < PHY_C;
return true;
@@ -246,14 +252,21 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
* RKL,DG1:
* A(master) -> B(slave)
* C(master) -> D(slave)
+ * ADL-S:
+ * A(master) -> B(slave), C(slave)
+ * D(master) -> E(slave)
*
* We must set the IREFGEN bit for any PHY acting as a master
* to another PHY.
*/
- if ((IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) && phy == PHY_C)
+ if (phy == PHY_A)
return true;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ return phy == PHY_D;
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ return phy == PHY_C;
- return phy == PHY_A;
+ return false;
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
@@ -265,7 +278,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
@@ -388,7 +401,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
@@ -460,7 +473,7 @@ skip_phy_misc:
void intel_combo_phy_init(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 11)
icl_combo_phys_init(i915);
else if (IS_CANNONLAKE(i915))
cnl_combo_phys_init(i915);
@@ -468,7 +481,7 @@ void intel_combo_phy_init(struct drm_i915_private *i915)
void intel_combo_phy_uninit(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 11)
icl_combo_phys_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_combo_phys_uninit(i915);
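
The reordered phy_is_master() above encodes the master/slave pairs from the comment: PHY A is a master everywhere, ADL-S adds D as a master for E, and RKL/DG1 add C as a master for D. The same decision tree as a self-contained sketch with placeholder enums:

#include <stdbool.h>
#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C, PHY_D, PHY_E };
enum platform { ICL, RKL, DG1, ADL_S };

/*
 * Sketch of the master/slave combo PHY relationships: checking
 * PHY A first keeps the common case out of the per-platform branches.
 */
static bool phy_is_master(enum platform plat, enum phy phy)
{
	if (phy == PHY_A)
		return true;
	if (plat == ADL_S)
		return phy == PHY_D;
	if (plat == RKL || plat == DG1)
		return phy == PHY_C;
	return false;
}

int main(void)
{
	printf("ADL-S PHY D master: %d\n", phy_is_master(ADL_S, PHY_D)); /* 1 */
	printf("RKL   PHY C master: %d\n", phy_is_master(RKL, PHY_C));   /* 1 */
	printf("ICL   PHY B master: %d\n", phy_is_master(ICL, PHY_B));   /* 0 */
	return 0;
}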
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 4934edd51cb0..580d652c3276 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,6 +38,7 @@
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -141,7 +142,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_ddi_get_config(encoder, pipe_config);
+ hsw_ddi_get_config(encoder, pipe_config);
pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
@@ -164,7 +165,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 adpa;
- if (INTEL_GEN(dev_priv) >= 5)
+ if (DISPLAY_VER(dev_priv) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -355,7 +356,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN_RANGE(dev_priv, 3, 4))
+ else if (IS_DISPLAY_RANGE(dev_priv, 3, 4))
max_clock = 400000;
else
max_clock = 350000;
@@ -710,7 +711,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/* Set the border color to purple. */
intel_uncore_write(uncore, bclrpat_reg, 0x500050);
- if (!IS_GEN(dev_priv, 2)) {
+ if (!IS_DISPLAY_VER(dev_priv, 2)) {
u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
intel_uncore_write(uncore,
pipeconf_reg,
@@ -889,7 +890,7 @@ load_detect:
if (ret > 0) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (INTEL_GEN(dev_priv) < 4)
+ else if (DISPLAY_VER(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (dev_priv->params.load_detect_test)
@@ -948,7 +949,7 @@ void intel_crt_reset(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (DISPLAY_VER(dev_priv) >= 5) {
u32 adpa;
adpa = intel_de_read(dev_priv, crt->adpa_reg);
@@ -1046,7 +1047,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
crt->base.pipe_mask = ~0;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
@@ -1075,6 +1076,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.enable = hsw_enable_crt;
crt->base.disable = hsw_disable_crt;
crt->base.post_disable = hsw_post_disable_crt;
+ crt->base.enable_clock = hsw_ddi_enable_clock;
+ crt->base.disable_clock = hsw_ddi_disable_clock;
+ crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled;
} else {
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.compute_config = pch_crt_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h
index 1b3fba359efc..6c5c44600cbd 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.h
+++ b/drivers/gpu/drm/i915/display/intel_crt.h
@@ -11,7 +11,6 @@
enum pipe;
struct drm_encoder;
struct drm_i915_private;
-struct drm_i915_private;
bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t adpa_reg, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 8e77ca7ddf11..39358076c05b 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -10,6 +10,9 @@
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
@@ -17,9 +20,13 @@
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
+#include "intel_dsi.h"
#include "intel_pipe_crc.h"
+#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_vrr.h"
#include "i9xx_plane.h"
+#include "skl_universal_plane.h"
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
@@ -32,6 +39,9 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+ if (!crtc->active)
+ return 0;
+
if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
@@ -41,8 +51,6 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 mode_flags = crtc->mode_flags;
/*
* From Gen 11, in case of dsi cmd mode, frame counter wouldn't
@@ -50,7 +58,8 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
* the hw counter, then we would find it updated in only
* the next TE, hence switching to sw counter.
*/
- if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
+ if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
+ I915_MODE_FLAG_DSI_USE_TE1))
return 0;
/*
@@ -61,9 +70,9 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
(crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
return 0;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
return 0xffffffff; /* full 32 bit counter */
- else if (INTEL_GEN(dev_priv) >= 3)
+ else if (DISPLAY_VER(dev_priv) >= 3)
return 0xffffff; /* only 24 bits of frame count */
else
return 0; /* Gen2 doesn't have a hardware frame counter */
@@ -77,12 +86,26 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
drm_crtc_vblank_on(&crtc->base);
+
+ /*
+ * Should really happen exactly when we enable the pipe
+ * but we want the frame counters in the trace, and that
+ * requires vblank support on some platforms/outputs.
+ */
+ trace_intel_pipe_enable(crtc);
}
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ /*
+ * Should really happen exactly when we disable the pipe
+ * but we want the frame counters in the trace, and that
+ * requires vblank support on some platforms/outputs.
+ */
+ trace_intel_pipe_disable(crtc);
+
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
}
@@ -242,7 +265,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
crtc->pipe = pipe;
crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
- primary = intel_primary_plane_create(dev_priv, pipe);
+ if (DISPLAY_VER(dev_priv) >= 9)
+ primary = skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+ else
+ primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
@@ -252,7 +279,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
- plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (DISPLAY_VER(dev_priv) >= 9)
+ plane = skl_universal_plane_create(dev_priv, pipe,
+ PLANE_SPRITE0 + sprite);
+ else
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
@@ -271,16 +302,16 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
funcs = &g4x_crtc_funcs;
- else if (IS_GEN(dev_priv, 4))
+ else if (IS_DISPLAY_VER(dev_priv, 4))
funcs = &i965_crtc_funcs;
else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
funcs = &i915gm_crtc_funcs;
- else if (IS_GEN(dev_priv, 3))
+ else if (IS_DISPLAY_VER(dev_priv, 3))
funcs = &i915_crtc_funcs;
else
funcs = &i8xx_crtc_funcs;
} else {
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
funcs = &bdw_crtc_funcs;
else
funcs = &ilk_crtc_funcs;
@@ -296,7 +327,7 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
- if (INTEL_GEN(dev_priv) < 9) {
+ if (DISPLAY_VER(dev_priv) < 9) {
enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -304,7 +335,7 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
}
- if (INTEL_GEN(dev_priv) >= 10)
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
drm_crtc_create_scaling_filter_property(&crtc->base,
BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
@@ -322,3 +353,238 @@ fail:
return ret;
}
+
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs)
+{
+ /* paranoia */
+ if (!adjusted_mode->crtc_htotal)
+ return 1;
+
+ return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
+ 1000 * adjusted_mode->crtc_htotal);
+}
+
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
+
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically regarding vblank. If the next vblank will happens within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays.
+ */
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
+ DEFINE_WAIT(wait);
+
+ if (new_crtc_state->uapi.async_flip)
+ return;
+
+ if (new_crtc_state->vrr.enable)
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
+
+ /* FIXME needs to be calibrated sensibly */
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+ VBLANK_EVASION_TIME_US);
+ max = vblank_start - 1;
+
+ if (min <= 0 || max <= 0)
+ goto irq_disable;
+
+ if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
+ goto irq_disable;
+
+ /*
+ * Wait for PSR to idle out after enabling the VBL interrupts.
+ * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ intel_psr_wait_for_idle(new_crtc_state);
+
+ local_irq_disable();
+
+ crtc->debug.min_vbl = min;
+ crtc->debug.max_vbl = max;
+ trace_intel_pipe_update_start(crtc);
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < min || scanline > max)
+ break;
+
+ if (!timeout) {
+ drm_err(&dev_priv->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+
+ drm_crtc_vblank_put(&crtc->base);
+
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
+ crtc->debug.scanline_start = scanline;
+ crtc->debug.start_vbl_time = ktime_get();
+ crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
+
+ trace_intel_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
+}
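+
+/*
+ * Simplified usage sketch (illustrative, not from this patch): the two
+ * halves bracket the double-buffered register writes so that they all
+ * latch during the same vblank:
+ *
+ *   intel_pipe_update_start(new_crtc_state);
+ *   ... write pipe/plane registers ...
+ *   intel_pipe_update_end(new_crtc_state);
+ */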
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
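+ /*
+ * Log2 histogram of the update duration: delta is in ns, so delta >> 9
+ * expresses it in ~0.5 us units and ilog2() picks the bucket; e.g.
+ * (illustrative) a 100 us update lands in bucket ilog2(100000 >> 9) = 7.
+ */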
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank.
+ */
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ int scanline_end = intel_get_crtc_scanline(crtc);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
+ ktime_t end_vbl_time = ktime_get();
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (new_crtc_state->uapi.async_flip)
+ return;
+
+ trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
+
+ /*
+ * In case of MIPI DSI command mode, we need to set a frame update
+ * request for every commit.
+ */
+ if (DISPLAY_VER(dev_priv) >= 11 &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
+ icl_dsi_frame_update(new_crtc_state);
+
+ /*
+ * We're still in the vblank-evade critical section, this can't race.
+ * It would be slightly nicer to grab the vblank count and arm the
+ * event outside of the critical section - the spinlock might spin
+ * for a while ...
+ */
+ if (new_crtc_state->uapi.event) {
+ drm_WARN_ON(&dev_priv->drm,
+ drm_crtc_vblank_get(&crtc->base) != 0);
+
+ spin_lock(&crtc->base.dev->event_lock);
+ drm_crtc_arm_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
+ spin_unlock(&crtc->base.dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
+ }
+
+ local_irq_enable();
+
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+
+ if (intel_vgpu_active(dev_priv))
+ return;
+
+ if (crtc->debug.start_vbl_count &&
+ crtc->debug.start_vbl_count != end_vbl_count) {
+ drm_err(&dev_priv->drm,
+ "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+ pipe_name(pipe), crtc->debug.start_vbl_count,
+ end_vbl_count,
+ ktime_us_delta(end_vbl_time,
+ crtc->debug.start_vbl_time),
+ crtc->debug.min_vbl, crtc->debug.max_vbl,
+ crtc->debug.scanline_start, scanline_end);
+ }
+
+ dbg_vblank_evade(crtc, end_vbl_time);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 67dc64df78a5..794efcc3ca08 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,6 +40,10 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define ADLS_CSR_PATH "i915/adls_dmc_ver2_01.bin"
+#define ADLS_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
+MODULE_FIRMWARE(ADLS_CSR_PATH);
+
#define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin"
#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
MODULE_FIRMWARE(DG1_CSR_PATH);
@@ -640,7 +644,7 @@ static void csr_load_work_fn(struct work_struct *work)
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;
- request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
+ request_firmware(&fw, dev_priv->csr.fw_path, dev_priv->drm.dev);
parse_csr_fw(dev_priv, fw);
if (dev_priv->csr.dmc_payload) {
@@ -689,7 +693,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_csr_runtime_pm_get(dev_priv);
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ csr->fw_path = ADLS_CSR_PATH;
+ csr->required_version = ADLS_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (IS_DG1(dev_priv)) {
csr->fw_path = DG1_CSR_PATH;
csr->required_version = DG1_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
@@ -697,11 +705,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->fw_path = RKL_CSR_PATH;
csr->required_version = RKL_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
- } else if (INTEL_GEN(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
csr->fw_path = TGL_CSR_PATH;
csr->required_version = TGL_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
- } else if (IS_GEN(dev_priv, 11)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 11)) {
csr->fw_path = ICL_CSR_PATH;
csr->required_version = ICL_CSR_VERSION_REQUIRED;
csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 21fe4d2753e9..2345f2efd60b 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -15,6 +15,7 @@
#include "intel_cursor.h"
#include "intel_display_types.h"
#include "intel_display.h"
+#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
@@ -44,7 +45,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
else
base = intel_plane_ggtt_offset(plane_state);
- return base + plane_state->color_plane[0].offset;
+ return base + plane_state->view.color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
@@ -124,9 +125,9 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
offset += (src_h * src_w - 1) * fb->format->cpp[0];
}
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = src_x;
- plane_state->color_plane[0].y = src_y;
+ plane_state->view.color_plane[0].offset = offset;
+ plane_state->view.color_plane[0].x = src_x;
+ plane_state->view.color_plane[0].y = src_y;
return 0;
}
@@ -193,7 +194,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(plane_state->color_plane[0].stride);
+ CURSOR_STRIDE(plane_state->view.color_plane[0].stride);
}
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
@@ -232,7 +233,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
}
drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ plane_state->view.color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@@ -338,7 +339,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
return cntl;
if (crtc_state->gamma_enable)
@@ -347,7 +348,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
return cntl;
@@ -360,7 +361,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
to_i915(plane_state->uapi.plane->dev);
u32 cntl = 0;
- if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
switch (drm_rect_width(&plane_state->uapi.dst)) {
@@ -449,7 +450,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
}
drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ plane_state->view.color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
@@ -527,7 +528,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* the CURCNTR write arms the update.
*/
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_write_cursor_wm(plane, crtc_state);
if (!intel_crtc_needs_modeset(crtc_state))
@@ -583,7 +584,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
ret = val & MCURSOR_MODE;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
*pipe = plane->pipe;
else
*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
@@ -783,7 +784,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
drm_plane_create_rotation_property(&cursor->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
@@ -792,7 +793,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
drm_plane_enable_fb_damage_clips(&cursor->base);
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 1bb40ec5fe5d..953de42e277c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -28,17 +28,18 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_audio.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_dp_mst.h"
#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
@@ -52,12 +53,8 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-
-struct ddi_buf_trans {
- u32 trans1; /* balance leg enable, de-emph level */
- u32 trans2; /* vref sel, vswing */
- u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
-};
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
static const u8 index_to_dp_signal_levels[] = {
[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,
@@ -72,1389 +69,15 @@ static const u8 index_to_dp_signal_levels[] = {
[9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0,
};
-/* HDMI/DVI modes ignore everything but the last 2 items. So we share
- * them for both DP and FDI transports, allowing those ports to
- * automatically adapt to HDMI connections as well
- */
-static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0006000E, 0x0 },
- { 0x00D75FFF, 0x0005000A, 0x0 },
- { 0x00C30FFF, 0x00040006, 0x0 },
- { 0x80AAAFFF, 0x000B0000, 0x0 },
- { 0x00FFFFFF, 0x0005000A, 0x0 },
- { 0x00D75FFF, 0x000C0004, 0x0 },
- { 0x80C30FFF, 0x000B0000, 0x0 },
- { 0x00FFFFFF, 0x00040006, 0x0 },
- { 0x80D75FFF, 0x000B0000, 0x0 },
-};
-
-static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0007000E, 0x0 },
- { 0x00D75FFF, 0x000F000A, 0x0 },
- { 0x00C30FFF, 0x00060006, 0x0 },
- { 0x00AAAFFF, 0x001E0000, 0x0 },
- { 0x00FFFFFF, 0x000F000A, 0x0 },
- { 0x00D75FFF, 0x00160004, 0x0 },
- { 0x00C30FFF, 0x001E0000, 0x0 },
- { 0x00FFFFFF, 0x00060006, 0x0 },
- { 0x00D75FFF, 0x001E0000, 0x0 },
-};
-
-static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
- /* Idx NT mV d T mV d db */
- { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
- { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
- { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
- { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
- { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
- { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
- { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
- { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
- { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
- { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
- { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
- { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
- { 0x00FFFFFF, 0x00000012, 0x0 },
- { 0x00EBAFFF, 0x00020011, 0x0 },
- { 0x00C71FFF, 0x0006000F, 0x0 },
- { 0x00AAAFFF, 0x000E000A, 0x0 },
- { 0x00FFFFFF, 0x00020011, 0x0 },
- { 0x00DB6FFF, 0x0005000F, 0x0 },
- { 0x00BEEFFF, 0x000A000C, 0x0 },
- { 0x00FFFFFF, 0x0005000F, 0x0 },
- { 0x00DB6FFF, 0x000A000C, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
- { 0x00FFFFFF, 0x0007000E, 0x0 },
- { 0x00D75FFF, 0x000E000A, 0x0 },
- { 0x00BEFFFF, 0x00140006, 0x0 },
- { 0x80B2CFFF, 0x001B0002, 0x0 },
- { 0x00FFFFFF, 0x000E000A, 0x0 },
- { 0x00DB6FFF, 0x00160005, 0x0 },
- { 0x80C71FFF, 0x001A0002, 0x0 },
- { 0x00F7DFFF, 0x00180004, 0x0 },
- { 0x80D75FFF, 0x001B0002, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
- { 0x00FFFFFF, 0x0001000E, 0x0 },
- { 0x00D75FFF, 0x0004000A, 0x0 },
- { 0x00C30FFF, 0x00070006, 0x0 },
- { 0x00AAAFFF, 0x000C0000, 0x0 },
- { 0x00FFFFFF, 0x0004000A, 0x0 },
- { 0x00D75FFF, 0x00090004, 0x0 },
- { 0x00C30FFF, 0x000C0000, 0x0 },
- { 0x00FFFFFF, 0x00070006, 0x0 },
- { 0x00D75FFF, 0x000C0000, 0x0 },
-};
-
-static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
- /* Idx NT mV d T mV df db */
- { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
- { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
- { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
- { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
- { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
- { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
- { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
- { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
- { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
- { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
-};
-
-/* Skylake H and S */
-static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A0, 0x0 },
- { 0x00005012, 0x0000009B, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x00002016, 0x0000009B, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x000000DF, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
-};
-
-/* Skylake U */
-static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
- { 0x0000201B, 0x000000A2, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x1 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
-};
-
-/* Skylake Y */
-static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
- { 0x00000018, 0x000000A2, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x80009010, 0x000000C0, 0x3 },
- { 0x00000018, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00000018, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
-};
-
-/* Kabylake H and S */
-static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A0, 0x0 },
- { 0x00005012, 0x0000009B, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 },
- { 0x00002016, 0x0000009B, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 },
- { 0x00002016, 0x00000097, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 },
-};
-
-/* Kabylake U */
-static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
- { 0x0000201B, 0x000000A1, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x80009010, 0x000000C0, 0x3 },
- { 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00002016, 0x0000004F, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
-};
-
-/* Kabylake Y */
-static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
- { 0x00001017, 0x000000A1, 0x0 },
- { 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x3 },
- { 0x8000800F, 0x000000C0, 0x3 },
- { 0x00001017, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
- { 0x80007011, 0x000000C0, 0x3 },
- { 0x00001017, 0x0000004C, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 },
-};
-
-/*
- * Skylake/Kabylake H and S
- * eDP 1.4 low vswing translation parameters
- */
-static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000A9, 0x0 },
- { 0x00007011, 0x000000A2, 0x0 },
- { 0x00009010, 0x0000009C, 0x0 },
- { 0x00000018, 0x000000A9, 0x0 },
- { 0x00006013, 0x000000A2, 0x0 },
- { 0x00007011, 0x000000A6, 0x0 },
- { 0x00000018, 0x000000AB, 0x0 },
- { 0x00007013, 0x0000009F, 0x0 },
- { 0x00000018, 0x000000DF, 0x0 },
-};
-
-/*
- * Skylake/Kabylake U
- * eDP 1.4 low vswing translation parameters
- */
-static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000A9, 0x0 },
- { 0x00007011, 0x000000A2, 0x0 },
- { 0x00009010, 0x0000009C, 0x0 },
- { 0x00000018, 0x000000A9, 0x0 },
- { 0x00006013, 0x000000A2, 0x0 },
- { 0x00007011, 0x000000A6, 0x0 },
- { 0x00002016, 0x000000AB, 0x0 },
- { 0x00005013, 0x0000009F, 0x0 },
- { 0x00000018, 0x000000DF, 0x0 },
-};
-
-/*
- * Skylake/Kabylake Y
- * eDP 1.4 low vswing translation parameters
- */
-static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
- { 0x00000018, 0x000000A8, 0x0 },
- { 0x00004013, 0x000000AB, 0x0 },
- { 0x00007011, 0x000000A4, 0x0 },
- { 0x00009010, 0x000000DF, 0x0 },
- { 0x00000018, 0x000000AA, 0x0 },
- { 0x00006013, 0x000000A4, 0x0 },
- { 0x00007011, 0x0000009D, 0x0 },
- { 0x00000018, 0x000000A0, 0x0 },
- { 0x00006012, 0x000000DF, 0x0 },
- { 0x00000018, 0x0000008A, 0x0 },
-};
-
-/* Skylake/Kabylake U, H and S */
-static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000AC, 0x0 },
- { 0x00005012, 0x0000009D, 0x0 },
- { 0x00007011, 0x00000088, 0x0 },
- { 0x00000018, 0x000000A1, 0x0 },
- { 0x00000018, 0x00000098, 0x0 },
- { 0x00004013, 0x00000088, 0x0 },
- { 0x80006012, 0x000000CD, 0x1 },
- { 0x00000018, 0x000000DF, 0x0 },
- { 0x80003015, 0x000000CD, 0x1 }, /* Default */
- { 0x80003015, 0x000000C0, 0x1 },
- { 0x80000018, 0x000000C0, 0x1 },
-};
-
-/* Skylake/Kabylake Y */
-static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
- { 0x00000018, 0x000000A1, 0x0 },
- { 0x00005012, 0x000000DF, 0x0 },
- { 0x80007011, 0x000000CB, 0x3 },
- { 0x00000018, 0x000000A4, 0x0 },
- { 0x00000018, 0x0000009D, 0x0 },
- { 0x00004013, 0x00000080, 0x0 },
- { 0x80006013, 0x000000C0, 0x3 },
- { 0x00000018, 0x0000008A, 0x0 },
- { 0x80003015, 0x000000C0, 0x3 }, /* Default */
- { 0x80003015, 0x000000C0, 0x3 },
- { 0x80000018, 0x000000C0, 0x3 },
-};
-
-struct bxt_ddi_buf_trans {
- u8 margin; /* swing value */
- u8 scale; /* scale value */
- u8 enable; /* scale enable */
- u8 deemphasis;
-};
-
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
- /* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
- { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
- { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
- { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
- { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
- { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
- { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
-};
-
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
- /* Idx NT mV diff db */
- { 26, 0, 0, 128, }, /* 0: 200 0 */
- { 38, 0, 0, 112, }, /* 1: 200 1.5 */
- { 48, 0, 0, 96, }, /* 2: 200 4 */
- { 54, 0, 0, 69, }, /* 3: 200 6 */
- { 32, 0, 0, 128, }, /* 4: 250 0 */
- { 48, 0, 0, 104, }, /* 5: 250 1.5 */
- { 54, 0, 0, 85, }, /* 6: 250 4 */
- { 43, 0, 0, 128, }, /* 7: 300 0 */
- { 54, 0, 0, 101, }, /* 8: 300 1.5 */
- { 48, 0, 0, 128, }, /* 9: 300 0 */
-};
-
-/* BSpec has 2 recommended values - entries 0 and 8.
- * Using the entry with higher vswing.
- */
-static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
- /* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
- { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
- { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
- { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
- { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
- { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
- { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
-};
-
-struct cnl_ddi_buf_trans {
- u8 dw2_swing_sel;
- u8 dw7_n_scalar;
- u8 dw4_cursor_coeff;
- u8 dw4_post_cursor_2;
- u8 dw4_post_cursor_1;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
- { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
- { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
- { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
- { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
- { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
- { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
- { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
- { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
- { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
- { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
- { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
- { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
- { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for HDMI */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
- { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
- { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
- { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
- { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
- { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
- { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
- { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
- { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
- { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
- { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
- { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
- { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
- { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
-};
-
-/* icl_combo_phy_ddi_translations */
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
- { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
- { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
- { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
- { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
- { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
-};
-
-static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */
- { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
- { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = {
- /* NT mV Trans mV db */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
- { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
- { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */
- { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */
- { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */
- { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */
- { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
- { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
- { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
- { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
- { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_11_6;
- u32 cri_txdeemph_override_5_0;
- u32 cri_txdeemph_override_17_12;
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
- /* Voltage swing pre-emphasis */
- { 0x18, 0x00, 0x00 }, /* 0 0 */
- { 0x1D, 0x00, 0x05 }, /* 0 1 */
- { 0x24, 0x00, 0x0C }, /* 0 2 */
- { 0x2B, 0x00, 0x14 }, /* 0 3 */
- { 0x21, 0x00, 0x00 }, /* 1 0 */
- { 0x2B, 0x00, 0x08 }, /* 1 1 */
- { 0x30, 0x00, 0x0F }, /* 1 2 */
- { 0x31, 0x00, 0x03 }, /* 2 0 */
- { 0x34, 0x00, 0x0B }, /* 2 1 */
- { 0x3F, 0x00, 0x00 }, /* 3 0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
- /* Voltage swing pre-emphasis */
- { 0x18, 0x00, 0x00 }, /* 0 0 */
- { 0x1D, 0x00, 0x05 }, /* 0 1 */
- { 0x24, 0x00, 0x0C }, /* 0 2 */
- { 0x2B, 0x00, 0x14 }, /* 0 3 */
- { 0x26, 0x00, 0x00 }, /* 1 0 */
- { 0x2C, 0x00, 0x07 }, /* 1 1 */
- { 0x33, 0x00, 0x0C }, /* 1 2 */
- { 0x2E, 0x00, 0x00 }, /* 2 0 */
- { 0x36, 0x00, 0x09 }, /* 2 1 */
- { 0x3F, 0x00, 0x00 }, /* 3 0 */
-};
-
-static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
- /* HDMI Preset VS Pre-emph */
- { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
- { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
- { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
- { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
- { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
- { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
- { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
- { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
- { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
- { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
-};
-
-struct tgl_dkl_phy_ddi_buf_trans {
- u32 dkl_vswing_control;
- u32 dkl_preshoot_control;
- u32 dkl_de_emphasis_control;
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
- /* VS pre-emp Non-trans mV Pre-emph dB */
- { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
- { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
- { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
- { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
- { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
- { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
- { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
- { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
- { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
- { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
-};
-
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
- /* HDMI Preset VS Pre-emph */
- { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
- { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
- { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
- { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
- { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
- { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
- { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
- { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
- { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
- { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
-};
-
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
- { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
- { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
- { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
- { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-/*
- * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
- * that DisplayPort specification requires
- */
-static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
- /* VS pre-emp */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
-};
-
-static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
- { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
- { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
- { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
- /* NT mV Trans mV db */
- { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
- { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
- { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
- { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
- { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
- { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
-};
-
-static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
-{
- return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
-}
-
-static const struct ddi_buf_trans *
-bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
- return bdw_ddi_translations_edp;
- } else {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- return bdw_ddi_translations_dp;
- }
-}
-
-static const struct ddi_buf_trans *
-skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_SKL_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- return skl_y_ddi_translations_dp;
- } else if (IS_SKL_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
- return skl_u_ddi_translations_dp;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- return skl_ddi_translations_dp;
- }
-}
-
-static const struct ddi_buf_trans *
-kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
- return kbl_y_ddi_translations_dp;
- } else if (IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv) ||
- IS_CML_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
- return kbl_u_ddi_translations_dp;
- } else {
- *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
- return kbl_ddi_translations_dp;
- }
-}
-
-static const struct ddi_buf_trans *
-skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) ||
- IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
- return skl_y_ddi_translations_edp;
- } else if (IS_SKL_ULT(dev_priv) ||
- IS_KBL_ULT(dev_priv) ||
- IS_CFL_ULT(dev_priv) ||
- IS_CML_ULT(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
- return skl_u_ddi_translations_edp;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- return skl_ddi_translations_edp;
- }
- }
-
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- return kbl_get_buf_trans_dp(encoder, n_entries);
- else
- return skl_get_buf_trans_dp(encoder, n_entries);
-}
-
-static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
-{
- if (IS_SKL_ULX(dev_priv) ||
- IS_KBL_ULX(dev_priv) ||
- IS_CFL_ULX(dev_priv) ||
- IS_CML_ULX(dev_priv)) {
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
- return skl_y_ddi_translations_hdmi;
- } else {
- *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- return skl_ddi_translations_hdmi;
- }
-}
-
-static int skl_buf_trans_num_entries(enum port port, int n_entries)
-{
- /* Only DDIA and DDIE can select the 10th register with DP */
- if (port == PORT_A || port == PORT_E)
- return min(n_entries, 10);
- else
- return min(n_entries, 9);
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- kbl_get_buf_trans_dp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_SKYLAKE(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_dp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- return bdw_ddi_translations_dp;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- return hsw_ddi_translations_dp;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_BC(dev_priv)) {
- const struct ddi_buf_trans *ddi_translations =
- skl_get_buf_trans_edp(encoder, n_entries);
- *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
- } else if (IS_BROADWELL(dev_priv)) {
- return bdw_get_buf_trans_edp(encoder, n_entries);
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- return hsw_ddi_translations_dp;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
- int *n_entries)
-{
- if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
- return bdw_ddi_translations_fdi;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
- return hsw_ddi_translations_fdi;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (IS_GEN9_BC(dev_priv)) {
- return skl_get_buf_trans_hdmi(dev_priv, n_entries);
- } else if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- return bdw_ddi_translations_hdmi;
- } else if (IS_HASWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- return hsw_ddi_translations_hdmi;
- }
-
- *n_entries = 0;
- return NULL;
-}
-
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
- return bxt_ddi_translations_dp;
-}
-
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
- return bxt_ddi_translations_edp;
- }
-
- return bxt_get_buf_trans_dp(encoder, n_entries);
-}
-
-static const struct bxt_ddi_buf_trans *
-bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
-{
- *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
- return bxt_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
- return cnl_ddi_translations_hdmi_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
- return cnl_ddi_translations_hdmi_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
- return cnl_ddi_translations_hdmi_1_05V;
- } else {
- *n_entries = 1; /* shut up gcc */
- MISSING_CASE(voltage);
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
- return cnl_ddi_translations_dp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
- return cnl_ddi_translations_dp_1_05V;
- } else {
- *n_entries = 1; /* shut up gcc */
- MISSING_CASE(voltage);
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_edp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
- return cnl_ddi_translations_edp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
- return cnl_ddi_translations_edp_1_05V;
- } else {
- *n_entries = 1; /* shut up gcc */
- MISSING_CASE(voltage);
- }
- return NULL;
- } else {
- return cnl_get_buf_trans_dp(encoder, n_entries);
- }
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
- return icl_combo_phy_ddi_translations_dp_hbr2;
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (crtc_state->port_clock > 540000) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
- return icl_combo_phy_ddi_translations_edp_hbr3;
- } else if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
- return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
- } else if (IS_DG1(dev_priv)) {
- *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
- return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
- }
-
- return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
- return icl_mg_phy_ddi_translations_hdmi;
-}
-
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
- return icl_mg_phy_ddi_translations_hbr2_hbr3;
- } else {
- *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
- return icl_mg_phy_ddi_translations_rbr_hbr;
- }
-}
-
-static const struct icl_mg_phy_ddi_buf_trans *
-icl_get_mg_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else
- return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
- return ehl_combo_phy_ddi_translations_dp;
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- }
-
- return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-ehl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
- return icl_combo_phy_ddi_translations_dp_hbr2;
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (dev_priv->vbt.edp.low_vswing) {
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2);
- return jsl_combo_phy_ddi_translations_edp_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr);
- return jsl_combo_phy_ddi_translations_edp_hbr;
- }
- }
-
- return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-jsl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
- return icl_combo_phy_ddi_translations_hdmi;
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- if (crtc_state->port_clock > 270000) {
- if (IS_ROCKETLAKE(dev_priv)) {
- *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
- return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
- } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
- return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
- return tgl_combo_phy_ddi_translations_dp_hbr2;
- }
- } else {
- if (IS_ROCKETLAKE(dev_priv)) {
- *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
- return rkl_combo_phy_ddi_translations_dp_hbr;
- } else {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
- }
- }
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (crtc_state->port_clock > 540000) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
- return icl_combo_phy_ddi_translations_edp_hbr3;
- } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
- return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
- } else if (dev_priv->vbt.edp.low_vswing) {
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
- return icl_combo_phy_ddi_translations_edp_hbr2;
- }
-
- return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct cnl_ddi_buf_trans *
-tgl_get_combo_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
- else
- return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
- return tgl_dkl_phy_hdmi_ddi_trans;
-}
-
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (crtc_state->port_clock > 270000) {
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
- return tgl_dkl_phy_dp_ddi_trans_hbr2;
- } else {
- *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
- return tgl_dkl_phy_dp_ddi_trans;
- }
-}
-
-static const struct tgl_dkl_phy_ddi_buf_trans *
-tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *n_entries)
-{
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
- else
- return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
-}
-
static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries, level, default_entry;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 12) {
- if (intel_phy_is_combo(dev_priv, phy))
- tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- else
- tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- default_entry = n_entries - 1;
- } else if (INTEL_GEN(dev_priv) == 11) {
- if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- else
- icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries);
- default_entry = n_entries - 1;
- } else if (IS_CANNONLAKE(dev_priv)) {
- cnl_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = n_entries - 1;
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = n_entries - 1;
- } else if (IS_GEN9_BC(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = 8;
- } else if (IS_BROADWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = 7;
- } else if (IS_HASWELL(dev_priv)) {
- intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
- default_entry = 6;
- } else {
- drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
+ n_entries = intel_ddi_hdmi_num_entries(encoder, crtc_state, &default_entry);
+ if (n_entries == 0)
return 0;
- }
-
- if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
- return 0;
-
level = intel_bios_hdmi_level_shift(encoder);
if (level < 0)
level = default_entry;
@@ -1470,8 +93,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
@@ -1490,7 +113,7 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) && intel_bios_dp_boost_level(encoder))
+ if (IS_GEN9_BC(dev_priv) && intel_bios_encoder_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -1523,7 +146,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) && intel_bios_hdmi_boost_level(encoder))
+ if (IS_GEN9_BC(dev_priv) && intel_bios_encoder_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -1533,8 +156,8 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
ddi_translations[level].trans2);
}
-static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
+void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
{
if (IS_BROXTON(dev_priv)) {
udelay(16);
@@ -1551,7 +174,7 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
enum port port)
{
/* Wait > 518 usecs for DDI_BUF_CTL to be non-idle */
- if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
usleep_range(518, 1000);
return;
}
@@ -1622,141 +245,6 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
}
}
-/* Starting with Haswell, different DDI ports can work in FDI mode for
- * connection to the PCH-located connectors. For this, it is necessary to train
- * both the DDI port and PCH receiver for the desired DDI buffer settings.
- *
- * The recommended port to work in FDI mode is DDI E, which we use here. Also,
- * please note that when FDI mode is active on DDI E, it shares 2 lines with
- * DDI A (which is used for eDP)
- */
-
-void hsw_fdi_link_train(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 temp, i, rx_ctl_val, ddi_pll_sel;
-
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
-
- /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
- * mode set "sequence for CRT port" document:
- * - TP1 to TP2 time with the default value
- * - FDI delay to 90h
- *
- * WaFDIAutoLinkSetTimingOverrride:hsw
- */
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
- FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- /* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
- FDI_RX_PLL_ENABLE |
- FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- udelay(220);
-
- /* Switch from Rawclk to PCDclk */
- rx_ctl_val |= FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
-
- /* Configure Port Clock Select */
- ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
- intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
- drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
-
- /* Start the training iterating through available voltages and emphasis,
- * testing each value twice. */
- for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
- /* Configure DP_TP_CTL with auto-training */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 |
- DP_TP_CTL_ENABLE);
-
- /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
- * DDI E does not support port reversal, the functionality is
- * achieved on the PCH side in FDI_RX_CTL, so no need to set the
- * port reversal bit */
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
- DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
-
- udelay(600);
-
- /* Program PCH FDI Receiver TU */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
-
- /* Enable PCH FDI Receiver with auto-training */
- rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
-
- /* Wait for FDI receiver lane calibration */
- udelay(30);
-
- /* Unset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
-
- /* Wait for FDI auto training time */
- udelay(5);
-
- temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
- if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- drm_dbg_kms(&dev_priv->drm,
- "FDI link training done on step %d\n", i);
- break;
- }
-
- /*
- * Leave things enabled even if we failed to train FDI.
- * Results in less fireworks from the state checker.
- */
- if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
- drm_err(&dev_priv->drm, "FDI link training failed!\n");
- break;
- }
-
- rx_ctl_val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
-
- temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- temp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
-
- /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
- temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
- intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
-
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
-
- /* Reset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
- }
-
- /* Enable normal pixel sending for FDI */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
-}
-
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -1815,25 +303,6 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_tc(dev_priv, phy) &&
- intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
- DPLL_ID_ICL_TBTPLL)
- pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
- encoder->port);
- else
- pipe_config->port_clock =
- intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll,
- &pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1921,7 +390,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -1989,7 +458,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
@@ -2002,7 +471,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_GEN_RANGE(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_RANGE(dev_priv, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -2021,7 +490,7 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -2067,7 +536,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
intel_de_write(dev_priv,
TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
@@ -2077,11 +546,11 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_GEN_RANGE(dev_priv, 8, 10))
+ if (IS_DISPLAY_RANGE(dev_priv, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -2245,7 +714,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!trans_wakeref)
continue;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -2385,7 +854,7 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP) {
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
intel_de_write(dev_priv,
TRANS_CLK_SEL(cpu_transcoder),
TGL_TRANS_CLK_SEL_PORT(port));
@@ -2402,7 +871,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP) {
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
intel_de_write(dev_priv,
TRANS_CLK_SEL(cpu_transcoder),
TGL_TRANS_CLK_SEL_DISABLED);
@@ -2436,9 +905,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- iboost = intel_bios_hdmi_boost_level(encoder);
+ iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata);
else
- iboost = intel_bios_dp_boost_level(encoder);
+ iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
if (iboost == 0) {
const struct ddi_buf_trans *ddi_translations;
@@ -2480,13 +949,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
enum port port = encoder->port;
int n_entries;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries);
- else
- ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries);
-
+ ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@@ -2508,12 +971,12 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
- } else if (INTEL_GEN(dev_priv) == 11) {
+ } else if (IS_DISPLAY_VER(dev_priv, 11)) {
if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
@@ -2523,15 +986,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
else
icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- cnl_get_buf_trans_edp(encoder, &n_entries);
- else
- cnl_get_buf_trans_dp(encoder, &n_entries);
+ cnl_get_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_GEN9_LP(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- bxt_get_buf_trans_edp(encoder, &n_entries);
- else
- bxt_get_buf_trans_dp(encoder, &n_entries);
+ bxt_get_buf_trans(encoder, crtc_state, &n_entries);
} else {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
intel_ddi_get_buf_trans_edp(encoder, &n_entries);
@@ -2569,12 +1026,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries);
- else
- ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries);
+ ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
@@ -2695,7 +1147,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
int n_entries, ln;
u32 val;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
ddi_translations = jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
@@ -3110,196 +1562,580 @@ hsw_set_signal_levels(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
-static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum phy phy)
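+/*
+ * Shared helpers for the CNL+ style clock routing registers: select the
+ * PLL via the CLK_SEL field first, then ungate the clock by clearing the
+ * CLK_OFF bit. Per bspec the two must be separate register writes, and
+ * both happen under the dpll lock.
+ */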
+static void _cnl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- if (IS_ROCKETLAKE(dev_priv)) {
- return RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- } else if (intel_phy_is_combo(dev_priv, phy)) {
- return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- } else if (intel_phy_is_tc(dev_priv, phy)) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv,
- (enum port)phy);
+ mutex_lock(&i915->dpll.lock);
- return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
- }
+ intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
- return 0;
+ /*
+ * "This step and the step before must be
+ * done with separate register writes."
+ */
+ intel_de_rmw(i915, reg, clk_off, 0);
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static void _cnl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_off)
+{
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, reg, 0, clk_off);
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static bool _cnl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_off)
+{
+ return !(intel_de_read(i915, reg) & clk_off);
+}
+
+static struct intel_shared_dpll *
+_cnl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_sel_mask, u32 clk_sel_shift)
+{
+ enum intel_dpll_id id;
+
+ id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift;
+
+ return intel_get_shared_dpll_by_id(i915, id);
}
-static void dg1_map_plls_to_ports(struct intel_encoder *encoder,
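+/*
+ * ADL-S programs its PLL select via ADLS_DPCLKA_CFGCR, but reuses the ICL
+ * CLK_OFF bit layout for the clock gating.
+ */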
+static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _cnl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
+ pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void adls_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _cnl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+ ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
+ ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
+}
+
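+/*
+ * RKL shares ICL_DPCLKA_CFGCR0 with ICL, but uses its own PLL select and
+ * CLK_OFF bit layout within it.
+ */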
+static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
+
+static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
/*
* If we fail this, something went very wrong: the first 2 PLLs should be
* used by the first 2 phys and the last 2 PLLs by the last 2 phys
*/
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(&i915->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- mutex_lock(&dev_priv->dpll.lock);
+ _cnl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy));
- drm_WARN_ON(&dev_priv->drm,
- (val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)) == 0);
+static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- val |= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val);
- intel_de_posting_read(dev_priv, DG1_DPCLKA_CFGCR0(phy));
+ _cnl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val);
+static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- mutex_unlock(&dev_priv->dpll.lock);
+ return _cnl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static void icl_map_plls_to_ports(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- mutex_lock(&dev_priv->dpll.lock);
+ return _cnl_ddi_get_pll(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- drm_WARN_ON(&dev_priv->drm,
- (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
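+/*
+ * Even though ICL_DPCLKA_CFGCR0 references DDIs, these helpers pass the
+ * PHY rather than the port (DDI). For ICL port == phy in all cases, but
+ * for EHL the bspec notes: "DDID clock tied to DDIA clock, so
+ * DPCLKA_CFGCR0 DDIA Clock Select chooses the PLL for both DDIA and DDID
+ * and drives port A in all cases."
+ */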
+static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (intel_phy_is_combo(dev_priv, phy)) {
- u32 mask, sel;
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- if (IS_ROCKETLAKE(dev_priv)) {
- mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- sel = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- } else {
- mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- sel = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- }
+ _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- /*
- * Even though this register references DDIs, note that we
- * want to pass the PHY rather than the port (DDI). For
- * ICL, port=phy in all cases so it doesn't matter, but for
- * EHL the bspec notes the following:
- *
- * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
- * Clock Select chooses the PLL for both DDIA and DDID and
- * drives port A in all cases."
- */
- val &= ~mask;
- val |= sel;
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- }
+static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
- mutex_unlock(&dev_priv->dpll.lock);
+static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
-static void dg1_unmap_plls_to_ports(struct intel_encoder *encoder)
+struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
- mutex_lock(&dev_priv->dpll.lock);
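+/*
+ * JSL/EHL: ports C and above sit behind a TC-style DDI_CLK_SEL gate even
+ * though they are driven by the combo PHY PLL selection, hence the extra
+ * DDI_CLK_SEL programming around the combo helpers.
+ */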
+static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
- intel_de_rmw(dev_priv, DG1_DPCLKA_CFGCR0(phy), 0,
- DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- mutex_unlock(&dev_priv->dpll.lock);
+ /*
+ * "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
+ * MG does not exist, but the programming is required to ungate DDIC and DDID."
+ */
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+
+ icl_ddi_combo_enable_clock(encoder, crtc_state);
}
-static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
+static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ icl_ddi_combo_disable_clock(encoder);
+
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+}
+
+static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 tmp;
- mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
+ return false;
- mutex_unlock(&dev_priv->dpll.lock);
+ return icl_ddi_combo_is_clock_enabled(encoder);
}
-static void dg1_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
- u32 port_mask, bool ddi_clk_needed)
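+/*
+ * ICL/TGL Type-C: first select the TBT or MG clock via DDI_CLK_SEL, then
+ * ungate the TC clock in ICL_DPCLKA_CFGCR0; disabling reverses the order.
+ */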
+static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- enum port port;
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
- for_each_port_masked(port, port_mask) {
- enum phy phy = intel_port_to_phy(dev_priv, port);
- bool ddi_clk_off;
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy));
- ddi_clk_off = val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ intel_de_write(i915, DDI_CLK_SEL(port),
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- if (ddi_clk_needed == !ddi_clk_off)
- continue;
+ mutex_lock(&i915->dpll.lock);
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
- continue;
+ intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
+
+ mutex_unlock(&i915->dpll.lock);
+
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+}
+
+static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
+ return false;
- drm_notice(&dev_priv->drm,
- "PHY %c is disabled with an ungated DDI clock, gate it\n",
- phy_name(phy));
- val |= DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val);
+ tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+
+ return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
+}
+
+static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ switch (tmp & DDI_CLK_SEL_MASK) {
+ case DDI_CLK_SEL_TBT_162:
+ case DDI_CLK_SEL_TBT_270:
+ case DDI_CLK_SEL_TBT_540:
+ case DDI_CLK_SEL_TBT_810:
+ id = DPLL_ID_ICL_TBTPLL;
+ break;
+ case DDI_CLK_SEL_MG:
+ id = icl_tc_port_to_pll_id(tc_port);
+ break;
+ default:
+ MISSING_CASE(tmp);
+ fallthrough;
+ case DDI_CLK_SEL_NONE:
+ return NULL;
}
+
+ return intel_get_shared_dpll_by_id(i915, id);
}
-static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
- u32 port_mask, bool ddi_clk_needed)
+static void cnl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- enum port port;
- u32 val;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- for_each_port_masked(port, port_mask) {
- enum phy phy = intel_port_to_phy(dev_priv, port);
- bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
- phy);
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
- if (ddi_clk_needed == !ddi_clk_off)
- continue;
+ _cnl_ddi_enable_clock(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
+ DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port),
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+}
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
- continue;
+static void cnl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ _cnl_ddi_disable_clock(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+}
- drm_notice(&dev_priv->drm,
- "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(phy));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+static bool cnl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ return _cnl_ddi_is_clock_enabled(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+}
+
+static struct intel_shared_dpll *cnl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ return _cnl_ddi_get_pll(i915, DPCLKA_CFGCR0,
+ DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
+ DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port));
+}
+
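+/*
+ * BXT/GLK use a fixed port -> PLL mapping, so only a readout hook is
+ * provided here.
+ */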
+static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum intel_dpll_id id;
+
+ switch (encoder->port) {
+ case PORT_A:
+ id = DPLL_ID_SKL_DPLL0;
+ break;
+ case PORT_B:
+ id = DPLL_ID_SKL_DPLL1;
+ break;
+ case PORT_C:
+ id = DPLL_ID_SKL_DPLL2;
+ break;
+ default:
+ MISSING_CASE(encoder->port);
+ return NULL;
}
+
+ return intel_get_shared_dpll_by_id(i915, id);
}
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
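+/*
+ * SKL/KBL/CFL keep the PLL select, the per-port select override and the
+ * clock gate all in DPLL_CTRL2, so a single locked RMW handles both
+ * enable and disable.
+ */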
+static void skl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, DPLL_CTRL2,
+ DPLL_CTRL2_DDI_CLK_OFF(port) |
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
+ DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static void skl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ mutex_lock(&i915->dpll.lock);
+
+ intel_de_rmw(i915, DPLL_CTRL2,
+ 0, DPLL_CTRL2_DDI_CLK_OFF(port));
+
+ mutex_unlock(&i915->dpll.lock);
+}
+
+static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ /*
+ * FIXME Not sure if the override affects both
+ * the PLL selection and the CLK_OFF bit.
+ */
+ return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+}
+
+static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DPLL_CTRL2);
+
+ /*
+ * FIXME Not sure if the override affects both
+ * the PLL selection and the CLK_OFF bit.
+ */
+ if ((tmp & DPLL_CTRL2_DDI_SEL_OVERRIDE(port)) == 0)
+ return NULL;
+
+ id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
+ DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
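+/*
+ * HSW/BDW route the port clock with PORT_CLK_SEL alone. These helpers are
+ * non-static, presumably so the FDI code that moved out of this file can
+ * reuse them for DDI E.
+ */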
+void hsw_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+}
+
+void hsw_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+}
+
+static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, PORT_CLK_SEL(port));
+
+ switch (tmp & PORT_CLK_SEL_MASK) {
+ case PORT_CLK_SEL_WRPLL1:
+ id = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ id = DPLL_ID_WRPLL2;
+ break;
+ case PORT_CLK_SEL_SPLL:
+ id = DPLL_ID_SPLL;
+ break;
+ case PORT_CLK_SEL_LCPLL_810:
+ id = DPLL_ID_LCPLL_810;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ id = DPLL_ID_LCPLL_1350;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ MISSING_CASE(tmp);
+ fallthrough;
+ case PORT_CLK_SEL_NONE:
+ return NULL;
+ }
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
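+/*
+ * Generic dispatch into the per-platform hooks above. The hooks are
+ * assigned at encoder init time, presumably along these lines
+ * (illustrative sketch only, not the actual init code):
+ *
+ *	if (IS_ROCKETLAKE(i915)) {
+ *		encoder->enable_clock = rkl_ddi_enable_clock;
+ *		encoder->disable_clock = rkl_ddi_disable_clock;
+ *		encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
+ *	}
+ *
+ * A NULL hook means there is nothing to program for that platform/port.
+ */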
+void intel_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (encoder->enable_clock)
+ encoder->enable_clock(encoder, crtc_state);
+}
+
+static void intel_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ if (encoder->disable_clock)
+ encoder->disable_clock(encoder);
+}
+
+void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 port_mask;
bool ddi_clk_needed;
@@ -3319,7 +2155,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&dev_priv->drm, is_mst))
+ if (drm_WARN_ON(&i915->drm, is_mst))
return;
}
@@ -3334,11 +2170,11 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ for_each_intel_encoder(&i915->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(&i915->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -3349,92 +2185,15 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- if (IS_DG1(dev_priv))
- dg1_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
- else
- icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
-}
-
-static void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- u32 val;
- const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-
- if (drm_WARN_ON(&dev_priv->drm, !pll))
+ if (ddi_clk_needed || !encoder->disable_clock ||
+ !encoder->is_clock_enabled(encoder))
return;
- mutex_lock(&dev_priv->dpll.lock);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_phy_is_combo(dev_priv, phy))
- intel_de_write(dev_priv, DDI_CLK_SEL(port),
- icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- else if (IS_JSL_EHL(dev_priv) && port >= PORT_C)
- /*
- * MG does not exist but the programming is required
- * to ungate DDIC and DDID
- */
- intel_de_write(dev_priv, DDI_CLK_SEL(port),
- DDI_CLK_SEL_MG);
- } else if (IS_CANNONLAKE(dev_priv)) {
- /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
- val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
-
- /*
- * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
- * This step and the step before must be done with separate
- * register writes.
- */
- val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
- val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
- } else if (IS_GEN9_BC(dev_priv)) {
- /* DDI -> PLL mapping */
- val = intel_de_read(dev_priv, DPLL_CTRL2);
-
- val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
- DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
- val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
- DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
-
- intel_de_write(dev_priv, DPLL_CTRL2, val);
+ drm_notice(&i915->drm,
+ "[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ encoder->base.base.id, encoder->base.name);
- } else if (INTEL_GEN(dev_priv) < 9) {
- intel_de_write(dev_priv, PORT_CLK_SEL(port),
- hsw_pll_to_ddi_pll_sel(pll));
- }
-
- mutex_unlock(&dev_priv->dpll.lock);
-}
-
-static void intel_ddi_clk_disable(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_phy_is_combo(dev_priv, phy) ||
- (IS_JSL_EHL(dev_priv) && port >= PORT_C))
- intel_de_write(dev_priv, DDI_CLK_SEL(port),
- DDI_CLK_SEL_NONE);
- } else if (IS_CANNONLAKE(dev_priv)) {
- intel_de_write(dev_priv, DPCLKA_CFGCR0,
- intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- } else if (IS_GEN9_BC(dev_priv)) {
- intel_de_write(dev_priv, DPLL_CTRL2,
- intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port));
- } else if (INTEL_GEN(dev_priv) < 9) {
- intel_de_write(dev_priv, PORT_CLK_SEL(port),
- PORT_CLK_SEL_NONE);
- }
+ encoder->disable_clock(encoder);
}
static void
@@ -3443,13 +2202,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
u32 ln0, ln1, pin_assignment;
u8 width;
- if (dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
HIP_INDEX_VAL(tc_port, 0x0));
ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
@@ -3515,7 +2276,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
MISSING_CASE(pin_assignment);
}
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
HIP_INDEX_VAL(tc_port, 0x0));
intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
@@ -3542,7 +2303,7 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return TGL_DP_TP_CTL(tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -3553,7 +2314,7 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return TGL_DP_TP_STATUS(tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
@@ -3638,6 +2399,73 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
}
}
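+/*
+ * MSO/CoG splitter readout: the state lives in ICL_PIPE_DSS_CTL1 and is
+ * only valid on pipe A.
+ */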
+static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dss1;
+
+ if (!HAS_MSO(i915))
+ return;
+
+ dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+
+ pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
+ if (!pipe_config->splitter.enable)
+ return;
+
+ /* Splitter enable is supported for pipe A only. */
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+ pipe_config->splitter.enable = false;
+ return;
+ }
+
+ switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
+ default:
+ drm_WARN(&i915->drm, true,
+ "Invalid splitter configuration, dss1=0x%08x\n", dss1);
+ fallthrough;
+ case SPLITTER_CONFIGURATION_2_SEGMENT:
+ pipe_config->splitter.link_count = 2;
+ break;
+ case SPLITTER_CONFIGURATION_4_SEGMENT:
+ pipe_config->splitter.link_count = 4;
+ break;
+ }
+
+ pipe_config->splitter.pixel_overlap = REG_FIELD_GET(OVERLAP_PIXELS_MASK, dss1);
+}
+
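+/* Mirror of the readout above: program (or clear) the splitter config. */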
+static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dss1 = 0;
+
+ if (!HAS_MSO(i915))
+ return;
+
+ if (crtc_state->splitter.enable) {
+ /* Splitter enable is supported for pipe A only. */
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
+ return;
+
+ dss1 |= SPLITTER_ENABLE;
+ dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
+ if (crtc_state->splitter.link_count == 2)
+ dss1 |= SPLITTER_CONFIGURATION_2_SEGMENT;
+ else
+ dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
+ }
+
+ intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
+ OVERLAP_PIXELS_MASK, dss1);
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3679,7 +2507,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
* configure the PLL to port mapping here.
*/
- intel_ddi_clk_select(encoder, crtc_state);
+ intel_ddi_enable_clock(encoder, crtc_state);
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
@@ -3732,6 +2560,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_power_up_lanes(encoder, crtc_state);
/*
+ * 7.g Program CoG/MSO configuration bits in DSS_CTL1 if selected.
+ */
+ intel_ddi_mso_configure(crtc_state);
+
+ /*
* 7.g Configure and enable DDI_BUF_CTL
* 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
* after 500 us.
@@ -3788,7 +2621,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) < 11)
+ if (DISPLAY_VER(dev_priv) < 11)
drm_WARN_ON(&dev_priv->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
@@ -3800,7 +2633,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_pps_on(intel_dp);
- intel_ddi_clk_select(encoder, crtc_state);
+ intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT) {
@@ -3811,7 +2644,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state, level);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, crtc_state, level);
@@ -3830,7 +2663,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp, crtc_state);
- if ((port != PORT_A || INTEL_GEN(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -3850,7 +2683,7 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -3873,10 +2706,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_hdmi_level(encoder, crtc_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, crtc_state);
+ intel_ddi_enable_clock(encoder, crtc_state);
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
@@ -3884,20 +2716,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (INTEL_GEN(dev_priv) == 11)
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- intel_prepare_hdmi_ddi_buffers(encoder, level);
-
- if (IS_GEN9_BC(dev_priv))
- skl_ddi_set_iboost(encoder, crtc_state, level);
-
intel_ddi_enable_pipe_clock(encoder, crtc_state);
dig_port->set_infoframes(encoder,
@@ -3929,11 +2747,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
- if (IS_DG1(dev_priv))
- dg1_map_plls_to_ports(encoder, crtc_state);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(encoder, crtc_state);
-
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -4005,7 +2818,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
if (is_mst) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 val;
@@ -4030,7 +2843,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
intel_ddi_disable_pipe_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -4042,7 +2855,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
- intel_ddi_clk_disable(encoder);
+ intel_ddi_disable_clock(encoder);
}
static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
@@ -4065,7 +2878,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
- intel_ddi_clk_disable(encoder);
+ intel_ddi_disable_clock(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
@@ -4091,7 +2904,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_dsc_disable(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_scaler_disable(old_crtc_state);
else
ilk_pfit_disable(old_crtc_state);
@@ -4106,7 +2919,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, slave);
intel_crtc_vblank_off(old_slave_crtc_state);
- trace_intel_pipe_disable(slave);
intel_dsc_disable(old_slave_crtc_state);
skl_scaler_disable(old_slave_crtc_state);
@@ -4132,11 +2944,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
old_conn_state);
- if (IS_DG1(dev_priv))
- dg1_unmap_plls_to_ports(encoder);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(encoder);
-
if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
intel_display_power_put(dev_priv,
intel_ddi_main_link_aux_domain(dig_port),
@@ -4165,7 +2972,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_ddi_clk_disable(encoder);
+ intel_ddi_disable_clock(encoder);
val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -4228,7 +3035,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
intel_edp_backlight_on(crtc_state, conn_state);
@@ -4257,7 +3064,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9);
+ drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) < 9);
if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
@@ -4273,6 +3080,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
+ int level = intel_ddi_hdmi_level(encoder, crtc_state);
enum port port = encoder->port;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -4282,6 +3090,20 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, crtc_state, level);
+ else if (IS_DISPLAY_VER(dev_priv, 11))
+ icl_ddi_vswing_sequence(encoder, crtc_state, level);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, crtc_state, level);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_vswing_sequence(encoder, crtc_state, level);
+ else
+ intel_prepare_hdmi_ddi_buffers(encoder, level);
+
+ if (IS_GEN9_BC(dev_priv))
+ skl_ddi_set_iboost(encoder, crtc_state, level);
+
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
/*
@@ -4602,7 +3424,7 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
* In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
return;
if (intel_de_wait_for_set(dev_priv,
@@ -4628,11 +3450,11 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000)
+ if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
else if (IS_JSL_EHL(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 3;
- else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+ else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
@@ -4643,7 +3465,7 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
{
u32 master_select;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
@@ -4767,7 +3589,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config);
pipe_config->fec_enable =
@@ -4791,7 +3613,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
pipe_config->mst_master_transcoder =
REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
@@ -4805,8 +3627,8 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
}
}
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
@@ -4828,6 +3650,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_read_func_ctl(encoder, pipe_config);
}
+ intel_ddi_mso_get_config(encoder, pipe_config);
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4853,7 +3677,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
if (!pipe_config->bigjoiner_slave)
- intel_ddi_clock_get(encoder, pipe_config);
+ ddi_dotclock_get(pipe_config);
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
@@ -4876,13 +3700,130 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
}
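+/**
+ * intel_ddi_get_clock - read out the PLL clock state for a DDI encoder
+ * @encoder: DDI encoder
+ * @crtc_state: state to fill with the PLL readout
+ * @pll: shared DPLL currently driving @encoder's port, must not be NULL
+ *
+ * Readout sketch: stash @pll in the default ICL port DPLL slot, read back
+ * its hardware state and derive @crtc_state->port_clock from it.
+ */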
+void intel_ddi_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
+ bool pll_active;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ port_dpll->pll = pll;
+ pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
+ drm_WARN_ON(&i915->drm, !pll_active);
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+
+ crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ &crtc_state->dpll_hw_state);
+}
+
+static void adls_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, adls_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void rkl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, rkl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void dg1_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, dg1_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, icl_ddi_combo_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum icl_port_dpll_id port_dpll_id;
+ struct icl_port_dpll *port_dpll;
+ bool pll_active;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ if (intel_get_shared_dpll_id(i915, pll) == DPLL_ID_ICL_TBTPLL)
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ else
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
+
+ port_dpll->pll = pll;
+ pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
+ drm_WARN_ON(&i915->drm, !pll_active);
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+
+ if (intel_get_shared_dpll_id(i915, crtc_state->shared_dpll) == DPLL_ID_ICL_TBTPLL)
+ crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
+ else
+ crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ &crtc_state->dpll_hw_state);
+}
+
+static void icl_ddi_tc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ icl_ddi_tc_get_clock(encoder, crtc_state, icl_ddi_tc_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void cnl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, cnl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void bxt_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, bxt_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void skl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, skl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+void hsw_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, hsw_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void intel_ddi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -5002,7 +3943,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing workarounds and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -5076,8 +4017,17 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
kfree(dig_port);
}
+static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
+
+ intel_dp->reset_link_params = true;
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_dp_encoder_reset,
+ .reset = intel_ddi_encoder_reset,
.destroy = intel_ddi_encoder_destroy,
};
@@ -5097,9 +4047,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
dig_port->dp.set_link_train = intel_ddi_set_link_train;
dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
dig_port->dp.set_signal_levels = tgl_set_signal_levels;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11)
dig_port->dp.set_signal_levels = icl_set_signal_levels;
else if (IS_CANNONLAKE(dev_priv))
dig_port->dp.set_signal_levels = cnl_set_signal_levels;
@@ -5368,7 +4318,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
@@ -5460,6 +4410,24 @@ static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
+static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (HAS_PCH_TGP(dev_priv))
+ return icl_hpd_pin(dev_priv, port);
+
+ return HPD_PORT_A + port - PORT_A;
+}
+
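+/*
+ * Whether @port is driven by a Type-C PHY on this platform: PORT_TC1 and
+ * up on display version 12+, PORT_C and up on version 11, none earlier.
+ */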
+static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+{
+ if (DISPLAY_VER(i915) >= 12)
+ return port >= PORT_TC1;
+ else if (DISPLAY_VER(i915) >= 11)
+ return port >= PORT_C;
+ else
+ return false;
+}
+
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
@@ -5467,6 +4435,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
+ const struct intel_bios_encoder_data *devdata;
bool init_hdmi, init_dp;
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -5482,9 +4451,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
}
- init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
- intel_bios_port_supports_hdmi(dev_priv, port);
- init_dp = intel_bios_port_supports_dp(dev_priv, port);
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ if (!devdata) {
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is not present\n",
+ port_name(port));
+ return;
+ }
+
+ init_hdmi = intel_bios_encoder_supports_dvi(devdata) ||
+ intel_bios_encoder_supports_hdmi(devdata);
+ init_dp = intel_bios_encoder_supports_dp(devdata);
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
@@ -5510,8 +4487,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
encoder = &dig_port->base;
+ encoder->devdata = devdata;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
@@ -5521,7 +4499,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
@@ -5551,7 +4529,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
encoder->get_hw_state = intel_ddi_get_hw_state;
- encoder->get_config = intel_ddi_get_config;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
encoder->suspend = intel_dp_encoder_suspend;
@@ -5564,22 +4541,83 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
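+
+ /*
+ * Per-platform clock hooks (sketch of the split): enable_clock and
+ * disable_clock route the port clock, is_clock_enabled reads the
+ * routing back, and get_config reads out the PLL plus the rest of
+ * the pipe configuration.
+ */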
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ encoder->enable_clock = adls_ddi_enable_clock;
+ encoder->disable_clock = adls_ddi_disable_clock;
+ encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
+ encoder->get_config = adls_ddi_get_config;
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ encoder->enable_clock = rkl_ddi_enable_clock;
+ encoder->disable_clock = rkl_ddi_disable_clock;
+ encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
+ encoder->get_config = rkl_ddi_get_config;
+ } else if (IS_DG1(dev_priv)) {
+ encoder->enable_clock = dg1_ddi_enable_clock;
+ encoder->disable_clock = dg1_ddi_disable_clock;
+ encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
+ encoder->get_config = dg1_ddi_get_config;
+ } else if (IS_JSL_EHL(dev_priv)) {
+ if (intel_ddi_is_tc(dev_priv, port)) {
+ encoder->enable_clock = jsl_ddi_tc_enable_clock;
+ encoder->disable_clock = jsl_ddi_tc_disable_clock;
+ encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ } else {
+ encoder->enable_clock = icl_ddi_combo_enable_clock;
+ encoder->disable_clock = icl_ddi_combo_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_ddi_is_tc(dev_priv, port)) {
+ encoder->enable_clock = icl_ddi_tc_enable_clock;
+ encoder->disable_clock = icl_ddi_tc_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
+ encoder->get_config = icl_ddi_tc_get_config;
+ } else {
+ encoder->enable_clock = icl_ddi_combo_enable_clock;
+ encoder->disable_clock = icl_ddi_combo_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ }
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ encoder->enable_clock = cnl_ddi_enable_clock;
+ encoder->disable_clock = cnl_ddi_disable_clock;
+ encoder->is_clock_enabled = cnl_ddi_is_clock_enabled;
+ encoder->get_config = cnl_ddi_get_config;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ /* BXT/GLK have fixed PLL->port mapping */
+ encoder->get_config = bxt_ddi_get_config;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ encoder->enable_clock = skl_ddi_enable_clock;
+ encoder->disable_clock = skl_ddi_disable_clock;
+ encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
+ encoder->get_config = skl_ddi_get_config;
+ } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ encoder->enable_clock = hsw_ddi_enable_clock;
+ encoder->disable_clock = hsw_ddi_disable_clock;
+ encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
+ encoder->get_config = hsw_ddi_get_config;
+ }
+
if (IS_DG1(dev_priv))
encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (INTEL_GEN(dev_priv) >= 12)
+ else if (DISPLAY_VER(dev_priv) >= 12)
encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
else if (IS_JSL_EHL(dev_priv))
encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (IS_GEN(dev_priv, 11))
+ else if (IS_DISPLAY_VER(dev_priv, 11))
encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (IS_GEN(dev_priv, 10))
+ else if (IS_DISPLAY_VER(dev_priv, 10))
encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
+ else if (IS_DISPLAY_VER(dev_priv, 9))
+ encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
else
encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
dig_port->saved_port_bits =
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& DDI_BUF_PORT_REVERSAL;
@@ -5588,14 +4626,17 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
+
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
- !intel_bios_port_supports_typec_usb(dev_priv, port) &&
- !intel_bios_port_supports_tbt(dev_priv, port);
+ !intel_bios_encoder_supports_typec_usb(devdata) &&
+ !intel_bios_encoder_supports_tbt(devdata);
intel_tc_port_init(dig_port, is_legacy);
@@ -5612,6 +4653,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ /* Splitter enable for eDP MSO is supported for pipe A only. */
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = BIT(PIPE_A);
}
/* In theory we don't need the encoder->type check, but leave it just in
@@ -5621,12 +4666,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
}
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (DISPLAY_VER(dev_priv) >= 8) {
if (port == PORT_A || IS_GEN9_LP(dev_priv))
dig_port->connected = bdw_digital_port_connected;
else
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index a4dd815c0000..59c6b01d4199 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -17,6 +17,7 @@ struct intel_crtc_state;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
+struct intel_shared_dpll;
enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
@@ -27,8 +28,22 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll);
+void hsw_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void hsw_ddi_disable_clock(struct intel_encoder *encoder);
+bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
+void hsw_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
@@ -40,8 +55,6 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
@@ -53,6 +66,6 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp,
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
new file mode 100644
index 000000000000..5d9ce6042e87
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -0,0 +1,1394 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_display_types.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well.
+ */
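+/*
+ * Layout note (sketch): each entry carries the DDI_BUF_TRANS_LO and
+ * DDI_BUF_TRANS_HI dwords for one vswing/pre-emphasis level, plus an
+ * i_boost value consumed by skl_ddi_set_iboost() on gen9 parts.
+ */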
+static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
+ { 0x00FFFFFF, 0x0006000E, 0x0 },
+ { 0x00D75FFF, 0x0005000A, 0x0 },
+ { 0x00C30FFF, 0x00040006, 0x0 },
+ { 0x80AAAFFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x0005000A, 0x0 },
+ { 0x00D75FFF, 0x000C0004, 0x0 },
+ { 0x80C30FFF, 0x000B0000, 0x0 },
+ { 0x00FFFFFF, 0x00040006, 0x0 },
+ { 0x80D75FFF, 0x000B0000, 0x0 },
+};
+
+static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000F000A, 0x0 },
+ { 0x00C30FFF, 0x00060006, 0x0 },
+ { 0x00AAAFFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x000F000A, 0x0 },
+ { 0x00D75FFF, 0x00160004, 0x0 },
+ { 0x00C30FFF, 0x001E0000, 0x0 },
+ { 0x00FFFFFF, 0x00060006, 0x0 },
+ { 0x00D75FFF, 0x001E0000, 0x0 },
+};
+
+static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
+ /* Idx NT mV d T mV d db */
+ { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
+ { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
+ { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
+ { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
+ { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
+ { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
+ { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
+ { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
+ { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
+ { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
+ { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
+ { 0x00FFFFFF, 0x00000012, 0x0 },
+ { 0x00EBAFFF, 0x00020011, 0x0 },
+ { 0x00C71FFF, 0x0006000F, 0x0 },
+ { 0x00AAAFFF, 0x000E000A, 0x0 },
+ { 0x00FFFFFF, 0x00020011, 0x0 },
+ { 0x00DB6FFF, 0x0005000F, 0x0 },
+ { 0x00BEEFFF, 0x000A000C, 0x0 },
+ { 0x00FFFFFF, 0x0005000F, 0x0 },
+ { 0x00DB6FFF, 0x000A000C, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
+ { 0x00FFFFFF, 0x0007000E, 0x0 },
+ { 0x00D75FFF, 0x000E000A, 0x0 },
+ { 0x00BEFFFF, 0x00140006, 0x0 },
+ { 0x80B2CFFF, 0x001B0002, 0x0 },
+ { 0x00FFFFFF, 0x000E000A, 0x0 },
+ { 0x00DB6FFF, 0x00160005, 0x0 },
+ { 0x80C71FFF, 0x001A0002, 0x0 },
+ { 0x00F7DFFF, 0x00180004, 0x0 },
+ { 0x80D75FFF, 0x001B0002, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
+ { 0x00FFFFFF, 0x0001000E, 0x0 },
+ { 0x00D75FFF, 0x0004000A, 0x0 },
+ { 0x00C30FFF, 0x00070006, 0x0 },
+ { 0x00AAAFFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x0004000A, 0x0 },
+ { 0x00D75FFF, 0x00090004, 0x0 },
+ { 0x00C30FFF, 0x000C0000, 0x0 },
+ { 0x00FFFFFF, 0x00070006, 0x0 },
+ { 0x00D75FFF, 0x000C0000, 0x0 },
+};
+
+static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
+ /* Idx NT mV d T mV df db */
+ { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
+ { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
+ { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
+ { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
+ { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
+ { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
+ { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
+ { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
+ { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
+ { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
+};
+
+/* Skylake H and S */
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x000000DF, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x1 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000088, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+ { 0x00000018, 0x000000A2, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00000018, 0x00000088, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake H and S */
+static const struct ddi_buf_trans kbl_ddi_translations_dp[] = {
+ { 0x00002016, 0x000000A0, 0x0 },
+ { 0x00005012, 0x0000009B, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
+ { 0x00002016, 0x0000009B, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000C0, 0x1 },
+ { 0x00002016, 0x00000097, 0x0 },
+ { 0x80005012, 0x000000C0, 0x1 },
+};
+
+/* Kabylake U */
+static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = {
+ { 0x0000201B, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x80009010, 0x000000C0, 0x3 },
+ { 0x0000201B, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00002016, 0x0000004F, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/* Kabylake Y */
+static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = {
+ { 0x00001017, 0x000000A1, 0x0 },
+ { 0x00005012, 0x00000088, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
+ { 0x8000800F, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000009D, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
+ { 0x00001017, 0x0000004C, 0x0 },
+ { 0x80005012, 0x000000C0, 0x3 },
+};
+
+/*
+ * Skylake/Kabylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00000018, 0x000000AB, 0x0 },
+ { 0x00007013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake/Kabylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000A9, 0x0 },
+ { 0x00007011, 0x000000A2, 0x0 },
+ { 0x00009010, 0x0000009C, 0x0 },
+ { 0x00000018, 0x000000A9, 0x0 },
+ { 0x00006013, 0x000000A2, 0x0 },
+ { 0x00007011, 0x000000A6, 0x0 },
+ { 0x00002016, 0x000000AB, 0x0 },
+ { 0x00005013, 0x0000009F, 0x0 },
+ { 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake/Kabylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+ { 0x00000018, 0x000000A8, 0x0 },
+ { 0x00004013, 0x000000AB, 0x0 },
+ { 0x00007011, 0x000000A4, 0x0 },
+ { 0x00009010, 0x000000DF, 0x0 },
+ { 0x00000018, 0x000000AA, 0x0 },
+ { 0x00006013, 0x000000A4, 0x0 },
+ { 0x00007011, 0x0000009D, 0x0 },
+ { 0x00000018, 0x000000A0, 0x0 },
+ { 0x00006012, 0x000000DF, 0x0 },
+ { 0x00000018, 0x0000008A, 0x0 },
+};
+
+/* Skylake/Kabylake U, H and S */
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+ { 0x00000018, 0x000000AC, 0x0 },
+ { 0x00005012, 0x0000009D, 0x0 },
+ { 0x00007011, 0x00000088, 0x0 },
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00000018, 0x00000098, 0x0 },
+ { 0x00004013, 0x00000088, 0x0 },
+ { 0x80006012, 0x000000CD, 0x1 },
+ { 0x00000018, 0x000000DF, 0x0 },
+ { 0x80003015, 0x000000CD, 0x1 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x1 },
+ { 0x80000018, 0x000000C0, 0x1 },
+};
+
+/* Skylake/Kabylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+ { 0x00000018, 0x000000A1, 0x0 },
+ { 0x00005012, 0x000000DF, 0x0 },
+ { 0x80007011, 0x000000CB, 0x3 },
+ { 0x00000018, 0x000000A4, 0x0 },
+ { 0x00000018, 0x0000009D, 0x0 },
+ { 0x00004013, 0x00000080, 0x0 },
+ { 0x80006013, 0x000000C0, 0x3 },
+ { 0x00000018, 0x0000008A, 0x0 },
+ { 0x80003015, 0x000000C0, 0x3 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x3 },
+ { 0x80000018, 0x000000C0, 0x3 },
+};
+
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
+ /* Idx NT mV diff db */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
+};
+
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
+ /* Idx NT mV diff db */
+ { 26, 0, 0, 128, }, /* 0: 200 0 */
+ { 38, 0, 0, 112, }, /* 1: 200 1.5 */
+ { 48, 0, 0, 96, }, /* 2: 200 4 */
+ { 54, 0, 0, 69, }, /* 3: 200 6 */
+ { 32, 0, 0, 128, }, /* 4: 250 0 */
+ { 48, 0, 0, 104, }, /* 5: 250 1.5 */
+ { 54, 0, 0, 85, }, /* 6: 250 4 */
+ { 43, 0, 0, 128, }, /* 7: 300 0 */
+ { 54, 0, 0, 101, }, /* 8: 300 1.5 */
+ { 48, 0, 0, 128, }, /* 9: 300 0 */
+};
+
+/*
+ * BSpec has 2 recommended values - entries 0 and 8.
+ * Using the entry with the higher vswing.
+ */
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff db */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for DP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
+ { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for HDMI */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
+ { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
+ { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for eDP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
+ { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
+ { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
+ { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */
+ { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */
+ { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for DP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */
+ { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for HDMI */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+ { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
+ { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
+ { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
+ { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+ { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
+ { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
+ { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
+ { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for eDP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
+ { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
+ { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
+ { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
+ { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
+ { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */
+ { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for DP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+ { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
+ { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */
+ { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
+ { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */
+ { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */
+ { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for HDMI */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+ { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */
+ { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */
+ { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */
+ { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+ { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */
+ { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */
+ { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */
+ { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for eDP */
+static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */
+ { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */
+ { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */
+ { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */
+ { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */
+ { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
+};
+
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
+ { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
+ { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
+ { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
+ { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
+};
+
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */
+ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
+ { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
+ { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */
+ { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */
+ { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x21, 0x00, 0x00 }, /* 1 0 */
+ { 0x2B, 0x00, 0x08 }, /* 1 1 */
+ { 0x30, 0x00, 0x0F }, /* 1 2 */
+ { 0x31, 0x00, 0x03 }, /* 2 0 */
+ { 0x34, 0x00, 0x0B }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
+ /* Voltage swing pre-emphasis */
+ { 0x18, 0x00, 0x00 }, /* 0 0 */
+ { 0x1D, 0x00, 0x05 }, /* 0 1 */
+ { 0x24, 0x00, 0x0C }, /* 0 2 */
+ { 0x2B, 0x00, 0x14 }, /* 0 3 */
+ { 0x26, 0x00, 0x00 }, /* 1 0 */
+ { 0x2C, 0x00, 0x07 }, /* 1 1 */
+ { 0x33, 0x00, 0x0C }, /* 1 2 */
+ { 0x2E, 0x00, 0x00 }, /* 2 0 */
+ { 0x36, 0x00, 0x09 }, /* 2 1 */
+ { 0x3F, 0x00, 0x00 }, /* 3 0 */
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
+ /* HDMI Preset VS Pre-emph */
+ { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */
+ { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */
+ { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */
+ { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */
+ { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */
+ { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */
+ { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */
+ { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */
+ { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */
+ { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
+ { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/*
+ * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
+ * that the DisplayPort specification requires.
+ */
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
+ /* VS pre-emp */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
+ { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
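+/*
+ * HOBL ("hours of battery life") is an eDP power optimization; this
+ * helper lets the vswing programming code detect that the HOBL table
+ * above was selected.
+ */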
+bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
+{
+ return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+}
+
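+/*
+ * eDP table selection: panels flagged as low vswing in the VBT get the
+ * dedicated eDP tables, everything else reuses the DP tables.
+ */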
+static const struct ddi_buf_trans *
+bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ return bdw_ddi_translations_edp;
+ } else {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ return bdw_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_SKL_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+ return skl_y_ddi_translations_dp;
+ } else if (IS_SKL_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ return skl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ return skl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
+kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
+ return kbl_y_ddi_translations_dp;
+ } else if (IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
+ return kbl_u_ddi_translations_dp;
+ } else {
+ *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp);
+ return kbl_ddi_translations_dp;
+ }
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+ return skl_y_ddi_translations_edp;
+ } else if (IS_SKL_ULT(dev_priv) ||
+ IS_KBL_ULT(dev_priv) ||
+ IS_CFL_ULT(dev_priv) ||
+ IS_CML_ULT(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+ return skl_u_ddi_translations_edp;
+ } else {
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+ return skl_ddi_translations_edp;
+ }
+ }
+
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv))
+ return kbl_get_buf_trans_dp(encoder, n_entries);
+ else
+ return skl_get_buf_trans_dp(encoder, n_entries);
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (IS_SKL_ULX(dev_priv) ||
+ IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv) ||
+ IS_CML_ULX(dev_priv)) {
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+ return skl_y_ddi_translations_hdmi;
+ } else {
+ *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ return skl_ddi_translations_hdmi;
+ }
+}
+
+static int skl_buf_trans_num_entries(enum port port, int n_entries)
+{
+ /* Only DDIA and DDIE can select the 10th register with DP */
+ if (port == PORT_A || port == PORT_E)
+ return min(n_entries, 10);
+ else
+ return min(n_entries, 9);
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
+ const struct ddi_buf_trans *ddi_translations =
+ kbl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_dp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ return bdw_ddi_translations_dp;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+ return hsw_ddi_translations_dp;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_GEN9_BC(dev_priv)) {
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_edp(encoder, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return ddi_translations;
+ } else if (IS_BROADWELL(dev_priv)) {
+ return bdw_get_buf_trans_edp(encoder, n_entries);
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+ return hsw_ddi_translations_dp;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+ return bdw_ddi_translations_fdi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+ return hsw_ddi_translations_fdi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_GEN9_BC(dev_priv)) {
+ return skl_get_buf_trans_hdmi(dev_priv, n_entries);
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ return bdw_ddi_translations_hdmi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ return hsw_ddi_translations_hdmi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
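+/*
+ * gen9lp (BXT/GLK) uses its own translation layout; eDP takes the low
+ * vswing table when the VBT requests it, otherwise the DP table.
+ */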
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+ return bxt_ddi_translations_dp;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
+ return bxt_ddi_translations_edp;
+ }
+
+ return bxt_get_buf_trans_dp(encoder, n_entries);
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+ return bxt_ddi_translations_hdmi;
+}
+
+const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return bxt_get_buf_trans_hdmi(encoder, n_entries);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return bxt_get_buf_trans_edp(encoder, n_entries);
+ return bxt_get_buf_trans_dp(encoder, n_entries);
+}
+
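+/*
+ * CNL tables are keyed on the voltage level (0.85V/0.95V/1.05V)
+ * reported in CNL_PORT_COMP_DW3.
+ */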
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
+ return cnl_ddi_translations_hdmi_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
+ return cnl_ddi_translations_hdmi_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
+ return cnl_ddi_translations_hdmi_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
+ return cnl_ddi_translations_dp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
+ return cnl_ddi_translations_dp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
+ return cnl_ddi_translations_dp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
+ return cnl_ddi_translations_edp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
+ return cnl_ddi_translations_edp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
+ return cnl_ddi_translations_edp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+ } else {
+ return cnl_get_buf_trans_dp(encoder, n_entries);
+ }
+}
+
+const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return cnl_get_buf_trans_hdmi(encoder, n_entries);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return cnl_get_buf_trans_edp(encoder, n_entries);
+ return cnl_get_buf_trans_dp(encoder, n_entries);
+}
+
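+/*
+ * ICL combo PHY: DP always uses the HBR2 rated table; eDP picks HBR3
+ * above 540000, the low vswing HBR2 table when the VBT requests it,
+ * and on DG1 dedicated DP tables split at HBR2 otherwise.
+ */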
+static const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+ return icl_combo_phy_ddi_translations_dp_hbr2;
+}
+
+static const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (crtc_state->port_clock > 540000) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_DG1(dev_priv)) {
+ *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
+ return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
+ }
+
+ return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
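+/* ICL MG PHY (Type-C ports): DP tables split at HBR2 (port_clock > 270000). */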
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
+ return icl_mg_phy_ddi_translations_hdmi;
+}
+
+static const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
+ return icl_mg_phy_ddi_translations_hbr2_hbr3;
+ } else {
+ *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
+ return icl_mg_phy_ddi_translations_rbr_hbr;
+ }
+}
+
+const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else
+ return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
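+/*
+ * EHL combo PHY: ICL tables for HDMI, a dedicated DP table, and the
+ * ICL low vswing HBR2 table for eDP when the VBT requests it.
+ */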
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
+ return ehl_combo_phy_ddi_translations_dp;
+}
+
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ }
+
+ return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
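+/*
+ * JSL combo PHY: ICL tables for HDMI and DP; eDP low vswing splits
+ * between dedicated HBR and HBR2 tables at 270000.
+ */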
+static const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+ return icl_combo_phy_ddi_translations_dp_hbr2;
+}
+
+static const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2);
+ return jsl_combo_phy_ddi_translations_edp_hbr2;
+ } else {
+ *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr);
+ return jsl_combo_phy_ddi_translations_edp_hbr;
+ }
+ }
+
+ return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
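+/*
+ * TGL combo PHY: DP splits at HBR2 (port_clock > 270000), with RKL and
+ * TGL-U/Y getting SKU specific variants.
+ */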
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (crtc_state->port_clock > 270000) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
+ return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
+ } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_combo_phy_ddi_translations_dp_hbr2;
+ }
+ } else {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
+ return rkl_combo_phy_ddi_translations_dp_hbr;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
+ }
+}
+
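+/*
+ * eDP prefers the HOBL ("hours of battery life") power saving table
+ * unless a previous link training attempt with it has failed.
+ */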
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (crtc_state->port_clock > 540000) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
+ return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+ } else if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ }
+
+ return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
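+/* TGL Dekel PHY (Type-C ports): DP tables split at HBR2 (port_clock > 270000). */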
+static const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
+ return tgl_dkl_phy_hdmi_ddi_trans;
+}
+
+static const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
+ return tgl_dkl_phy_dp_ddi_trans_hbr2;
+ } else {
+ *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
+ return tgl_dkl_phy_dp_ddi_trans;
+ }
+}
+
+const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries);
+ else
+ return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
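+/*
+ * Look up the HDMI translation table for the encoder's PHY, report the
+ * platform's default entry index via @default_entry and return the
+ * table size.
+ */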
+int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *default_entry)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ int n_entries;
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ else
+ tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_DISPLAY_VER(dev_priv, 11)) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ else
+ icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ cnl_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ bxt_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = n_entries - 1;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+ *default_entry = 6;
+ } else {
+ drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
+ return 0;
+ }
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
+ return 0;
+
+ return n_entries;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
new file mode 100644
index 000000000000..f8f0ef87e977
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DDI_BUF_TRANS_H_
+#define _INTEL_DDI_BUF_TRANS_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+struct intel_crtc_state;
+
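+/* HSW/BDW/SKL style DDI buffer translation entry */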
+struct ddi_buf_trans {
+ u32 trans1; /* balance leg enable, de-emph level */
+ u32 trans2; /* vref sel, vswing */
+ u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
+};
+
+struct bxt_ddi_buf_trans {
+ u8 margin; /* swing value */
+ u8 scale; /* scale value */
+ u8 enable; /* scale enable */
+ u8 deemphasis;
+};
+
+struct cnl_ddi_buf_trans {
+ u8 dw2_swing_sel;
+ u8 dw7_n_scalar;
+ u8 dw4_cursor_coeff;
+ u8 dw4_post_cursor_2;
+ u8 dw4_post_cursor_1;
+};
+
+struct icl_mg_phy_ddi_buf_trans {
+ u32 cri_txdeemph_override_11_6;
+ u32 cri_txdeemph_override_5_0;
+ u32 cri_txdeemph_override_17_12;
+};
+
+struct tgl_dkl_phy_ddi_buf_trans {
+ u32 dkl_vswing_control;
+ u32 dkl_preshoot_control;
+ u32 dkl_de_emphasis_control;
+};
+
+bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table);
+
+int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *default_entry);
+
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries);
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+ int *n_entries);
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
+ int *n_entries);
+const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries);
+
+const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+
+const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct tgl_dkl_phy_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct cnl_ddi_buf_trans *
+jsl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct cnl_ddi_buf_trans *
+icl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+const struct icl_mg_phy_ddi_buf_trans *
+icl_get_mg_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+
+const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8d7aaa68c6f6..64e9107d70f7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
@@ -43,6 +44,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
+#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
@@ -52,6 +54,7 @@
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
+#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
@@ -64,8 +67,9 @@
#include "gt/intel_rps.h"
+#include "g4x_dp.h"
+#include "g4x_hdmi.h"
#include "i915_drv.h"
-#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -94,6 +98,8 @@
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -112,11 +118,6 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
-static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
@@ -229,7 +230,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
u32 line1, line2;
u32 line_mask;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -269,7 +270,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -361,7 +362,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (IS_GEN(dev_priv, 5))
+ if (IS_IRONLAKE(dev_priv))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -406,13 +407,13 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
- intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPC:
- intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPD:
- intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
break;
default:
MISSING_CASE(port_sel);
@@ -515,7 +516,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe port_pipe;
bool state;
- state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
+ state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
"PCH DP %c enabled on transcoder %c, should be disabled\n",
@@ -569,224 +570,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
-static void _vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- intel_de_posting_read(dev_priv, DPLL(pipe));
- udelay(150);
-
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
-}
-
-static void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _vlv_enable_pll(crtc, pipe_config);
-
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
-}
-
-
-static void _chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 tmp;
-
- vlv_dpio_get(dev_priv);
-
- /* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
- tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
-
- vlv_dpio_put(dev_priv);
-
- /*
- * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
- */
- udelay(1);
-
- /* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-
- /* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
-}
-
-static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _chv_enable_pll(crtc, pipe_config);
-
- if (pipe != PIPE_A) {
- /*
- * WaPixelRepeatModeFixForC0:chv
- *
- * DPLLCMD is AWOL. Use chicken bits to propagate
- * the value from DPLLBMD to either pipe B or C.
- */
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
-
- /*
- * DPLLB VGA mode also seems to cause problems.
- * We should always have it disabled.
- */
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(PIPE_B)) &
- DPLL_VGA_MODE_DIS) == 0);
- } else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
- }
-}
-
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv))
- return false;
-
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = DPLL(crtc->pipe);
- u32 dpll = crtc_state->dpll_hw_state.dpll;
- int i;
-
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
- assert_panel_unlocked(dev_priv, crtc->pipe);
-
- /*
- * Apparently we need to have VGA mode enabled prior to changing
- * the P1/P2 dividers. Otherwise the DPLL will keep using the old
- * dividers, even though the register value does change.
- */
- intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, reg, dpll);
-
- /* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, reg);
- udelay(150);
-
- if (INTEL_GEN(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
- crtc_state->dpll_hw_state.dpll_md);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- intel_de_write(dev_priv, reg, dpll);
- }
-
- /* We do this three times for luck */
- for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, reg, dpll);
- intel_de_posting_read(dev_priv, reg);
- udelay(150); /* wait for warmup */
- }
-}
-
-static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* Don't disable pipe or pipe PLLs if needed */
- if (IS_I830(dev_priv))
- return;
-
- /* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
-
- intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
- intel_de_posting_read(dev_priv, DPLL(pipe));
-}
-
-static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 val;
-
- /* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
-
- val = DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- intel_de_write(dev_priv, DPLL(pipe), val);
- intel_de_posting_read(dev_priv, DPLL(pipe));
-}
-
-static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 val;
-
- /* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
-
- val = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- intel_de_write(dev_priv, DPLL(pipe), val);
- intel_de_posting_read(dev_priv, DPLL(pipe));
-
- vlv_dpio_get(dev_priv);
-
- /* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
- val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
-
- vlv_dpio_put(dev_priv);
-}
-
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port,
unsigned int expected_mask)
@@ -1013,8 +796,6 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
- trace_intel_pipe_enable(crtc);
-
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
@@ -1054,8 +835,6 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- trace_intel_pipe_disable(crtc);
-
reg = PIPECONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
if ((val & PIPECONF_ENABLE) == 0)
@@ -1077,77 +856,6 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
intel_wait_for_pipe_off(old_crtc_state);
}
-static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
-{
- return IS_GEN(dev_priv, 2) ? 2048 : 4096;
-}
-
-static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
-{
- if (!is_ccs_modifier(fb->modifier))
- return false;
-
- return plane >= fb->format->num_planes / 2;
-}
-
-static bool is_gen12_ccs_modifier(u64 modifier)
-{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
-}
-
-static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
-{
- return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
-}
-
-static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
-{
- return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
- plane == 2;
-}
-
-static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
-{
- if (is_ccs_modifier(fb->modifier))
- return is_ccs_plane(fb, plane);
-
- return plane == 1;
-}
-
-static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
-{
- drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
- (main_plane && main_plane >= fb->format->num_planes / 2));
-
- return fb->format->num_planes / 2 + main_plane;
-}
-
-static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
-{
- drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
- ccs_plane < fb->format->num_planes / 2);
-
- if (is_gen12_ccs_cc_plane(fb, ccs_plane))
- return 0;
-
- return ccs_plane - fb->format->num_planes / 2;
-}
-
-int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
-{
- struct drm_i915_private *i915 = to_i915(fb->dev);
-
- if (is_ccs_modifier(fb->modifier))
- return main_to_ccs_plane(fb, main_plane);
- else if (INTEL_GEN(i915) < 11 &&
- intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- return 1;
- else
- return 0;
-}
-
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier)
@@ -1156,14 +864,7 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
-static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
- int color_plane)
-{
- return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- color_plane == 1;
-}
-
-static unsigned int
+unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
@@ -1173,7 +874,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
case DRM_FORMAT_MOD_LINEAR:
return intel_tile_size(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
return 128;
else
return 512;
@@ -1188,7 +889,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
- if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
+ if (IS_DISPLAY_VER(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
@@ -1217,38 +918,6 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
}
}
-static unsigned int
-intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
-{
- if (is_gen12_ccs_plane(fb, color_plane))
- return 1;
-
- return intel_tile_size(to_i915(fb->dev)) /
- intel_tile_width_bytes(fb, color_plane);
-}
-
-/* Return the tile dimensions in pixel units */
-static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
- unsigned int *tile_width,
- unsigned int *tile_height)
-{
- unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
- unsigned int cpp = fb->format->cpp[color_plane];
-
- *tile_width = tile_width_bytes / cpp;
- *tile_height = intel_tile_height(fb, color_plane);
-}
-
-static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
- int color_plane)
-{
- unsigned int tile_width, tile_height;
-
- intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
-
- return fb->pitches[color_plane] * tile_height;
-}
-
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height)
@@ -1264,7 +933,7 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
int i;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- size += rot_info->plane[i].width * rot_info->plane[i].height;
+ size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
return size;
}
@@ -1275,43 +944,19 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
int i;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
- size += rem_info->plane[i].width * rem_info->plane[i].height;
+ size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
return size;
}
-static void
-intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
- const struct drm_framebuffer *fb,
- unsigned int rotation)
-{
- view->type = I915_GGTT_VIEW_NORMAL;
- if (drm_rotation_90_or_270(rotation)) {
- view->type = I915_GGTT_VIEW_ROTATED;
- view->rotated = to_intel_framebuffer(fb)->rot_info;
- }
-}
-
-static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv))
- return 16 * 1024;
- else if (IS_I85X(dev_priv))
- return 256;
- else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- return 32;
- else
- return 4 * 1024;
-}
-
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
return 256 * 1024;
else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (DISPLAY_VER(dev_priv) >= 4)
return 4 * 1024;
else
return 0;
@@ -1319,7 +964,7 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
static bool has_async_flips(struct drm_i915_private *i915)
{
- return INTEL_GEN(i915) >= 5;
+ return DISPLAY_VER(i915) >= 5;
}
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
@@ -1328,7 +973,7 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
+ if ((DISPLAY_VER(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
is_ccs_plane(fb, color_plane))
return 4096;
@@ -1349,7 +994,7 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
- if (INTEL_GEN(dev_priv) >= 12 &&
+ if (DISPLAY_VER(dev_priv) >= 12 &&
is_semiplanar_uv_plane(fb, color_plane))
return intel_tile_row_size(fb, color_plane);
fallthrough;
@@ -1366,13 +1011,14 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- return INTEL_GEN(dev_priv) < 4 ||
+ return DISPLAY_VER(dev_priv) < 4 ||
(plane->has_fbc &&
- plane_state->view.type == I915_GGTT_VIEW_NORMAL);
+ plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags)
@@ -1381,14 +1027,19 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
intel_wakeref_t wakeref;
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
+ int ret;
if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
- alignment = intel_surf_alignment(fb, 0);
+ if (phys_cursor)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, 0);
if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
@@ -1423,14 +1074,26 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
- vma = i915_gem_object_pin_to_display_plane(obj,
- alignment, view, pinctl);
- if (IS_ERR(vma))
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && phys_cursor)
+ ret = i915_gem_object_attach_phys(obj, alignment);
+ if (!ret)
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
goto err;
- if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
- int ret;
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
+ }
+ }
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
/*
* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
@@ -1449,18 +1112,30 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* mode that matches the user configuration.
*/
ret = i915_vma_pin_fence(vma);
- if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+ if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- goto err;
+ goto err_unpin;
}
+ ret = 0;
- if (ret == 0 && vma->fence)
+ if (vma->fence)
*out_flags |= PLANE_HAS_FENCE;
}
i915_vma_get(vma);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ vma = ERR_PTR(ret);
+
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
@@ -1474,15 +1149,6 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
i915_vma_put(vma);
}
-static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
- unsigned int rotation)
-{
- if (drm_rotation_90_or_270(rotation))
- return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
- else
- return fb->pitches[color_plane];
-}
-
/*
* Convert the x/y offsets into a linear offset.
* Only valid with 0/180 degree rotation, which is fine since linear
@@ -1495,7 +1161,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
{
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
- unsigned int pitch = state->color_plane[color_plane].stride;
+ unsigned int pitch = state->view.color_plane[color_plane].stride;
return y * pitch + x * cpp;
}
@@ -1510,232 +1176,8 @@ void intel_add_fb_offsets(int *x, int *y,
int color_plane)
{
- *x += state->color_plane[color_plane].x;
- *y += state->color_plane[color_plane].y;
-}
-
-static u32 intel_adjust_tile_offset(int *x, int *y,
- unsigned int tile_width,
- unsigned int tile_height,
- unsigned int tile_size,
- unsigned int pitch_tiles,
- u32 old_offset,
- u32 new_offset)
-{
- unsigned int pitch_pixels = pitch_tiles * tile_width;
- unsigned int tiles;
-
- WARN_ON(old_offset & (tile_size - 1));
- WARN_ON(new_offset & (tile_size - 1));
- WARN_ON(new_offset > old_offset);
-
- tiles = (old_offset - new_offset) / tile_size;
-
- *y += tiles / pitch_tiles * tile_height;
- *x += tiles % pitch_tiles * tile_width;
-
- /* minimize x in case it got needlessly big */
- *y += *x / pitch_pixels * tile_height;
- *x %= pitch_pixels;
-
- return new_offset;
-}
-
-static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
-{
- return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
- is_gen12_ccs_plane(fb, color_plane);
-}
-
-static u32 intel_adjust_aligned_offset(int *x, int *y,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation,
- unsigned int pitch,
- u32 old_offset, u32 new_offset)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int cpp = fb->format->cpp[color_plane];
-
- drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
-
- if (!is_surface_linear(fb, color_plane)) {
- unsigned int tile_size, tile_width, tile_height;
- unsigned int pitch_tiles;
-
- tile_size = intel_tile_size(dev_priv);
- intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
-
- if (drm_rotation_90_or_270(rotation)) {
- pitch_tiles = pitch / tile_height;
- swap(tile_width, tile_height);
- } else {
- pitch_tiles = pitch / (tile_width * cpp);
- }
-
- intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- old_offset, new_offset);
- } else {
- old_offset += *y * pitch + *x * cpp;
-
- *y = (old_offset - new_offset) / pitch;
- *x = ((old_offset - new_offset) - *y * pitch) / cpp;
- }
-
- return new_offset;
-}
-
-/*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- */
-u32 intel_plane_adjust_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane,
- u32 old_offset, u32 new_offset)
-{
- return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
- state->hw.rotation,
- state->color_plane[color_plane].stride,
- old_offset, new_offset);
-}
-
-/*
- * Computes the aligned offset to the base tile and adjusts
- * x, y. bytes per pixel is assumed to be a power-of-two.
- *
- * In the 90/270 rotated case, x and y are assumed
- * to be already rotated to match the rotated GTT view, and
- * pitch is the tile_height aligned framebuffer height.
- *
- * This function is used when computing the derived information
- * under intel_framebuffer, so using any of that information
- * here is not allowed. Anything under drm_framebuffer can be
- * used. This is why the user has to pass in the pitch since it
- * is specified in the rotated orientation.
- */
-static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int pitch,
- unsigned int rotation,
- u32 alignment)
-{
- unsigned int cpp = fb->format->cpp[color_plane];
- u32 offset, offset_aligned;
-
- if (!is_surface_linear(fb, color_plane)) {
- unsigned int tile_size, tile_width, tile_height;
- unsigned int tile_rows, tiles, pitch_tiles;
-
- tile_size = intel_tile_size(dev_priv);
- intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
-
- if (drm_rotation_90_or_270(rotation)) {
- pitch_tiles = pitch / tile_height;
- swap(tile_width, tile_height);
- } else {
- pitch_tiles = pitch / (tile_width * cpp);
- }
-
- tile_rows = *y / tile_height;
- *y %= tile_height;
-
- tiles = *x / tile_width;
- *x %= tile_width;
-
- offset = (tile_rows * pitch_tiles + tiles) * tile_size;
-
- offset_aligned = offset;
- if (alignment)
- offset_aligned = rounddown(offset_aligned, alignment);
-
- intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- offset, offset_aligned);
- } else {
- offset = *y * pitch + *x * cpp;
- offset_aligned = offset;
- if (alignment) {
- offset_aligned = rounddown(offset_aligned, alignment);
- *y = (offset % alignment) / pitch;
- *x = ((offset % alignment) - *y * pitch) / cpp;
- } else {
- *y = *x = 0;
- }
- }
-
- return offset_aligned;
-}
-
-u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane)
-{
- struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- const struct drm_framebuffer *fb = state->hw.fb;
- unsigned int rotation = state->hw.rotation;
- int pitch = state->color_plane[color_plane].stride;
- u32 alignment;
-
- if (intel_plane->id == PLANE_CURSOR)
- alignment = intel_cursor_alignment(dev_priv);
- else
- alignment = intel_surf_alignment(fb, color_plane);
-
- return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
- pitch, rotation, alignment);
-}
-
-/* Convert the fb->offset[] into x/y offsets */
-static int intel_fb_offset_to_xy(int *x, int *y,
- const struct drm_framebuffer *fb,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int height;
- u32 alignment;
-
- if (INTEL_GEN(dev_priv) >= 12 &&
- is_semiplanar_uv_plane(fb, color_plane))
- alignment = intel_tile_row_size(fb, color_plane);
- else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
- alignment = intel_tile_size(dev_priv);
- else
- alignment = 0;
-
- if (alignment != 0 && fb->offsets[color_plane] % alignment) {
- drm_dbg_kms(&dev_priv->drm,
- "Misaligned offset 0x%08x for color plane %d\n",
- fb->offsets[color_plane], color_plane);
- return -EINVAL;
- }
-
- height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
- height = ALIGN(height, intel_tile_height(fb, color_plane));
-
- /* Catch potential overflows early */
- if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
- fb->offsets[color_plane])) {
- drm_dbg_kms(&dev_priv->drm,
- "Bad offset 0x%08x or pitch %d for color plane %d\n",
- fb->offsets[color_plane], fb->pitches[color_plane],
- color_plane);
- return -ERANGE;
- }
-
- *x = 0;
- *y = 0;
-
- intel_adjust_aligned_offset(x, y,
- fb, color_plane, DRM_MODE_ROTATE_0,
- fb->pitches[color_plane],
- fb->offsets[color_plane], 0);
-
- return 0;
+ *x += state->view.color_plane[color_plane].x;
+ *y += state->view.color_plane[color_plane].y;
}
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
@@ -1881,18 +1323,9 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
}
}
-bool is_ccs_modifier(u64 modifier)
-{
- return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
- modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
-}
-
static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
- return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
+ return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
512) * 64;
}
@@ -1928,9 +1361,9 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
* The new CCS hash mode makes remapping impossible
*/
if (!is_ccs_modifier(modifier)) {
- if (INTEL_GEN(dev_priv) >= 7)
+ if (DISPLAY_VER(dev_priv) >= 7)
return 256*1024;
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (DISPLAY_VER(dev_priv) >= 4)
return 128*1024;
}
@@ -1970,631 +1403,19 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
+ if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
+ color_plane == 0 && fb->width > 3840)
tile_width *= 4;
/*
* The main surface pitch must be padded to a multiple of four
* tile widths.
*/
- else if (INTEL_GEN(dev_priv) >= 12)
+ else if (DISPLAY_VER(dev_priv) >= 12)
tile_width *= 4;
}
return tile_width;
}
-bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int i;
-
- /* We don't want to deal with remapping with cursors */
- if (plane->id == PLANE_CURSOR)
- return false;
-
- /*
- * The display engine limits already match/exceed the
- * render engine limits, so not much point in remapping.
- * Would also need to deal with the fence POT alignment
- * and gen2 2KiB GTT tile size.
- */
- if (INTEL_GEN(dev_priv) < 4)
- return false;
-
- /*
- * The new CCS hash mode isn't compatible with remapping as
- * the virtual address of the pages affects the compressed data.
- */
- if (is_ccs_modifier(fb->modifier))
- return false;
-
- /* Linear needs a page aligned stride for remapping */
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- unsigned int alignment = intel_tile_size(dev_priv) - 1;
-
- for (i = 0; i < fb->format->num_planes; i++) {
- if (fb->pitches[i] & alignment)
- return false;
- }
- }
-
- return true;
-}
-
-static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 stride, max_stride;
-
- /*
- * No remapping for invisible planes since we don't have
- * an actual source viewport to remap.
- */
- if (!plane_state->uapi.visible)
- return false;
-
- if (!intel_plane_can_remap(plane_state))
- return false;
-
- /*
- * FIXME: aux plane limits on gen9+ are
- * unclear in Bspec, for now no checking.
- */
- stride = intel_fb_pitch(fb, 0, rotation);
- max_stride = plane->max_stride(plane, fb->format->format,
- fb->modifier, rotation);
-
- return stride > max_stride;
-}
-
-static void
-intel_fb_plane_get_subsampling(int *hsub, int *vsub,
- const struct drm_framebuffer *fb,
- int color_plane)
-{
- int main_plane;
-
- if (color_plane == 0) {
- *hsub = 1;
- *vsub = 1;
-
- return;
- }
-
- /*
- * TODO: Deduct the subsampling from the char block for all CCS
- * formats and planes.
- */
- if (!is_gen12_ccs_plane(fb, color_plane)) {
- *hsub = fb->format->hsub;
- *vsub = fb->format->vsub;
-
- return;
- }
-
- main_plane = ccs_to_main_plane(fb, color_plane);
- *hsub = drm_format_info_block_width(fb->format, color_plane) /
- drm_format_info_block_width(fb->format, main_plane);
-
- /*
- * The min stride check in the core framebuffer_check() function
- * assumes that format->hsub applies to every plane except for the
- * first plane. That's incorrect for the CCS AUX plane of the first
- * plane, but for the above check to pass we must define the block
- * width with that subsampling applied to it. Adjust the width here
- * accordingly, so we can calculate the actual subsampling factor.
- */
- if (main_plane == 0)
- *hsub *= fb->format->hsub;
-
- *vsub = 32;
-}
-
-static int
-intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
-{
- struct drm_i915_private *i915 = to_i915(fb->dev);
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- int main_plane;
- int hsub, vsub;
- int tile_width, tile_height;
- int ccs_x, ccs_y;
- int main_x, main_y;
-
- if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
- return 0;
-
- intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
-
- tile_width *= hsub;
- tile_height *= vsub;
-
- ccs_x = (x * hsub) % tile_width;
- ccs_y = (y * vsub) % tile_height;
-
- main_plane = ccs_to_main_plane(fb, ccs_plane);
- main_x = intel_fb->normal[main_plane].x % tile_width;
- main_y = intel_fb->normal[main_plane].y % tile_height;
-
- /*
- * CCS doesn't have its own x/y offset register, so the intra CCS tile
- * x/y offsets must match between CCS and the main surface.
- */
- if (main_x != ccs_x || main_y != ccs_y) {
- drm_dbg_kms(&i915->drm,
- "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
- main_x, main_y,
- ccs_x, ccs_y,
- intel_fb->normal[main_plane].x,
- intel_fb->normal[main_plane].y,
- x, y);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void
-intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
-{
- int main_plane = is_ccs_plane(fb, color_plane) ?
- ccs_to_main_plane(fb, color_plane) : 0;
- int main_hsub, main_vsub;
- int hsub, vsub;
-
- intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
- *w = fb->width / main_hsub / hsub;
- *h = fb->height / main_vsub / vsub;
-}
-
-/*
- * Setup the rotated view for an FB plane and return the size the GTT mapping
- * requires for this view.
- */
-static u32
-setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
- u32 gtt_offset_rotated, int x, int y,
- unsigned int width, unsigned int height,
- unsigned int tile_size,
- unsigned int tile_width, unsigned int tile_height,
- struct drm_framebuffer *fb)
-{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_rotation_info *rot_info = &intel_fb->rot_info;
- unsigned int pitch_tiles;
- struct drm_rect r;
-
- /* Y or Yf modifiers required for 90/270 rotation */
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED)
- return 0;
-
- if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
- return 0;
-
- rot_info->plane[plane] = *plane_info;
-
- intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
-
- /* rotate the x/y offsets to match the GTT view */
- drm_rect_init(&r, x, y, width, height);
- drm_rect_rotate(&r,
- plane_info->width * tile_width,
- plane_info->height * tile_height,
- DRM_MODE_ROTATE_270);
- x = r.x1;
- y = r.y1;
-
- /* rotate the tile dimensions to match the GTT view */
- pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
- swap(tile_width, tile_height);
-
- /*
- * We only keep the x/y offsets, so push all of the
- * gtt offset into the x/y offsets.
- */
- intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset_rotated * tile_size, 0);
-
- /*
- * First pixel of the framebuffer from
- * the start of the rotated gtt mapping.
- */
- intel_fb->rotated[plane].x = x;
- intel_fb->rotated[plane].y = y;
-
- return plane_info->width * plane_info->height;
-}
-
-static int
-intel_fill_fb_info(struct drm_i915_private *dev_priv,
- struct drm_framebuffer *fb)
-{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- u32 gtt_offset_rotated = 0;
- unsigned int max_size = 0;
- int i, num_planes = fb->format->num_planes;
- unsigned int tile_size = intel_tile_size(dev_priv);
-
- for (i = 0; i < num_planes; i++) {
- unsigned int width, height;
- unsigned int cpp, size;
- u32 offset;
- int x, y;
- int ret;
-
- /*
- * Plane 2 of Render Compression with Clear Color fb modifier
- * is consumed by the driver and not passed to DE. Skip the
- * arithmetic related to alignment and offset calculation.
- */
- if (is_gen12_ccs_cc_plane(fb, i)) {
- if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
- continue;
- else
- return -EINVAL;
- }
-
- cpp = fb->format->cpp[i];
- intel_fb_plane_dims(&width, &height, fb, i);
-
- ret = intel_fb_offset_to_xy(&x, &y, fb, i);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
- return ret;
- }
-
- ret = intel_fb_check_ccs_xy(fb, i, x, y);
- if (ret)
- return ret;
-
- /*
- * The fence (if used) is aligned to the start of the object
- * so having the framebuffer wrap around across the edge of the
- * fenced region doesn't really work. We have no API to configure
- * the fence start offset within the object (nor could we probably
- * on gen2/3). So it's easier to just require that the
- * fb layout agrees with the fence layout. We already check that the
- * fb stride matches the fence stride elsewhere.
- */
- if (i == 0 && i915_gem_object_is_tiled(obj) &&
- (x + width) * cpp > fb->pitches[i]) {
- drm_dbg_kms(&dev_priv->drm,
- "bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
- return -EINVAL;
- }
-
- /*
- * First pixel of the framebuffer from
- * the start of the normal gtt mapping.
- */
- intel_fb->normal[i].x = x;
- intel_fb->normal[i].y = y;
-
- offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
- fb->pitches[i],
- DRM_MODE_ROTATE_0,
- tile_size);
- offset /= tile_size;
-
- if (!is_surface_linear(fb, i)) {
- struct intel_remapped_plane_info plane_info;
- unsigned int tile_width, tile_height;
-
- intel_tile_dims(fb, i, &tile_width, &tile_height);
-
- plane_info.offset = offset;
- plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
- tile_width * cpp);
- plane_info.width = DIV_ROUND_UP(x + width, tile_width);
- plane_info.height = DIV_ROUND_UP(y + height,
- tile_height);
-
- /* how many tiles does this plane need */
- size = plane_info.stride * plane_info.height;
- /*
- * If the plane isn't horizontally tile aligned,
- * we need one more tile.
- */
- if (x != 0)
- size++;
-
- gtt_offset_rotated +=
- setup_fb_rotation(i, &plane_info,
- gtt_offset_rotated,
- x, y, width, height,
- tile_size,
- tile_width, tile_height,
- fb);
- } else {
- size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
- x * cpp, tile_size);
- }
-
- /* how many tiles in total needed in the bo */
- max_size = max(max_size, offset + size);
- }
-
- if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
- drm_dbg_kms(&dev_priv->drm,
- "fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
- return -EINVAL;
- }
-
- return 0;
-}
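-
-/*
- * A worked example of the linear size calculation above, assuming a
- * 1920x1080 XRGB8888 linear fb (pitch 7680, x = y = 0, 4k tile_size):
- * size = DIV_ROUND_UP(1080 * 7680, 4096) = 2025 tiles, so the bo must
- * be at least 2025 * 4096 bytes or the fb is rejected as too big.
- */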
-
-static void
-intel_plane_remap_gtt(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_rotation_info *info = &plane_state->view.rotated;
- unsigned int rotation = plane_state->hw.rotation;
- int i, num_planes = fb->format->num_planes;
- unsigned int tile_size = intel_tile_size(dev_priv);
- unsigned int src_x, src_y;
- unsigned int src_w, src_h;
- u32 gtt_offset = 0;
-
- memset(&plane_state->view, 0, sizeof(plane_state->view));
- plane_state->view.type = drm_rotation_90_or_270(rotation) ?
- I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
-
- src_x = plane_state->uapi.src.x1 >> 16;
- src_y = plane_state->uapi.src.y1 >> 16;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-
- drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
-
- /* Make src coordinates relative to the viewport */
- drm_rect_translate(&plane_state->uapi.src,
- -(src_x << 16), -(src_y << 16));
-
- /* Rotate src coordinates to match rotated GTT view */
- if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->uapi.src,
- src_w << 16, src_h << 16,
- DRM_MODE_ROTATE_270);
-
- for (i = 0; i < num_planes; i++) {
- unsigned int hsub = i ? fb->format->hsub : 1;
- unsigned int vsub = i ? fb->format->vsub : 1;
- unsigned int cpp = fb->format->cpp[i];
- unsigned int tile_width, tile_height;
- unsigned int width, height;
- unsigned int pitch_tiles;
- unsigned int x, y;
- u32 offset;
-
- intel_tile_dims(fb, i, &tile_width, &tile_height);
-
- x = src_x / hsub;
- y = src_y / vsub;
- width = src_w / hsub;
- height = src_h / vsub;
-
- /*
- * First pixel of the src viewport from the
- * start of the normal gtt mapping.
- */
- x += intel_fb->normal[i].x;
- y += intel_fb->normal[i].y;
-
- offset = intel_compute_aligned_offset(dev_priv, &x, &y,
- fb, i, fb->pitches[i],
- DRM_MODE_ROTATE_0, tile_size);
- offset /= tile_size;
-
- drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
- info->plane[i].offset = offset;
- info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
- tile_width * cpp);
- info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
- info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
- if (drm_rotation_90_or_270(rotation)) {
- struct drm_rect r;
-
- /* rotate the x/y offsets to match the GTT view */
- drm_rect_init(&r, x, y, width, height);
- drm_rect_rotate(&r,
- info->plane[i].width * tile_width,
- info->plane[i].height * tile_height,
- DRM_MODE_ROTATE_270);
- x = r.x1;
- y = r.y1;
-
- pitch_tiles = info->plane[i].height;
- plane_state->color_plane[i].stride = pitch_tiles * tile_height;
-
- /* rotate the tile dimensions to match the GTT view */
- swap(tile_width, tile_height);
- } else {
- pitch_tiles = info->plane[i].width;
- plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
- }
-
- /*
- * We only keep the x/y offsets, so push all of the
- * gtt offset into the x/y offsets.
- */
- intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset * tile_size, 0);
-
- gtt_offset += info->plane[i].width * info->plane[i].height;
-
- plane_state->color_plane[i].offset = 0;
- plane_state->color_plane[i].x = x;
- plane_state->color_plane[i].y = y;
- }
-}
-
-int
-intel_plane_compute_gtt(struct intel_plane_state *plane_state)
-{
- const struct intel_framebuffer *fb =
- to_intel_framebuffer(plane_state->hw.fb);
- unsigned int rotation = plane_state->hw.rotation;
- int i, num_planes;
-
- if (!fb)
- return 0;
-
- num_planes = fb->base.format->num_planes;
-
- if (intel_plane_needs_remap(plane_state)) {
- intel_plane_remap_gtt(plane_state);
-
- /*
- * Sometimes even remapping can't overcome
- * the stride limitations :( Can happen with
- * big plane sizes and suitably misaligned
- * offsets.
- */
- return intel_plane_check_stride(plane_state);
- }
-
- intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
-
- for (i = 0; i < num_planes; i++) {
- plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
- plane_state->color_plane[i].offset = 0;
-
- if (drm_rotation_90_or_270(rotation)) {
- plane_state->color_plane[i].x = fb->rotated[i].x;
- plane_state->color_plane[i].y = fb->rotated[i].y;
- } else {
- plane_state->color_plane[i].x = fb->normal[i].x;
- plane_state->color_plane[i].y = fb->normal[i].y;
- }
- }
-
- /* Rotate src coordinates to match rotated GTT view */
- if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->uapi.src,
- fb->base.width << 16, fb->base.height << 16,
- DRM_MODE_ROTATE_270);
-
- return intel_plane_check_stride(plane_state);
-}
-
-static int i9xx_format_to_fourcc(int format)
-{
- switch (format) {
- case DISPPLANE_8BPP:
- return DRM_FORMAT_C8;
- case DISPPLANE_BGRA555:
- return DRM_FORMAT_ARGB1555;
- case DISPPLANE_BGRX555:
- return DRM_FORMAT_XRGB1555;
- case DISPPLANE_BGRX565:
- return DRM_FORMAT_RGB565;
- default:
- case DISPPLANE_BGRX888:
- return DRM_FORMAT_XRGB8888;
- case DISPPLANE_RGBX888:
- return DRM_FORMAT_XBGR8888;
- case DISPPLANE_BGRA888:
- return DRM_FORMAT_ARGB8888;
- case DISPPLANE_RGBA888:
- return DRM_FORMAT_ABGR8888;
- case DISPPLANE_BGRX101010:
- return DRM_FORMAT_XRGB2101010;
- case DISPPLANE_RGBX101010:
- return DRM_FORMAT_XBGR2101010;
- case DISPPLANE_BGRA101010:
- return DRM_FORMAT_ARGB2101010;
- case DISPPLANE_RGBA101010:
- return DRM_FORMAT_ABGR2101010;
- case DISPPLANE_RGBX161616:
- return DRM_FORMAT_XBGR16161616F;
- }
-}
-
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
-{
- switch (format) {
- case PLANE_CTL_FORMAT_RGB_565:
- return DRM_FORMAT_RGB565;
- case PLANE_CTL_FORMAT_NV12:
- return DRM_FORMAT_NV12;
- case PLANE_CTL_FORMAT_XYUV:
- return DRM_FORMAT_XYUV8888;
- case PLANE_CTL_FORMAT_P010:
- return DRM_FORMAT_P010;
- case PLANE_CTL_FORMAT_P012:
- return DRM_FORMAT_P012;
- case PLANE_CTL_FORMAT_P016:
- return DRM_FORMAT_P016;
- case PLANE_CTL_FORMAT_Y210:
- return DRM_FORMAT_Y210;
- case PLANE_CTL_FORMAT_Y212:
- return DRM_FORMAT_Y212;
- case PLANE_CTL_FORMAT_Y216:
- return DRM_FORMAT_Y216;
- case PLANE_CTL_FORMAT_Y410:
- return DRM_FORMAT_XVYU2101010;
- case PLANE_CTL_FORMAT_Y412:
- return DRM_FORMAT_XVYU12_16161616;
- case PLANE_CTL_FORMAT_Y416:
- return DRM_FORMAT_XVYU16161616;
- default:
- case PLANE_CTL_FORMAT_XRGB_8888:
- if (rgb_order) {
- if (alpha)
- return DRM_FORMAT_ABGR8888;
- else
- return DRM_FORMAT_XBGR8888;
- } else {
- if (alpha)
- return DRM_FORMAT_ARGB8888;
- else
- return DRM_FORMAT_XRGB8888;
- }
- case PLANE_CTL_FORMAT_XRGB_2101010:
- if (rgb_order) {
- if (alpha)
- return DRM_FORMAT_ABGR2101010;
- else
- return DRM_FORMAT_XBGR2101010;
- } else {
- if (alpha)
- return DRM_FORMAT_ARGB2101010;
- else
- return DRM_FORMAT_XRGB2101010;
- }
- case PLANE_CTL_FORMAT_XRGB_16161616F:
- if (rgb_order) {
- if (alpha)
- return DRM_FORMAT_ABGR16161616F;
- else
- return DRM_FORMAT_XBGR16161616F;
- } else {
- if (alpha)
- return DRM_FORMAT_ARGB16161616F;
- else
- return DRM_FORMAT_XRGB16161616F;
- }
- }
-}
-
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
@@ -2785,10 +1606,11 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
+ if (IS_DISPLAY_VER(dev_priv, 2) && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void
@@ -2808,6 +1630,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_framebuffer *fb;
struct i915_vma *vma;
+ /*
+ * TODO:
+ * Disable planes if get_initial_plane_config() failed.
+ * Make sure things work if the surface base is not page aligned.
+ */
if (!plane_config->fb)
return;
@@ -2858,11 +1685,9 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
- intel_state->hw.rotation = plane_config->rotation;
- intel_fill_fb_ggtt_view(&intel_state->view, fb,
- intel_state->hw.rotation);
- intel_state->color_plane[0].stride =
- intel_fb_pitch(fb, 0, intel_state->hw.rotation);
+ plane_state->rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
+ &intel_state->view);
__i915_vma_pin(vma);
intel_state->vma = i915_vma_get(vma);
@@ -2880,9 +1705,6 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->uapi.src = drm_plane_state_src(plane_state);
- intel_state->uapi.dst = drm_plane_state_dest(plane_state);
-
if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
@@ -2899,700 +1721,17 @@ valid_fb:
&to_intel_frontbuffer(fb)->bits);
}
-
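-/*
- * Walk the AUX surface base backwards one alignment unit at a time,
- * folding each step into the AUX x/y offsets, until the AUX x/y
- * (relative to the programmed AUX base) matches the main surface x/y,
- * or the AUX base reaches 0, in which case no matching placement
- * exists and false is returned.
- */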
-static bool
-skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
- int main_x, int main_y, u32 main_offset,
- int ccs_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int aux_x = plane_state->color_plane[ccs_plane].x;
- int aux_y = plane_state->color_plane[ccs_plane].y;
- u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
- u32 alignment = intel_surf_alignment(fb, ccs_plane);
- int hsub;
- int vsub;
-
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
- while (aux_offset >= main_offset && aux_y <= main_y) {
- int x, y;
-
- if (aux_x == main_x && aux_y == main_y)
- break;
-
- if (aux_offset == 0)
- break;
-
- x = aux_x / hsub;
- y = aux_y / vsub;
- aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
- plane_state,
- ccs_plane,
- aux_offset,
- aux_offset -
- alignment);
- aux_x = x * hsub + aux_x % hsub;
- aux_y = y * vsub + aux_y % vsub;
- }
-
- if (aux_x != main_x || aux_y != main_y)
- return false;
-
- plane_state->color_plane[ccs_plane].offset = aux_offset;
- plane_state->color_plane[ccs_plane].x = aux_x;
- plane_state->color_plane[ccs_plane].y = aux_y;
-
- return true;
-}
-
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
int x = 0, y = 0;
intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- plane_state->color_plane[0].offset, 0);
+ plane_state->view.color_plane[0].offset, 0);
return y;
}
-static int intel_plane_min_width(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- if (plane->min_width)
- return plane->min_width(fb, color_plane, rotation);
- else
- return 1;
-}
-
-static int intel_plane_max_width(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- if (plane->max_width)
- return plane->max_width(fb, color_plane, rotation);
- else
- return INT_MAX;
-}
-
-static int intel_plane_max_height(struct intel_plane *plane,
- const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- if (plane->max_height)
- return plane->max_height(fb, color_plane, rotation);
- else
- return INT_MAX;
-}
-
-int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
- int *x, int *y, u32 *offset)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const int aux_plane = intel_main_to_aux_plane(fb, 0);
- const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- const u32 alignment = intel_surf_alignment(fb, 0);
- const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- intel_add_fb_offsets(x, y, plane_state, 0);
- *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
- if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
- return -EINVAL;
-
- /*
- * AUX surface offset is specified as the distance from the
- * main surface offset, and it must be non-negative. Make
- * sure that is what we will get.
- */
- if (aux_plane && *offset > aux_offset)
- *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
- *offset,
- aux_offset & ~(alignment - 1));
-
- /*
- * When using an X-tiled surface, the plane blows up
- * if the x offset + width exceed the stride.
- *
-	 * TODO: linear and Y-tiled seem fine, Yf is untested.
- */
- if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
- int cpp = fb->format->cpp[0];
-
- while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
- if (*offset == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to X-tiling\n");
- return -EINVAL;
- }
-
- *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
- *offset,
- *offset - alignment);
- }
- }
-
- return 0;
-}
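-
-/*
- * A note on how the X-tiled fixup above converges: each iteration moves
- * the base back by one alignment unit and the adjust helper folds the
- * difference into the x/y offsets, wrapping x modulo the stride in
- * pixels. x therefore steps through a cycle of values until
- * (x + w) * cpp fits within the stride, or the base reaches 0 and we
- * give up.
- */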
-
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const unsigned int rotation = plane_state->hw.rotation;
- int x = plane_state->uapi.src.x1 >> 16;
- int y = plane_state->uapi.src.y1 >> 16;
- const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
- const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
- const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
- const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
- const int aux_plane = intel_main_to_aux_plane(fb, 0);
- const u32 alignment = intel_surf_alignment(fb, 0);
- u32 offset;
- int ret;
-
- if (w > max_width || w < min_width || h > max_height) {
- drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
- w, h, min_width, max_width, max_height);
- return -EINVAL;
- }
-
- ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
- if (ret)
- return ret;
-
- /*
- * CCS AUX surface doesn't have its own x/y offsets, we must make sure
- * they match with the main surface x/y offsets.
- */
- if (is_ccs_modifier(fb->modifier)) {
- while (!skl_check_main_ccs_coordinates(plane_state, x, y,
- offset, aux_plane)) {
- if (offset == 0)
- break;
-
- offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
- offset, offset - alignment);
- }
-
- if (x != plane_state->color_plane[aux_plane].x ||
- y != plane_state->color_plane[aux_plane].y) {
- drm_dbg_kms(&dev_priv->drm,
- "Unable to find suitable display surface offset due to CCS\n");
- return -EINVAL;
- }
- }
-
- drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
-
- plane_state->color_plane[0].offset = offset;
- plane_state->color_plane[0].x = x;
- plane_state->color_plane[0].y = y;
-
- /*
- * Put the final coordinates back so that the src
- * coordinate checks will see the right values.
- */
- drm_rect_translate_to(&plane_state->uapi.src,
- x << 16, y << 16);
-
- return 0;
-}
-
-static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int uv_plane = 1;
- int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
- int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
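-	/*
-	 * The >> 17 below folds the 16.16 fixed-point shift (>> 16) and
-	 * the 4:2:0 chroma subsampling (/ 2) into a single shift.
-	 */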
- int x = plane_state->uapi.src.x1 >> 17;
- int y = plane_state->uapi.src.y1 >> 17;
- int w = drm_rect_width(&plane_state->uapi.src) >> 17;
- int h = drm_rect_height(&plane_state->uapi.src) >> 17;
- u32 offset;
-
- /* FIXME not quite sure how/if these apply to the chroma plane */
- if (w > max_width || h > max_height) {
- drm_dbg_kms(&i915->drm,
- "CbCr source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
- return -EINVAL;
- }
-
- intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
- offset = intel_plane_compute_aligned_offset(&x, &y,
- plane_state, uv_plane);
-
- if (is_ccs_modifier(fb->modifier)) {
- int ccs_plane = main_to_ccs_plane(fb, uv_plane);
- u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
- u32 alignment = intel_surf_alignment(fb, uv_plane);
-
- if (offset > aux_offset)
- offset = intel_plane_adjust_aligned_offset(&x, &y,
- plane_state,
- uv_plane,
- offset,
- aux_offset & ~(alignment - 1));
-
- while (!skl_check_main_ccs_coordinates(plane_state, x, y,
- offset, ccs_plane)) {
- if (offset == 0)
- break;
-
- offset = intel_plane_adjust_aligned_offset(&x, &y,
- plane_state,
- uv_plane,
- offset, offset - alignment);
- }
-
- if (x != plane_state->color_plane[ccs_plane].x ||
- y != plane_state->color_plane[ccs_plane].y) {
- drm_dbg_kms(&i915->drm,
- "Unable to find suitable display surface offset due to CCS\n");
- return -EINVAL;
- }
- }
-
- drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
-
- plane_state->color_plane[uv_plane].offset = offset;
- plane_state->color_plane[uv_plane].x = x;
- plane_state->color_plane[uv_plane].y = y;
-
- return 0;
-}
-
-static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int src_x = plane_state->uapi.src.x1 >> 16;
- int src_y = plane_state->uapi.src.y1 >> 16;
- u32 offset;
- int ccs_plane;
-
- for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
- int main_hsub, main_vsub;
- int hsub, vsub;
- int x, y;
-
- if (!is_ccs_plane(fb, ccs_plane) ||
- is_gen12_ccs_cc_plane(fb, ccs_plane))
- continue;
-
- intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
- ccs_to_main_plane(fb, ccs_plane));
- intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
-
- hsub *= main_hsub;
- vsub *= main_vsub;
- x = src_x / hsub;
- y = src_y / vsub;
-
- intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
-
- offset = intel_plane_compute_aligned_offset(&x, &y,
- plane_state,
- ccs_plane);
-
- plane_state->color_plane[ccs_plane].offset = offset;
- plane_state->color_plane[ccs_plane].x = (x * hsub +
- src_x % hsub) /
- main_hsub;
- plane_state->color_plane[ccs_plane].y = (y * vsub +
- src_y % vsub) /
- main_vsub;
- }
-
- return 0;
-}
-
-int skl_check_plane_surface(struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret, i;
-
- ret = intel_plane_compute_gtt(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- /*
- * Handle the AUX surface first since the main surface setup depends on
- * it.
- */
- if (is_ccs_modifier(fb->modifier)) {
- ret = skl_check_ccs_aux_surface(plane_state);
- if (ret)
- return ret;
- }
-
- if (intel_format_info_is_yuv_semiplanar(fb->format,
- fb->modifier)) {
- ret = skl_check_nv12_aux_surface(plane_state);
- if (ret)
- return ret;
- }
-
- for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
- plane_state->color_plane[i].offset = 0;
- plane_state->color_plane[i].x = 0;
- plane_state->color_plane[i].y = 0;
- }
-
- ret = skl_check_main_surface(plane_state);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/*
- * This function detaches (a.k.a. unbinds) unused scalers in hardware
- */
-static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int i;
-
- /* loop through and disable scalers that aren't in use */
- for (i = 0; i < intel_crtc->num_scalers; i++) {
- if (!scaler_state->scalers[i].in_use)
- skl_detach_scaler(intel_crtc, i);
- }
-}
-
-static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
- int color_plane, unsigned int rotation)
-{
- /*
-	 * The stride is either expressed in units of 64 byte chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- if (is_surface_linear(fb, color_plane))
- return 64;
- else if (drm_rotation_90_or_270(rotation))
- return intel_tile_height(fb, color_plane);
- else
- return intel_tile_width_bytes(fb, color_plane);
-}
-
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int color_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 stride = plane_state->color_plane[color_plane].stride;
-
- if (color_plane >= fb->format->num_planes)
- return 0;
-
- return stride / skl_plane_stride_mult(fb, color_plane, rotation);
-}
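-
-/*
- * A worked example of the stride units, assuming a 4 bpp fb with a byte
- * stride of 4096: a linear fb programs 4096 / 64 = 64 (64 byte chunks),
- * while a Y-tiled fb (128 byte wide tiles) programs 4096 / 128 = 32
- * tiles. For 90/270 rotation the stride is kept in rows and the divisor
- * is the tile height instead (32 for Y-tile).
- */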
-
-static u32 skl_plane_ctl_format(u32 pixel_format)
-{
- switch (pixel_format) {
- case DRM_FORMAT_C8:
- return PLANE_CTL_FORMAT_INDEXED;
- case DRM_FORMAT_RGB565:
- return PLANE_CTL_FORMAT_RGB_565;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- return PLANE_CTL_FORMAT_XRGB_2101010;
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- return PLANE_CTL_FORMAT_XRGB_16161616F;
- case DRM_FORMAT_XYUV8888:
- return PLANE_CTL_FORMAT_XYUV;
- case DRM_FORMAT_YUYV:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
- case DRM_FORMAT_YVYU:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
- case DRM_FORMAT_UYVY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
- case DRM_FORMAT_VYUY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
- case DRM_FORMAT_NV12:
- return PLANE_CTL_FORMAT_NV12;
- case DRM_FORMAT_P010:
- return PLANE_CTL_FORMAT_P010;
- case DRM_FORMAT_P012:
- return PLANE_CTL_FORMAT_P012;
- case DRM_FORMAT_P016:
- return PLANE_CTL_FORMAT_P016;
- case DRM_FORMAT_Y210:
- return PLANE_CTL_FORMAT_Y210;
- case DRM_FORMAT_Y212:
- return PLANE_CTL_FORMAT_Y212;
- case DRM_FORMAT_Y216:
- return PLANE_CTL_FORMAT_Y216;
- case DRM_FORMAT_XVYU2101010:
- return PLANE_CTL_FORMAT_Y410;
- case DRM_FORMAT_XVYU12_16161616:
- return PLANE_CTL_FORMAT_Y412;
- case DRM_FORMAT_XVYU16161616:
- return PLANE_CTL_FORMAT_Y416;
- default:
- MISSING_CASE(pixel_format);
- }
-
- return 0;
-}
-
-static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
-{
- if (!plane_state->hw.fb->format->has_alpha)
- return PLANE_CTL_ALPHA_DISABLE;
-
- switch (plane_state->hw.pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return PLANE_CTL_ALPHA_DISABLE;
- case DRM_MODE_BLEND_PREMULTI:
- return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- case DRM_MODE_BLEND_COVERAGE:
- return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
- default:
- MISSING_CASE(plane_state->hw.pixel_blend_mode);
- return PLANE_CTL_ALPHA_DISABLE;
- }
-}
-
-static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
-{
- if (!plane_state->hw.fb->format->has_alpha)
- return PLANE_COLOR_ALPHA_DISABLE;
-
- switch (plane_state->hw.pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return PLANE_COLOR_ALPHA_DISABLE;
- case DRM_MODE_BLEND_PREMULTI:
- return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
- case DRM_MODE_BLEND_COVERAGE:
- return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
- default:
- MISSING_CASE(plane_state->hw.pixel_blend_mode);
- return PLANE_COLOR_ALPHA_DISABLE;
- }
-}
-
-static u32 skl_plane_ctl_tiling(u64 fb_modifier)
-{
- switch (fb_modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- break;
- case I915_FORMAT_MOD_X_TILED:
- return PLANE_CTL_TILED_X;
- case I915_FORMAT_MOD_Y_TILED:
- return PLANE_CTL_TILED_Y;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- return PLANE_CTL_TILED_Y |
- PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
- PLANE_CTL_CLEAR_COLOR_DISABLE;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
- case I915_FORMAT_MOD_Yf_TILED:
- return PLANE_CTL_TILED_YF;
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
- default:
- MISSING_CASE(fb_modifier);
- }
-
- return 0;
-}
-
-static u32 skl_plane_ctl_rotate(unsigned int rotate)
-{
- switch (rotate) {
- case DRM_MODE_ROTATE_0:
- break;
- /*
-	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
-	 * while i915 HW rotation is clockwise; that's why the values are swapped here.
- */
- case DRM_MODE_ROTATE_90:
- return PLANE_CTL_ROTATE_270;
- case DRM_MODE_ROTATE_180:
- return PLANE_CTL_ROTATE_180;
- case DRM_MODE_ROTATE_270:
- return PLANE_CTL_ROTATE_90;
- default:
- MISSING_CASE(rotate);
- }
-
- return 0;
-}
-
-static u32 cnl_plane_ctl_flip(unsigned int reflect)
-{
- switch (reflect) {
- case 0:
- break;
- case DRM_MODE_REFLECT_X:
- return PLANE_CTL_FLIP_HORIZONTAL;
- case DRM_MODE_REFLECT_Y:
- default:
- MISSING_CASE(reflect);
- }
-
- return 0;
-}
-
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 plane_ctl = 0;
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- return plane_ctl;
-
- if (crtc_state->gamma_enable)
- plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
-
- return plane_ctl;
-}
-
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 plane_ctl;
-
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
- plane_ctl |= skl_plane_ctl_alpha(plane_state);
- plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
- if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
- plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
-
- if (INTEL_GEN(dev_priv) >= 10)
- plane_ctl |= cnl_plane_ctl_flip(rotation &
- DRM_MODE_REFLECT_MASK);
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
-
- return plane_ctl;
-}
-
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 plane_color_ctl = 0;
-
- if (INTEL_GEN(dev_priv) >= 11)
- return plane_color_ctl;
-
- if (crtc_state->gamma_enable)
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
-
- if (crtc_state->csc_enable)
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
-
- return plane_color_ctl;
-}
-
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- u32 plane_color_ctl = 0;
-
- plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
- plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
-
- if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- switch (plane_state->hw.color_encoding) {
- case DRM_COLOR_YCBCR_BT709:
- plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
- break;
- case DRM_COLOR_YCBCR_BT2020:
- plane_color_ctl |=
- PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
- break;
- default:
- plane_color_ctl |=
- PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
- }
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
- } else if (fb->format->is_yuv) {
- plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
- }
-
- return plane_color_ctl;
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -4157,461 +2296,6 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
}
}
-/*
- * The hardware phase 0.0 refers to the center of the pixel.
- * We want to start from the top/left edge which is phase
- * -0.5. That matches how the hardware calculates the scaling
- * factors (from top-left of the first pixel to bottom-right
- * of the last pixel, as opposed to the pixel centers).
- *
- * For 4:2:0 subsampled chroma planes we obviously have to
- * adjust that so that the chroma sample position lands in
- * the right spot.
- *
- * Note that for packed YCbCr 4:2:2 formats there is no way to
- * control chroma siting. The hardware simply replicates the
- * chroma samples for both of the luma samples, and thus we don't
- * actually get the expected MPEG2 chroma siting convention :(
- * The same behaviour is observed on pre-SKL platforms as well.
- *
- * Theory behind the formula (note that we ignore sub-pixel
- * source coordinates):
- * s = source sample position
- * d = destination sample position
- *
- * Downscaling 4:1:
- * -0.5
- * | 0.0
- * | | 1.5 (initial phase)
- * | | |
- * v v v
- * | s | s | s | s |
- * | d |
- *
- * Upscaling 1:4:
- * -0.5
- * | -0.375 (initial phase)
- * | | 0.0
- * | | |
- * v v v
- * | s |
- * | d | d | d | d |
- */
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
-{
- int phase = -0x8000;
- u16 trip = 0;
-
- if (chroma_cosited)
- phase += (sub - 1) * 0x8000 / sub;
-
- phase += scale / (2 * sub);
-
- /*
- * Hardware initial phase limited to [-0.5:1.5].
- * Since the max hardware scale factor is 3.0, we
-	 * should never actually exceed 1.0 here.
- */
- WARN_ON(phase < -0x8000 || phase > 0x18000);
-
- if (phase < 0)
- phase = 0x10000 + phase;
- else
- trip = PS_PHASE_TRIP;
-
- return ((phase >> 2) & PS_PHASE_MASK) | trip;
-}
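-
-/*
- * A worked example of the phase formula, assuming 4:2:0 cosited chroma
- * (sub = 2) at a 2:1 downscale (scale = 0x20000 in .16 fixed point):
- *   phase  = -0x8000                          (-0.5)
- *   phase += (2 - 1) * 0x8000 / 2 = 0x4000    (cosite shift, +0.25)
- *   phase += 0x20000 / (2 * 2)    = 0x8000    (scale term, +0.5)
- * giving phase = 0x4000 (+0.25). Since phase >= 0, PS_PHASE_TRIP is set
- * and the register value is ((0x4000 >> 2) & PS_PHASE_MASK) | PS_PHASE_TRIP.
- */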
-
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
-
-static int
-skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
- unsigned int scaler_user, int *scaler_id,
- int src_w, int src_h, int dst_w, int dst_h,
- const struct drm_format_info *format,
- u64 modifier, bool need_scaler)
-{
- struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- if (src_w != dst_w || src_h != dst_h)
- need_scaler = true;
-
- /*
- * Scaling/fitting not supported in IF-ID mode in GEN9+
- * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
- * Once NV12 is enabled, handle it here while allocating scaler
- * for NV12.
- */
- if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
- need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
- "Pipe/Plane scaling not supported with IF-ID mode\n");
- return -EINVAL;
- }
-
- /*
-	 * If the plane is being disabled, the scaler is no longer required,
-	 * or a force detach was requested:
-	 *  - free the scaler bound to this plane/crtc
-	 *  - to do this, update crtc->scaler_usage
-	 *
-	 * Here the scaler state in crtc_state is marked free so that the
-	 * scaler can be assigned to another user. The actual register
-	 * update to free the scaler is done in plane/panel-fit programming.
-	 * For that reason crtc/plane_state->scaler_id isn't reset here.
- */
- if (force_detach || !need_scaler) {
- if (*scaler_id >= 0) {
- scaler_state->scaler_users &= ~(1 << scaler_user);
- scaler_state->scalers[*scaler_id].in_use = 0;
-
- drm_dbg_kms(&dev_priv->drm,
- "scaler_user index %u.%u: "
- "Staged freeing scaler id %d scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, *scaler_id,
- scaler_state->scaler_users);
- *scaler_id = -1;
- }
- return 0;
- }
-
- if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
- (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(&dev_priv->drm,
- "Planar YUV: src dimensions not met\n");
- return -EINVAL;
- }
-
- /* range checks */
- if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
- dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (INTEL_GEN(dev_priv) >= 11 &&
- (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
- dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (INTEL_GEN(dev_priv) < 11 &&
- (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
- dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
- drm_dbg_kms(&dev_priv->drm,
- "scaler_user index %u.%u: src %ux%u dst %ux%u "
- "size is out of scaler range\n",
- intel_crtc->pipe, scaler_user, src_w, src_h,
- dst_w, dst_h);
- return -EINVAL;
- }
-
- /* mark this plane as a scaler user in crtc_state */
- scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
- "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
- scaler_state->scaler_users);
-
- return 0;
-}
-
-static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
-{
- const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int width, height;
-
- if (crtc_state->pch_pfit.enabled) {
- width = drm_rect_width(&crtc_state->pch_pfit.dst);
- height = drm_rect_height(&crtc_state->pch_pfit.dst);
- } else {
- width = pipe_mode->crtc_hdisplay;
- height = pipe_mode->crtc_vdisplay;
- }
- return skl_update_scaler(crtc_state, !crtc_state->hw.active,
- SKL_CRTC_INDEX,
- &crtc_state->scaler_state.scaler_id,
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- width, height, NULL, 0,
- crtc_state->pch_pfit.enabled);
-}
-
-/**
- * skl_update_scaler_plane - Stages update to scaler state for a given plane.
- * @crtc_state: atomic crtc state containing the scaler state
- * @plane_state: atomic plane state to update
- *
- * Return:
- * 0 - scaler_usage updated successfully
- * error - requested scaling cannot be supported or other error condition
- */
-static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *intel_plane =
- to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
- bool force_detach = !fb || !plane_state->uapi.visible;
- bool need_scaler = false;
-
- /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- need_scaler = true;
-
- ret = skl_update_scaler(crtc_state, force_detach,
- drm_plane_index(&intel_plane->base),
- &plane_state->scaler_id,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- drm_rect_height(&plane_state->uapi.src) >> 16,
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst),
- fb ? fb->format : NULL,
- fb ? fb->modifier : 0,
- need_scaler);
-
- if (ret || plane_state->scaler_id < 0)
- return ret;
-
- /* check colorkey */
- if (plane_state->ckey.flags) {
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] scaling with color key not allowed",
- intel_plane->base.base.id,
- intel_plane->base.name);
- return -EINVAL;
- }
-
- /* Check src format */
- switch (fb->format->format) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- break;
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- if (INTEL_GEN(dev_priv) >= 11)
- break;
- fallthrough;
- default:
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->format->format);
- return -EINVAL;
- }
-
- return 0;
-}
-
-void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- int i;
-
- for (i = 0; i < crtc->num_scalers; i++)
- skl_detach_scaler(crtc, i);
-}
-
-static int cnl_coef_tap(int i)
-{
- return i % 7;
-}
-
-static u16 cnl_nearest_filter_coef(int t)
-{
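-	/*
-	 * 0x0800 appears to encode 1.0 and 0x3000 to encode 0.0 in the
-	 * scaler coefficient format (an assumption based on the
-	 * nearest-neighbor description below): the center tap (t == 3)
-	 * gets weight 1, every other tap weight 0.
-	 */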
- return t == 3 ? 0x0800 : 0x3000;
-}
-
-/*
- * Theory behind setting nearest-neighbor integer scaling:
- *
- * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
- * The letter represents the filter tap (D is the center tap) and the number
- * represents the coefficient set for a phase (0-16).
- *
- * +------------+------------------------+------------------------+
- * |Index value |Data value coefficient 1|Data value coefficient 2|
- * +------------+------------------------+------------------------+
- * |    00h     |           B0           |           A0           |
- * +------------+------------------------+------------------------+
- * |    01h     |           D0           |           C0           |
- * +------------+------------------------+------------------------+
- * |    02h     |           F0           |           E0           |
- * +------------+------------------------+------------------------+
- * |    03h     |           A1           |           G0           |
- * +------------+------------------------+------------------------+
- * |    04h     |           C1           |           B1           |
- * +------------+------------------------+------------------------+
- * |    ...     |           ...          |           ...          |
- * +------------+------------------------+------------------------+
- * |    38h     |           B16          |           A16          |
- * +------------+------------------------+------------------------+
- * |    39h     |           D16          |           C16          |
- * +------------+------------------------+------------------------+
- * |    3Ah     |           F16          |           E16          |
- * +------------+------------------------+------------------------+
- * |    3Bh     |        Reserved        |           G16          |
- * +------------+------------------------+------------------------+
- *
- * To enable nearest-neighbor scaling: program scaler coefficients with
- * the center tap (Dxx) values set to 1 and all other values set to 0 as per
- * SCALER_COEFFICIENT_FORMAT
- *
- */
-
-static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
- enum pipe pipe, int id, int set)
-{
- int i;
-
- intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
- PS_COEE_INDEX_AUTO_INC);
-
- for (i = 0; i < 17 * 7; i += 2) {
- u32 tmp;
- int t;
-
- t = cnl_coef_tap(i);
- tmp = cnl_nearest_filter_coef(t);
-
- t = cnl_coef_tap(i + 1);
- tmp |= cnl_nearest_filter_coef(t) << 16;
-
- intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
- tmp);
- }
-
- intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
-}
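-
-/*
- * The programming loop above packs two 16-bit coefficients per 32-bit
- * register write: taps i and i + 1 land in the low and high halves
- * respectively, so the 17 * 7 = 119 coefficients take 60 writes, with
- * the high half of the last write unused (the Reserved slot at index
- * 3Bh in the table above).
- */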
-
-u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
-{
- if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
- return (PS_FILTER_PROGRAMMED |
- PS_Y_VERT_FILTER_SELECT(set) |
- PS_Y_HORZ_FILTER_SELECT(set) |
- PS_UV_VERT_FILTER_SELECT(set) |
- PS_UV_HORZ_FILTER_SELECT(set));
- }
-
- return PS_FILTER_MEDIUM;
-}
-
-void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
- int id, int set, enum drm_scaling_filter filter)
-{
- switch (filter) {
- case DRM_SCALING_FILTER_DEFAULT:
- break;
- case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
- break;
- default:
- MISSING_CASE(filter);
- }
-}
-
-static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct drm_rect src = {
- .x2 = crtc_state->pipe_src_w << 16,
- .y2 = crtc_state->pipe_src_h << 16,
- };
- const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
- u16 uv_rgb_hphase, uv_rgb_vphase;
- enum pipe pipe = crtc->pipe;
- int width = drm_rect_width(dst);
- int height = drm_rect_height(dst);
- int x = dst->x1;
- int y = dst->y1;
- int hscale, vscale;
- unsigned long irqflags;
- int id;
- u32 ps_ctrl;
-
- if (!crtc_state->pch_pfit.enabled)
- return;
-
- if (drm_WARN_ON(&dev_priv->drm,
- crtc_state->scaler_state.scaler_id < 0))
- return;
-
- hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
- vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
-
- uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
-
- id = scaler_state->scaler_id;
-
- ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- skl_scaler_setup_filter(dev_priv, pipe, id, 0,
- crtc_state->hw.scaling_filter);
-
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
-
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
- x << 16 | y);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
- width << 16 | height);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -4785,7 +2469,7 @@ static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
return false;
/* WA Display #0827: Gen9:all */
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
+ if (IS_DISPLAY_VER(dev_priv, 9))
return true;
return false;
@@ -4796,7 +2480,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_2006604312:icl,ehl */
- if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
+ if (crtc_state->scaler_state.scaler_users > 0 && IS_DISPLAY_VER(dev_priv, 11))
return true;
return false;
@@ -4996,7 +2680,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* chance of catching underruns with the intermediate watermarks
* vs. the old plane configuration.
*/
- if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+ if (IS_DISPLAY_VER(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
@@ -5394,7 +3078,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
val = MBUS_DBOX_A_CREDIT(2);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(12);
} else {
@@ -5492,7 +3176,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
}
intel_set_pipe_src_size(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
@@ -5515,12 +3199,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
- psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+ psl_clkgate_wa = IS_DISPLAY_VER(dev_priv, 10) &&
new_crtc_state->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_pfit_enable(new_crtc_state);
else
ilk_pfit_enable(new_crtc_state);
@@ -5532,24 +3216,22 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_color_load_luts(new_crtc_state);
intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma/csc for pipe bottom color */
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
intel_disable_primary_plane(new_crtc_state);
hsw_set_linetime_wm(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
if (dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(state, crtc);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_pipe_mbus_enable(crtc);
- if (new_crtc_state->bigjoiner_slave) {
- trace_intel_pipe_enable(crtc);
+ if (new_crtc_state->bigjoiner_slave)
intel_crtc_vblank_on(new_crtc_state);
- }
intel_encoders_enable(state, crtc);
@@ -5680,11 +3362,13 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
else if (IS_JSL_EHL(dev_priv))
return phy <= PHY_C;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11)
return phy <= PHY_B;
else
return false;
@@ -5692,11 +3376,9 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- return false;
- else if (INTEL_GEN(dev_priv) >= 12)
+ if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
- else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
+ else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
else
return false;
@@ -5704,7 +3386,9 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
- if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
+ if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ return PHY_B + port - PORT_TC1;
+ else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
else if (IS_JSL_EHL(i915) && port == PORT_D)
return PHY_A;
@@ -5717,7 +3401,7 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
return TC_PORT_NONE;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return TC_PORT_1 + port - PORT_TC1;
else
return TC_PORT_1 + port - PORT_C;
@@ -5969,7 +3653,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
- if (!IS_GEN(dev_priv, 2))
+ if (!IS_DISPLAY_VER(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(state, crtc);
@@ -5994,7 +3678,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
/* prevents spurious underruns */
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -6025,7 +3709,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(state, crtc);
@@ -6049,7 +3733,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_pll_disable(state, crtc);
- if (!IS_GEN(dev_priv, 2))
+ if (!IS_DISPLAY_VER(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
@@ -6288,7 +3972,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
- return INTEL_GEN(dev_priv) < 4 &&
+ return DISPLAY_VER(dev_priv) < 4 &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
@@ -6380,8 +4064,30 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state
pipe_mode->crtc_clock /= 2;
}
- intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
+ if (crtc_state->splitter.enable) {
+ int n = crtc_state->splitter.link_count;
+ int overlap = crtc_state->splitter.pixel_overlap;
+
+ /*
+ * eDP MSO uses segment timings from EDID for transcoder
+ * timings, but full mode for everything else.
+ *
+ * h_full = (h_segment - pixel_overlap) * link_count
+ */
+ pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
+ pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
+ pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
+ pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
+ pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
+ pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
+ pipe_mode->crtc_clock *= n;
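+
+		/*
+		 * A worked example of the h_full formula, assuming two MSO
+		 * links (n = 2) with an 8 pixel overlap and a 1928 pixel
+		 * wide EDID segment: h_full = (1928 - 8) * 2 = 3840; the
+		 * other horizontal timings and the pixel clock scale the
+		 * same way, while the transcoder keeps the per-segment
+		 * timings.
+		 */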
+
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
+ intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
+ } else {
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
+ intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
+ }
intel_crtc_compute_pixel_rate(crtc_state);
@@ -6419,9 +4125,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
pipe_config->pipe_src_w /= 2;
}
+ if (pipe_config->splitter.enable) {
+ int n = pipe_config->splitter.link_count;
+ int overlap = pipe_config->splitter.pixel_overlap;
+
+ pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
+ pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
+ pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
+ pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
+ pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
+ pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
+ pipe_mode->crtc_clock *= n;
+ }
+
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- if (INTEL_GEN(dev_priv) < 4) {
+ if (DISPLAY_VER(dev_priv) < 4) {
clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
@@ -6467,7 +4186,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
return -EINVAL;
@@ -6554,35 +4273,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
- pipe)
-{
- u32 reg_val;
-
- /*
-	 * The PLLB opamp always calibrates to the max value of 0x3f, so force
-	 * enable it and set it to a reasonable value instead.
- */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
- reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
-
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
- reg_val &= 0x00ffffff;
- reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
-}
-
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n)
{
@@ -6607,7 +4297,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
* Strictly speaking some registers are available before
* gen7, but we only support DRRS on gen7+
*/
- return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
+ return IS_DISPLAY_VER(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -6619,7 +4309,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
enum pipe pipe = crtc->pipe;
enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (DISPLAY_VER(dev_priv) >= 5) {
intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
TU_SIZE(m_n->tu) | m_n->gmch_m);
intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
@@ -6678,267 +4368,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
-static void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- u32 mdiv;
- u32 bestn, bestm1, bestm2, bestp1, bestp2;
- u32 coreclk, reg_val;
-
- /* Enable Refclk */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
-
- bestn = pipe_config->dpll.n;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
-
- /* See eDP HDMI DPIO driver vbios notes doc */
-
- /* PLL B needs special handling */
- if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, pipe);
-
- /* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
-
- /* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
- reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
-
- /* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
-
- /* Set idtafcrecal before PLL is enabled */
- mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
- mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
- mdiv |= ((bestn << DPIO_N_SHIFT));
- mdiv |= (1 << DPIO_K_SHIFT);
-
- /*
- * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
- * but we don't support that).
- * Note: don't use the DAC post divider as it seems unstable.
- */
- mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
-
- mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
-
- /* Set HBR and RBR LPF coefficients */
- if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
- 0x009f0003);
- else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
- 0x00d0000f);
-
- if (intel_crtc_has_dp_encoder(pipe_config)) {
- /* Use SSC source */
- if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df40000);
- else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df70000);
- } else { /* HDMI or VGA */
- /* Use bend source */
- if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df70000);
- else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
- 0x0df40000);
- }
-
- coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
- coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(pipe_config))
- coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
-
- vlv_dpio_put(dev_priv);
-}
-
-static void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 loopfilter, tribuf_calcntr;
- u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
- u32 dpio_val;
- int vco;
-
- /* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- bestn = pipe_config->dpll.n;
- bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2 >> 22;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
- vco = pipe_config->dpll.vco;
- dpio_val = 0;
- loopfilter = 0;
-
- vlv_dpio_get(dev_priv);
-
- /* p1 and p2 divider */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
- 5 << DPIO_CHV_S1_DIV_SHIFT |
- bestp1 << DPIO_CHV_P1_DIV_SHIFT |
- bestp2 << DPIO_CHV_P2_DIV_SHIFT |
- 1 << DPIO_CHV_K_DIV_SHIFT);
-
- /* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
-
- /* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
- DPIO_CHV_M1_DIV_BY_2 |
- 1 << DPIO_CHV_N_DIV_SHIFT);
-
- /* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
-
- /* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
- dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
- if (bestm2_frac)
- dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
-
- /* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
- dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
- DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
- dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
- if (!bestm2_frac)
- dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
-
- /* Loop filter */
- if (vco == 5400000) {
- loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0x9;
- } else if (vco <= 6200000) {
- loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0x9;
- } else if (vco <= 6480000) {
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0x8;
- } else {
- /* Not supported. Apply the same limits as in the max case */
- loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
- loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
- loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
- tribuf_calcntr = 0;
- }
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
-
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
- dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
- dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
-
- /* AFC Recal */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
- DPIO_AFC_RECAL);
-
- vlv_dpio_put(dev_priv);
-}
-
-/**
- * vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- * @dpll: PLL configuration
- *
- * Enable the PLL for @pipe using the supplied @dpll config. To be used
- * in cases where we need the PLL enabled even when @pipe is not going to
- * be enabled.
- */
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll)
-{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_crtc_state *pipe_config;
-
- pipe_config = intel_crtc_state_alloc(crtc);
- if (!pipe_config)
- return -ENOMEM;
-
- pipe_config->cpu_transcoder = (enum transcoder)pipe;
- pipe_config->pixel_multiplier = 1;
- pipe_config->dpll = *dpll;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- chv_compute_dpll(crtc, pipe_config);
- chv_prepare_pll(crtc, pipe_config);
- chv_enable_pll(crtc, pipe_config);
- } else {
- vlv_compute_dpll(crtc, pipe_config);
- vlv_prepare_pll(crtc, pipe_config);
- vlv_enable_pll(crtc, pipe_config);
- }
-
- kfree(pipe_config);
-
- return 0;
-}
-
-/**
- * vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to disable
- *
- * Disable the PLL for @pipe. To be used in cases where we need
- * the PLL enabled even when @pipe is not going to be enabled.
- */
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
- else
- vlv_disable_pll(dev_priv, pipe);
-}
-
-
-
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -6968,7 +4397,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
vsyncshift += adjusted_mode->crtc_htotal;
}
- if (INTEL_GEN(dev_priv) > 3)
+ if (DISPLAY_VER(dev_priv) > 3)
intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
vsyncshift);
@@ -7015,10 +4444,10 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
return false;
- if (INTEL_GEN(dev_priv) >= 9 ||
+ if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
else
@@ -7122,7 +4551,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (INTEL_GEN(dev_priv) < 4 ||
+ if (DISPLAY_VER(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -7148,7 +4577,7 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
if (IS_I830(dev_priv))
return false;
- return INTEL_GEN(dev_priv) >= 4 ||
+ return DISPLAY_VER(dev_priv) >= 4 ||
IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
@@ -7166,7 +4595,7 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
return;
/* Check whether the pfit is attached to our pipe. */
- if (INTEL_GEN(dev_priv) < 4) {
+ if (DISPLAY_VER(dev_priv) < 4) {
if (crtc->pipe != PIPE_B)
return;
} else {
@@ -7206,92 +4635,6 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
-static void
-i9xx_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- enum pipe pipe;
- u32 val, base, offset;
- int fourcc, pixel_format;
- unsigned int aligned_height;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
-
- if (!plane->get_hw_state(plane, &pipe))
- return;
-
- drm_WARN_ON(dev, pipe != crtc->pipe);
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
- return;
- }
-
- fb = &intel_fb->base;
-
- fb->dev = dev;
-
- val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
-
- if (INTEL_GEN(dev_priv) >= 4) {
- if (val & DISPPLANE_TILED) {
- plane_config->tiling = I915_TILING_X;
- fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
-
- if (val & DISPPLANE_ROTATE_180)
- plane_config->rotation = DRM_MODE_ROTATE_180;
- }
-
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
- val & DISPPLANE_MIRROR)
- plane_config->rotation |= DRM_MODE_REFLECT_X;
-
- pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- if (plane_config->tiling)
- offset = intel_de_read(dev_priv,
- DSPTILEOFF(i9xx_plane));
- else
- offset = intel_de_read(dev_priv,
- DSPLINOFF(i9xx_plane));
- base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
- } else {
- base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
- }
- plane_config->base = base;
-
- val = intel_de_read(dev_priv, PIPESRC(pipe));
- fb->width = ((val >> 16) & 0xfff) + 1;
- fb->height = ((val >> 0) & 0xfff) + 1;
-
- val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
- fb->pitches[0] = val & 0xffffffc0;
-
- aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
- plane_config->size = fb->pitches[0] * aligned_height;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
-
- plane_config->fb = intel_fb;
-}
-
static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -7420,7 +4763,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
- if (INTEL_GEN(dev_priv) < 4)
+ if (DISPLAY_VER(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
@@ -7428,7 +4771,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pfit_config(pipe_config);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
@@ -8108,12 +5451,12 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- if (INTEL_GEN(dev_priv) >= 11 &&
+ if (DISPLAY_VER(dev_priv) >= 11 &&
(crtc_state->active_planes & ~(icl_hdr_plane_mask() |
BIT(PLANE_CURSOR))) == 0)
val |= PIPEMISC_HDR_MODE_PRECISION;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
@@ -8176,7 +5519,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (DISPLAY_VER(dev_priv) >= 5) {
m_n->link_m = intel_de_read(dev_priv,
PIPE_LINK_M1(transcoder));
m_n->link_n = intel_de_read(dev_priv,
@@ -8274,150 +5617,6 @@ static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
-static void
-skl_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- enum plane_id plane_id = plane->id;
- enum pipe pipe;
- u32 val, base, offset, stride_mult, tiling, alpha;
- int fourcc, pixel_format;
- unsigned int aligned_height;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
-
- if (!plane->get_hw_state(plane, &pipe))
- return;
-
- drm_WARN_ON(dev, pipe != crtc->pipe);
-
- if (crtc_state->bigjoiner) {
- drm_dbg_kms(&dev_priv->drm,
- "Unsupported bigjoiner configuration for initial FB\n");
- return;
- }
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb) {
- drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
- return;
- }
-
- fb = &intel_fb->base;
-
- fb->dev = dev;
-
- val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
-
- if (INTEL_GEN(dev_priv) >= 11)
- pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
- else
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- alpha = intel_de_read(dev_priv,
- PLANE_COLOR_CTL(pipe, plane_id));
- alpha &= PLANE_COLOR_ALPHA_MASK;
- } else {
- alpha = val & PLANE_CTL_ALPHA_MASK;
- }
-
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX, alpha);
- fb->format = drm_format_info(fourcc);
-
- tiling = val & PLANE_CTL_TILED_MASK;
- switch (tiling) {
- case PLANE_CTL_TILED_LINEAR:
- fb->modifier = DRM_FORMAT_MOD_LINEAR;
- break;
- case PLANE_CTL_TILED_X:
- plane_config->tiling = I915_TILING_X;
- fb->modifier = I915_FORMAT_MOD_X_TILED;
- break;
- case PLANE_CTL_TILED_Y:
- plane_config->tiling = I915_TILING_Y;
- if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
- I915_FORMAT_MOD_Y_TILED_CCS;
- else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
- else
- fb->modifier = I915_FORMAT_MOD_Y_TILED;
- break;
- case PLANE_CTL_TILED_YF:
- if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
- else
- fb->modifier = I915_FORMAT_MOD_Yf_TILED;
- break;
- default:
- MISSING_CASE(tiling);
- goto error;
- }
-
- /*
- * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
- * while i915 HW rotation is clockwise, thats why this swapping.
- */
- switch (val & PLANE_CTL_ROTATE_MASK) {
- case PLANE_CTL_ROTATE_0:
- plane_config->rotation = DRM_MODE_ROTATE_0;
- break;
- case PLANE_CTL_ROTATE_90:
- plane_config->rotation = DRM_MODE_ROTATE_270;
- break;
- case PLANE_CTL_ROTATE_180:
- plane_config->rotation = DRM_MODE_ROTATE_180;
- break;
- case PLANE_CTL_ROTATE_270:
- plane_config->rotation = DRM_MODE_ROTATE_90;
- break;
- }
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- val & PLANE_CTL_FLIP_HORIZONTAL)
- plane_config->rotation |= DRM_MODE_REFLECT_X;
-
- /* 90/270 degree rotation would require extra work */
- if (drm_rotation_90_or_270(plane_config->rotation))
- goto error;
-
- base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
- plane_config->base = base;
-
- offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
-
- val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
- fb->height = ((val >> 16) & 0xffff) + 1;
- fb->width = ((val >> 0) & 0xffff) + 1;
-
- val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
- stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
- fb->pitches[0] = (val & 0x3ff) * stride_mult;
-
- aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
- plane_config->size = fb->pitches[0] * aligned_height;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
-
- plane_config->fb = intel_fb;
- return;
-
-error:
- kfree(intel_fb);
-}
-
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -8440,7 +5639,7 @@ static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now.
*/
- drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
+ drm_WARN_ON(&dev_priv->drm, IS_DISPLAY_VER(dev_priv, 7) &&
(ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -8564,205 +5763,6 @@ out:
return ret;
}
-static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- struct icl_port_dpll *port_dpll;
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 clk_sel;
-
- clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
-
- if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
- return;
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
- port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
-
- port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &port_dpll->hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-
- icl_set_active_port_dpll(pipe_config, port_dpll_id);
-}
-
-static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- enum phy phy = intel_port_to_phy(dev_priv, port);
- enum icl_port_dpll_id port_dpll_id;
- struct icl_port_dpll *port_dpll;
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 temp;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- u32 mask, shift;
-
- if (IS_ROCKETLAKE(dev_priv)) {
- mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
- } else {
- mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
- }
-
- temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
- id = temp >> shift;
- port_dpll_id = ICL_PORT_DPLL_DEFAULT;
- } else if (intel_phy_is_tc(dev_priv, phy)) {
- u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
-
- if (clk_sel == DDI_CLK_SEL_MG) {
- id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
- port));
- port_dpll_id = ICL_PORT_DPLL_MG_PHY;
- } else {
- drm_WARN_ON(&dev_priv->drm,
- clk_sel < DDI_CLK_SEL_TBT_162);
- id = DPLL_ID_ICL_TBTPLL;
- port_dpll_id = ICL_PORT_DPLL_DEFAULT;
- }
- } else {
- drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
- return;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
- port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
-
- port_dpll->pll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &port_dpll->hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-
- icl_set_active_port_dpll(pipe_config, port_dpll_id);
-}
-
-static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 temp;
-
- temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
- if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
- return;
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
-static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
- enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
-
- switch (port) {
- case PORT_A:
- id = DPLL_ID_SKL_DPLL0;
- break;
- case PORT_B:
- id = DPLL_ID_SKL_DPLL1;
- break;
- case PORT_C:
- id = DPLL_ID_SKL_DPLL2;
- break;
- default:
- drm_err(&dev_priv->drm, "Incorrect port type\n");
- return;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
-static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- bool pll_active;
- u32 temp;
-
- temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
- id = temp >> (port * 3 + 1);
-
- if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
- return;
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
-static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_shared_dpll *pll;
- enum intel_dpll_id id;
- u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
- bool pll_active;
-
- switch (ddi_pll_sel) {
- case PORT_CLK_SEL_WRPLL1:
- id = DPLL_ID_WRPLL1;
- break;
- case PORT_CLK_SEL_WRPLL2:
- id = DPLL_ID_WRPLL2;
- break;
- case PORT_CLK_SEL_SPLL:
- id = DPLL_ID_SPLL;
- break;
- case PORT_CLK_SEL_LCPLL_810:
- id = DPLL_ID_LCPLL_810;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- id = DPLL_ID_LCPLL_1350;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- id = DPLL_ID_LCPLL_2700;
- break;
- default:
- MISSING_CASE(ddi_pll_sel);
- fallthrough;
- case PORT_CLK_SEL_NONE:
- return;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, id);
-
- pipe_config->shared_dpll = pll;
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(&dev_priv->drm, !pll_active);
-}
-
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
struct intel_display_power_domain_set *power_domain_set)
@@ -8774,7 +5774,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
enum transcoder panel_transcoder;
u32 tmp;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
@@ -8913,31 +5913,18 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
return;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
else
port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
}
- if (IS_DG1(dev_priv))
- dg1_get_ddi_pll(dev_priv, port, pipe_config);
- else if (INTEL_GEN(dev_priv) >= 11)
- icl_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_get_ddi_pll(dev_priv, port, pipe_config);
- else
- hsw_get_ddi_pll(dev_priv, port, pipe_config);
-
/*
* Haswell has only FDI/PCH transcoder A. It is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
- if (INTEL_GEN(dev_priv) < 9 &&
+ if (DISPLAY_VER(dev_priv) < 9 &&
(port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
@@ -8984,7 +5971,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
/* we cannot read out most state, so don't bother.. */
pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- INTEL_GEN(dev_priv) >= 11) {
+ DISPLAY_VER(dev_priv) >= 11) {
hsw_get_ddi_port_state(crtc, pipe_config);
intel_get_transcoder_timings(crtc, pipe_config);
}
@@ -9013,7 +6000,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
@@ -9035,7 +6022,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_get_pfit_config(pipe_config);
else
ilk_get_pfit_config(pipe_config);
@@ -9335,7 +6322,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN(dev_priv, 2))
+ else if (!IS_DISPLAY_VER(dev_priv, 2))
return 96000;
else
return 48000;
@@ -9368,7 +6355,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN(dev_priv, 2)) {
+ if (!IS_DISPLAY_VER(dev_priv, 2)) {
if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -9565,7 +6552,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
bool turn_off, turn_on, visible, was_visible;
int ret;
- if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
ret = skl_update_scaler_plane(crtc_state, plane_state);
if (ret)
return ret;
@@ -9606,21 +6593,21 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
turn_off, turn_on, mode_changed);
if (turn_on) {
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
crtc_state->disable_cxsr = true;
} else if (turn_off) {
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
crtc_state->disable_cxsr = true;
} else if (intel_wm_need_update(old_plane_state, plane_state)) {
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
crtc_state->update_wm_pre = true;
crtc_state->update_wm_post = true;
@@ -9664,7 +6651,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* plane, not only sprite plane.
*/
if (plane->id != PLANE_CURSOR &&
- (IS_GEN_RANGE(dev_priv, 5, 6) ||
+ (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
needs_scaling(plane_state))))
@@ -9737,7 +6724,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
struct intel_plane_state *plane_state;
int i;
- if (INTEL_GEN(dev_priv) < 11)
+ if (DISPLAY_VER(dev_priv) < 11)
return 0;
/*
@@ -9804,8 +6791,6 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
linked_state->view = plane_state->view;
- memcpy(linked_state->color_plane, plane_state->color_plane,
- sizeof(linked_state->color_plane));
intel_plane_copy_hw_state(linked_state, plane_state);
linked_state->uapi.src = plane_state->uapi.src;
@@ -9899,7 +6884,7 @@ static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_cdclk_state *cdclk_state;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
crtc_state->linetime = skl_linetime_wm(crtc_state);
else
crtc_state->linetime = hsw_linetime_wm(crtc_state);
@@ -9926,7 +6911,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
bool mode_changed = intel_crtc_needs_modeset(crtc_state);
int ret;
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
mode_changed && !crtc_state->hw.active)
crtc_state->update_wm_post = true;
@@ -9980,7 +6965,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
}
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
if (mode_changed || crtc_state->update_pipe) {
ret = skl_update_scaler_crtc(crtc_state);
if (ret)
@@ -9998,7 +6983,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (INTEL_GEN(dev_priv) >= 9 ||
+ if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
ret = hsw_compute_linetime_wm(state, crtc);
if (ret)
@@ -10022,19 +7007,27 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->base.state->crtc)
+ struct drm_connector_state *conn_state = connector->base.state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector->base.encoder);
+
+ if (conn_state->crtc)
drm_connector_put(&connector->base);
- if (connector->base.encoder) {
- connector->base.state->best_encoder =
- connector->base.encoder;
- connector->base.state->crtc =
- connector->base.encoder->crtc;
+ if (encoder) {
+ struct intel_crtc *crtc =
+ to_intel_crtc(encoder->base.crtc);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ conn_state->best_encoder = &encoder->base;
+ conn_state->crtc = &crtc->base;
+ conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
drm_connector_get(&connector->base);
} else {
- connector->base.state->best_encoder = NULL;
- connector->base.state->crtc = NULL;
+ conn_state->best_encoder = NULL;
+ conn_state->crtc = NULL;
}
}
drm_connector_list_iter_end(&conn_iter);
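The connector fixup above now also seeds max_bpc from the CRTC's pipe bpp. pipe_bpp counts bits across all three color channels, so the per-channel maximum is a third of it, with a 24 bpp (8 bpc) fallback. A sketch of the expression (hypothetical helper name):

/*
 * Hypothetical helper illustrating the max_bpc derivation above:
 * pipe_bpp is the total bits per pixel over three channels, so bits
 * per channel is pipe_bpp / 3; 0 falls back to 24 bpp (8 bpc).
 */
static int pipe_bpp_to_max_bpc(int pipe_bpp)
{
	return (pipe_bpp ?: 24) / 3;
}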
@@ -10095,7 +7088,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)))
bpp = 10*3;
- else if (INTEL_GEN(dev_priv) >= 5)
+ else if (DISPLAY_VER(dev_priv) >= 5)
bpp = 12*3;
else
bpp = 8*3;
@@ -10228,7 +7221,6 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
if (!fb) {
drm_dbg_kms(&i915->drm,
@@ -10239,10 +7231,9 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
}
drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name),
+ fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, yesno(plane_state->uapi.visible));
drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
plane_state->hw.rotation, plane_state->scaler_id);
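The dump above switches from drm_get_format_name() to the %p4cc printk extension, which formats a FOURCC code directly from a pointer to the u32 value and removes the need for a drm_format_name_buf on the stack:

/* %p4cc usage, as in the hunk above: */
drm_dbg_kms(&i915->drm, "format = %p4cc\n", &fb->format->format);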
@@ -10295,6 +7286,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
pipe_config->bigjoiner_slave ? "slave" :
pipe_config->bigjoiner ? "master" : "no");
+ drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
+ enableddisabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
+
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
pipe_config->fdi_lanes,
@@ -10361,7 +7357,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
drm_dbg_kms(&dev_priv->drm,
"num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
crtc->num_scalers,
@@ -10937,7 +7933,7 @@ static bool fastboot_enabled(struct drm_i915_private *dev_priv)
return dev_priv->params.fastboot;
/* Enable fastboot by default on Skylake and newer */
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
return true;
/* Enable fastboot by default on VLV and CHV */
@@ -11149,7 +8145,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (INTEL_GEN(dev_priv) < 8) {
+ if (DISPLAY_VER(dev_priv) < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
if (current_config->has_drrs)
@@ -11208,7 +8204,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
- if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+ if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_BOOL(limited_color_range);
@@ -11223,7 +8219,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_GEN(dev_priv) < 4)
+ if (DISPLAY_VER(dev_priv) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -11307,7 +8303,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
+ if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
@@ -11335,6 +8331,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.dsc_split);
PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+ PIPE_CONF_CHECK_BOOL(splitter.enable);
+ PIPE_CONF_CHECK_I(splitter.link_count);
+ PIPE_CONF_CHECK_I(splitter.pixel_overlap);
+
PIPE_CONF_CHECK_I(mst_master_transcoder);
PIPE_CONF_CHECK_BOOL(vrr.enable);
@@ -11384,13 +8384,12 @@ static void verify_wm_state(struct intel_crtc *crtc,
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
struct skl_pipe_wm wm;
} *hw;
- struct skl_pipe_wm *sw_wm;
- struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct intel_plane *plane;
u8 hw_enabled_slices;
- const enum pipe pipe = crtc->pipe;
- int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
+ if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -11398,123 +8397,64 @@ static void verify_wm_state(struct intel_crtc *crtc,
return;
skl_pipe_wm_get_hw_state(crtc, &hw->wm);
- sw_wm = &new_crtc_state->wm.skl.optimal;
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- if (INTEL_GEN(dev_priv) >= 11 &&
+ if (DISPLAY_VER(dev_priv) >= 11 &&
hw_enabled_slices != dev_priv->dbuf.enabled_slices)
drm_err(&dev_priv->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
dev_priv->dbuf.enabled_slices,
hw_enabled_slices);
- /* planes */
- for_each_universal_plane(dev_priv, pipe, plane) {
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
-
- hw_plane_wm = &hw->wm.planes[plane];
- sw_plane_wm = &sw_wm->planes[plane];
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
for (level = 0; level <= max_level; level++) {
- if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]) ||
- (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->sagv_wm0)))
- continue;
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
- drm_err(&dev_priv->drm,
- "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1, level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
- }
-
- if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
- &sw_plane_wm->trans_wm)) {
- drm_err(&dev_priv->drm,
- "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1,
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb_y[plane];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- /*
- * cursor
- * If the cursor plane isn't active, we may not have updated it's ddb
- * allocation. In that case since the ddb allocation will be updated
- * once the plane becomes visible, we can skip this check
- */
- if (1) {
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
-
- hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
- sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- if (skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->wm[level]) ||
- (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
- &sw_plane_wm->sagv_wm0)))
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
drm_err(&dev_priv->drm,
- "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
}
- if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
- &sw_plane_wm->trans_wm)) {
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
drm_err(&dev_priv->drm,
- "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe),
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
}
/* DDB */
- hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ hw_ddb_entry = &hw->ddb_y[plane->id];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
drm_err(&dev_priv->drm,
- "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
hw_ddb_entry->start, hw_ddb_entry->end);
}
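Structurally, the verify_wm_state() rewrite above folds the formerly duplicated cursor checks into the generic plane walk; the cursor is just another plane id under for_each_intel_plane_on_crtc(). Shape of the unified loop (sketch):

for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
	/* for each WM level: compare hw vs sw skl_wm_level, */
	/* then the transition WM, then the plane's DDB entry */
}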
@@ -11689,7 +8629,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state;
- unsigned int crtc_mask;
+ u8 pipe_mask;
bool active;
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
@@ -11702,34 +8642,34 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
I915_STATE_WARN(!pll->on && pll->active_mask,
"pll in active use but not on in sw tracking\n");
I915_STATE_WARN(pll->on && !pll->active_mask,
- "pll is on but not used by any active crtc\n");
+ "pll is on but not used by any active pipe\n");
I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
}
if (!crtc) {
- I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
- "more active pll users than references: %x vs %x\n",
- pll->active_mask, pll->state.crtc_mask);
+ I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
+ "more active pll users than references: 0x%x vs 0x%x\n",
+ pll->active_mask, pll->state.pipe_mask);
return;
}
- crtc_mask = drm_crtc_mask(&crtc->base);
+ pipe_mask = BIT(crtc->pipe);
if (new_crtc_state->hw.active)
- I915_STATE_WARN(!(pll->active_mask & crtc_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+ I915_STATE_WARN(!(pll->active_mask & pipe_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
pipe_name(crtc->pipe), pll->active_mask);
else
- I915_STATE_WARN(pll->active_mask & crtc_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
- crtc_mask, pll->state.crtc_mask);
+ I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pipe_mask, pll->state.pipe_mask);
I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
&dpll_hw_state,
@@ -11749,15 +8689,15 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
if (old_crtc_state->shared_dpll &&
old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
- unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ u8 pipe_mask = BIT(crtc->pipe);
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
- I915_STATE_WARN(pll->active_mask & crtc_mask,
- "pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(crtc->pipe));
- I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
- "pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(crtc->pipe));
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
+ "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
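Note the bookkeeping change running through both verifiers above: PLL users are now tracked per hardware pipe rather than per drm CRTC index, so the bit is derived directly from the pipe enum:

/* Mask derivation, old vs new (illustration): */
u8 pipe_mask = BIT(crtc->pipe);			/* new */
/* was: unsigned int crtc_mask = drm_crtc_mask(&crtc->base); */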
@@ -11842,7 +8782,7 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (IS_GEN(dev_priv, 2)) {
+ if (IS_DISPLAY_VER(dev_priv, 2)) {
int vtotal;
vtotal = adjusted_mode.crtc_vtotal;
@@ -12049,7 +8989,7 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
/* See {hsw,vlv,ivb}_plane_ratio() */
return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
+ IS_IVYBRIDGE(dev_priv);
}
static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
@@ -12136,13 +9076,7 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- /*
- * Not only the number of planes, but if the plane configuration had
- * changed might already mean we need to recompute min CDCLK,
- * because different planes might consume different amount of Dbuf bandwidth
- * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
- */
- if (old_active_planes == new_active_planes)
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
continue;
ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
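hweight8() returns the number of set bits, so the simplified check above forces a min-CDCLK recomputation only when the count of active non-cursor planes changes, not when the same number of planes is shuffled between plane ids. For example:

u8 old_active = BIT(0) | BIT(1);	/* planes 0 and 1 */
u8 new_active = BIT(0) | BIT(2);	/* planes 0 and 2 */
/*
 * hweight8(old_active) == hweight8(new_active) == 2, so this crtc
 * is now skipped even though the set of planes changed.
 */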
@@ -12382,8 +9316,8 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
return -EINVAL;
}
- if (old_plane_state->color_plane[0].stride !=
- new_plane_state->color_plane[0].stride) {
+ if (old_plane_state->view.color_plane[0].stride !=
+ new_plane_state->view.color_plane[0].stride) {
drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
return -EINVAL;
}
@@ -12725,7 +9659,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
+ if (!IS_DISPLAY_VER(dev_priv, 2) || crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
@@ -12753,7 +9687,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
intel_set_pipe_src_size(new_crtc_state);
/* on skylake this is done by detaching scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
@@ -12773,11 +9707,11 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
* HSW/BDW only really need this here for fastboot, after
* that the value should not change without a full modeset.
*/
- if (INTEL_GEN(dev_priv) >= 9 ||
+ if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -12800,10 +9734,10 @@ static void commit_pipe_config(struct intel_atomic_state *state,
new_crtc_state->update_pipe)
intel_color_commit(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_detach_scalers(new_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
if (new_crtc_state->update_pipe)
@@ -12869,7 +9803,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
commit_pipe_config(state, crtc);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
else
i9xx_update_planes_on_crtc(state, crtc);
@@ -13343,7 +10277,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* chance of catching underruns with the intermediate watermarks
* vs. the new plane configuration.
*/
- if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+ if (IS_DISPLAY_VER(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (dev_priv->display.optimize_watermarks)
@@ -13479,7 +10413,7 @@ static int intel_atomic_commit(struct drm_device *dev,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
+ if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -13576,7 +10510,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
if (!dma_fence_is_i915(fence))
return;
- if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
return;
if (drm_crtc_vblank_get(crtc))
@@ -13603,20 +10537,12 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_framebuffer *fb = plane_state->hw.fb;
struct i915_vma *vma;
+ bool phys_cursor =
+ plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical;
- if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- const int align = intel_cursor_alignment(dev_priv);
- int err;
-
- err = i915_gem_object_attach_phys(obj, align);
- if (err)
- return err;
- }
-
- vma = intel_pin_and_fence_fb_obj(fb,
- &plane_state->view,
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+ &plane_state->view.gtt,
intel_plane_uses_fence(plane_state),
&plane_state->flags);
if (IS_ERR(vma))
@@ -13652,9 +10578,7 @@ int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
@@ -13707,13 +10631,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (!obj)
return 0;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
ret = intel_plane_pin_fb(new_plane_state);
-
- i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
@@ -13872,7 +10791,7 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
@@ -13880,7 +10799,7 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
return false;
if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
@@ -13910,12 +10829,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_TC1);
intel_ddi_init(dev_priv, PORT_TC2);
- } else if (INTEL_GEN(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_TC1);
@@ -13931,7 +10856,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
icl_dsi_init(dev_priv);
- } else if (IS_GEN(dev_priv, 11)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 11)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -13966,8 +10891,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/*
* Haswell uses DDI functions to detect digital outputs.
- * On SKL pre-D0 the strap isn't connected, so we assume
- * it's there.
+ * On SKL pre-D0 the strap isn't connected. Later SKUs may or
+ * may not have it - it was supposed to be fixed by the same
+ * time we stopped using straps. Assume it's there.
*/
found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
@@ -13976,7 +10902,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
* register */
- found = intel_de_read(dev_priv, SFUSE_STRAP);
+ if (HAS_PCH_TGP(dev_priv)) {
+ /* W/A due to lack of STRAP config on TGP PCH */
+ found = (SFUSE_STRAP_DDIB_DETECTED |
+ SFUSE_STRAP_DDIC_DETECTED |
+ SFUSE_STRAP_DDID_DETECTED);
+ } else {
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
+ }
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev_priv, PORT_B);
@@ -14007,28 +10940,28 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
if (ilk_has_edp_a(dev_priv))
- intel_dp_init(dev_priv, DP_A, PORT_A);
+ g4x_dp_init(dev_priv, DP_A, PORT_A);
if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
if (!found)
- intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
+ g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
+ g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
}
if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
- intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
+ g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
+ g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
+ g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
- intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
+ g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
@@ -14053,16 +10986,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
- has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
+ has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
- intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
+ g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
- has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
+ has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
- intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
+ g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
if (IS_CHERRYVIEW(dev_priv)) {
/*
@@ -14071,16 +11004,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
- intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
+ g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
- intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
+ g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
vlv_dsi_init(dev_priv);
} else if (IS_PINEVIEW(dev_priv)) {
intel_lvds_init(dev_priv);
intel_crt_init(dev_priv);
- } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
+ } else if (IS_DISPLAY_RANGE(dev_priv, 3, 4)) {
bool found = false;
if (IS_MOBILE(dev_priv))
@@ -14094,11 +11027,11 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!found && IS_G4X(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"probing HDMI on SDVOB\n");
- intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
+ g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
}
if (!found && IS_G4X(dev_priv))
- intel_dp_init(dev_priv, DP_B, PORT_B);
+ g4x_dp_init(dev_priv, DP_B, PORT_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
@@ -14113,18 +11046,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (IS_G4X(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"probing HDMI on SDVOC\n");
- intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
+ g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
}
if (IS_G4X(dev_priv))
- intel_dp_init(dev_priv, DP_C, PORT_C);
+ g4x_dp_init(dev_priv, DP_C, PORT_C);
}
if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
- intel_dp_init(dev_priv, DP_D, PORT_D);
+ g4x_dp_init(dev_priv, DP_D, PORT_D);
if (SUPPORTS_TV(dev_priv))
intel_tv_init(dev_priv);
- } else if (IS_GEN(dev_priv, 2)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 2)) {
if (IS_I85X(dev_priv))
intel_lvds_init(dev_priv);
@@ -14132,8 +11065,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_dvo_init(dev_priv);
}
- intel_psr_init(dev_priv);
-
for_each_intel_encoder(&dev_priv->drm, encoder) {
encoder->base.possible_crtcs =
intel_encoder_possible_crtcs(encoder);
@@ -14163,7 +11094,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- if (obj->userptr.mm) {
+ if (i915_gem_object_is_userptr(obj)) {
drm_dbg(&i915->drm,
"attempting to use a userptr for a framebuffer, denied\n");
return -EINVAL;
@@ -14236,13 +11167,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
- struct drm_format_name_buf format_name;
-
drm_dbg_kms(&dev_priv->drm,
- "unsupported pixel format %s / modifier 0x%llx\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->modifier[0]);
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
goto err;
}
@@ -14250,7 +11177,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* gen2/3 display engine uses the fence if present,
* so the tiling mode must match the fb modifier exactly.
*/
- if (INTEL_GEN(dev_priv) < 4 &&
+ if (DISPLAY_VER(dev_priv) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
drm_dbg_kms(&dev_priv->drm,
"tiling_mode must match fb modifier exactly on gen2/3\n");
@@ -14395,18 +11322,18 @@ intel_mode_valid(struct drm_device *dev,
return MODE_BAD;
/* Transcoder timing limits */
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
hdisplay_max = 16384;
vdisplay_max = 8192;
htotal_max = 16384;
vtotal_max = 8192;
- } else if (INTEL_GEN(dev_priv) >= 9 ||
+ } else if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
vtotal_max = 8192;
- } else if (INTEL_GEN(dev_priv) >= 3) {
+ } else if (DISPLAY_VER(dev_priv) >= 3) {
hdisplay_max = 4096;
vdisplay_max = 4096;
htotal_max = 8192;
@@ -14430,7 +11357,7 @@ intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
- if (INTEL_GEN(dev_priv) >= 5) {
+ if (DISPLAY_VER(dev_priv) >= 5) {
if (mode->hdisplay < 64 ||
mode->htotal - mode->hdisplay < 32)
return MODE_H_ILLEGAL;
@@ -14459,7 +11386,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* intel_mode_valid() should be
* sufficient on older platforms.
*/
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
return MODE_OK;
/*
@@ -14467,7 +11394,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* plane so let's not advertise modes that are
* too big for that.
*/
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
plane_width_max = 5120 << bigjoiner;
plane_height_max = 4320;
} else {
@@ -14503,10 +11430,11 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
+ intel_init_audio_hooks(dev_priv);
intel_dpll_init_clock_hook(dev_priv);
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = hsw_get_pipe_config;
dev_priv->display.crtc_enable = hsw_crtc_enable;
dev_priv->display.crtc_disable = hsw_crtc_disable;
@@ -14531,7 +11459,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_fdi_init_hook(dev_priv);
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
} else {
@@ -14671,12 +11599,12 @@ fail:
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 5)) {
+ if (IS_IRONLAKE(dev_priv)) {
u32 fdi_pll_clk =
intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
+ } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
dev_priv->fdi_pll_freq = 270000;
} else {
return;
@@ -14787,13 +11715,13 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (INTEL_GEN(i915) >= 7) {
+ if (DISPLAY_VER(i915) >= 7) {
mode_config->max_width = 16384;
mode_config->max_height = 16384;
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (DISPLAY_VER(i915) >= 4) {
mode_config->max_width = 8192;
mode_config->max_height = 8192;
- } else if (IS_GEN(i915, 3)) {
+ } else if (IS_DISPLAY_VER(i915, 3)) {
mode_config->max_width = 4096;
mode_config->max_height = 4096;
} else {
@@ -14938,6 +11866,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_update_czclk(i915);
intel_modeset_init_hw(i915);
+ intel_dpll_update_ref_clks(i915);
intel_hdcp_component_init(i915);
@@ -15135,7 +12064,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
return;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15194,7 +12123,7 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (INTEL_GEN(dev_priv) >= 9 ||
+ if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 val;
@@ -15266,7 +12195,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* Disable any background color set by the BIOS, but enable the
* gamma and CSC to match how we program our planes.
*/
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
}
@@ -15320,7 +12249,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* without several WARNs, but for now let's take the easy
* road.
*/
- return IS_GEN(dev_priv, 6) &&
+ return IS_SANDYBRIDGE(dev_priv) &&
crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
@@ -15393,8 +12322,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_sanitize_encoder_pll_mapping(encoder);
+ if (HAS_DDI(dev_priv))
+ intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
@@ -15474,8 +12403,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
readout_plane_state(dev_priv);
- intel_dpll_readout_hw_state(dev_priv);
-
for_each_intel_encoder(dev, encoder) {
pipe = 0;
@@ -15510,6 +12437,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
+ intel_dpll_readout_hw_state(dev_priv);
+
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
@@ -15590,8 +12519,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* use plane->min_cdclk() :(
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide ||
- INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
crtc_state->min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
@@ -15682,7 +12610,7 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
* Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
+ if (IS_DISPLAY_RANGE(dev_priv, 10, 12))
intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
@@ -15837,7 +12765,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_wm_get_hw_state(dev_priv);
vlv_wm_sanitize(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
@@ -15971,6 +12899,57 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
intel_bios_driver_remove(i915);
}
+void intel_display_driver_register(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_display_debugfs_register(i915);
+
+ /* Must be done after probing outputs */
+ intel_opregion_register(i915);
+ acpi_video_register();
+
+ intel_audio_init(i915);
+
+ /*
+ * Some ports require correctly set-up hpd registers for
+ * detection to work properly (otherwise we get a ghost
+ * connected connector status), e.g. VGA on gm45. Hence we can only set
+ * up the initial fbdev config after hpd irqs are fully
+ * enabled. We do it last so that the async config cannot run
+ * before the connectors are registered.
+ */
+ intel_fbdev_initial_config_async(&i915->drm);
+
+ /*
+ * We need to coordinate the hotplugs with the asynchronous
+ * fbdev configuration, for which we use the
+ * fbdev->async_cookie.
+ */
+ drm_kms_helper_poll_init(&i915->drm);
+}
+
+void intel_display_driver_unregister(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_fbdev_unregister(i915);
+ intel_audio_deinit(i915);
+
+ /*
+ * After flushing the fbdev (incl. a late async config which
+ * will have delayed queuing of a hotplug event), then flush
+ * the hotplug events.
+ */
+ drm_kms_helper_poll_fini(&i915->drm);
+ drm_atomic_helper_shutdown(&i915->drm);
+
+ acpi_video_unregister();
+ intel_opregion_unregister(i915);
+}
+
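The two new entry points above gather the userspace-facing registration steps (debugfs, opregion/ACPI video, audio, fbdev, connector polling) behind a single HAS_DISPLAY() check. A hedged sketch of the intended pairing - the real i915_driver.c call sites are outside this diff and these caller names are hypothetical:

static void example_driver_register(struct drm_i915_private *i915)
{
	/* ... GEM and other non-display registration ... */
	intel_display_driver_register(i915);
}

static void example_driver_remove(struct drm_i915_private *i915)
{
	intel_display_driver_unregister(i915);
	/* ... remaining teardown, in reverse order of registration ... */
}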
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct intel_display_error_state {
@@ -16055,16 +13034,16 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
- if (INTEL_GEN(dev_priv) <= 3) {
+ if (DISPLAY_VER(dev_priv) <= 3) {
error->plane[i].size = intel_de_read(dev_priv,
DSPSIZE(i));
error->plane[i].pos = intel_de_read(dev_priv,
DSPPOS(i));
}
- if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
+ if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
error->plane[i].addr = intel_de_read(dev_priv,
DSPADDR(i));
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
error->plane[i].surface = intel_de_read(dev_priv,
DSPSURF(i));
error->plane[i].tile_offset = intel_de_read(dev_priv,
@@ -16138,13 +13117,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
- if (INTEL_GEN(dev_priv) <= 3) {
+ if (DISPLAY_VER(dev_priv) <= 3) {
err_printf(m, " SIZE: %08x\n", error->plane[i].size);
err_printf(m, " POS: %08x\n", error->plane[i].pos);
}
- if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
+ if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
err_printf(m, " SURF: %08x\n", error->plane[i].surface);
err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 76f8a805b0a3..105294ec2dcc 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -48,10 +48,10 @@ struct i915_ggtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
-struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
+struct intel_initial_plane_config;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
@@ -352,11 +352,6 @@ enum phy_fia {
for_each_cpu_transcoder(__dev_priv, __t) \
for_each_if ((__mask) & BIT(__t))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-
#define for_each_sprite(__dev_priv, __p, __s) \
for ((__s) = 0; \
(__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
@@ -417,10 +412,19 @@ enum phy_fia {
for_each_if((encoder_mask) & \
drm_encoder_mask(&intel_encoder->base))
+#define for_each_intel_encoder_mask_with_psr(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if(((encoder_mask) & drm_encoder_mask(&(intel_encoder)->base)) && \
+ intel_encoder_can_psr(intel_encoder))
+
#define for_each_intel_dp(dev, intel_encoder) \
for_each_intel_encoder(dev, intel_encoder) \
for_each_if(intel_encoder_is_dp(intel_encoder))
+#define for_each_intel_encoder_with_psr(dev, intel_encoder) \
+ for_each_intel_encoder((dev), (intel_encoder)) \
+ for_each_if(intel_encoder_can_psr(intel_encoder))
+
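The new iterators simply filter for_each_intel_encoder() through intel_encoder_can_psr(), reflecting that PSR state now lives per encoder instead of in dev_priv. A sketch of the usage pattern the debugfs changes further below adopt:

	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&i915->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* per-encoder PSR state is reached via intel_dp->psr */
	}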
#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
@@ -507,12 +511,9 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
-bool is_ccs_modifier(u64 modifier);
-int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
-bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode,
@@ -570,7 +571,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags);
@@ -586,9 +587,6 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
@@ -613,25 +611,8 @@ enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
-u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set);
-void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
- int id, int set, enum drm_scaling_filter filter);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int plane);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
- int *x, int *y, u32 *offset);
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -644,21 +625,18 @@ bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
-int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
-u32 intel_plane_compute_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane);
int intel_plane_pin_fb(struct intel_plane_state *plane_state);
void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
+
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane);
-u32 intel_plane_adjust_aligned_offset(int *x, int *y,
- const struct intel_plane_state *state,
- int color_plane,
- u32 old_offset, u32 new_offset);
+unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
+
+void intel_display_driver_register(struct drm_i915_private *i915);
+void intel_display_driver_unregister(struct drm_i915_private *i915);
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d62b18d5ecd8..564509a4e666 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -58,11 +58,11 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 7)
+ else if (DISPLAY_VER(dev_priv) >= 7)
mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 5)
+ else if (DISPLAY_VER(dev_priv) >= 5)
mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
else if (IS_G4X(dev_priv))
mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
@@ -83,7 +83,7 @@ static int i915_fbc_false_color_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
*val = dev_priv->fbc.false_color;
@@ -96,7 +96,7 @@ static int i915_fbc_false_color_set(void *data, u64 val)
struct drm_i915_private *dev_priv = data;
u32 reg;
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
mutex_lock(&dev_priv->fbc.lock);
@@ -128,7 +128,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(dev_priv->params.enable_ips));
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (DISPLAY_VER(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
} else {
if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
@@ -150,7 +150,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
/* no global SR status; inspect per-plane WM */;
else if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -249,12 +249,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"sink internal error",
};
struct drm_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_dp *intel_dp =
intel_attached_dp(to_intel_connector(connector));
int ret;
- if (!CAN_PSR(dev_priv)) {
+ if (!CAN_PSR(intel_dp)) {
seq_puts(m, "PSR Unsupported\n");
return -ENODEV;
}
@@ -280,12 +279,13 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
- u32 val, status_val;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const char *status = "unknown";
+ u32 val, status_val;
- if (dev_priv->psr.psr2_enabled) {
+ if (intel_dp->psr.psr2_enabled) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -300,7 +300,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"TG_ON"
};
val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -317,7 +317,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDENT_ON",
};
val = intel_de_read(dev_priv,
- EDP_PSR_STATUS(dev_priv->psr.transcoder));
+ EDP_PSR_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -327,21 +327,18 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
-static int i915_edp_psr_status(struct seq_file *m, void *data)
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_psr *psr = &dev_priv->psr;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
const char *status;
bool enabled;
u32 val;
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
- if (psr->dp)
- seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ if (psr->sink_support)
+ seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");
if (!psr->sink_support)
@@ -365,16 +362,16 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
if (psr->psr2_enabled) {
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
enableddisabled(enabled), val);
- psr_source_status(dev_priv, m);
+ psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
@@ -383,7 +380,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = intel_de_read(dev_priv,
- EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@@ -404,7 +401,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+ PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
su_frames_val[frame / 3] = val;
}
@@ -430,23 +427,50 @@ unlock:
return 0;
}
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_dp *intel_dp = NULL;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ /* Find the first EDP which supports PSR */
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ intel_dp = enc_to_intel_dp(encoder);
+ break;
+ }
+
+ if (!intel_dp)
+ return -ENODEV;
+
+ return intel_psr_status(m, intel_dp);
+}
+
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
intel_wakeref_t wakeref;
- int ret;
+ int ret = -ENODEV;
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
+ if (!HAS_PSR(dev_priv))
+ return ret;
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
- ret = intel_psr_debug_set(dev_priv, val);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ // TODO: split to each transcoder's PSR debug state
+ ret = intel_psr_debug_set(intel_dp, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ }
return ret;
}
@@ -455,12 +479,20 @@ static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
- if (!CAN_PSR(dev_priv))
+ if (!HAS_PSR(dev_priv))
return -ENODEV;
- *val = READ_ONCE(dev_priv->psr.debug);
- return 0;
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ // TODO: split to each transcoder's PSR debug state
+ *val = READ_ONCE(intel_dp->psr.debug);
+ return 0;
+ }
+
+ return -ENODEV;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
@@ -518,7 +550,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
if (IS_DGFX(dev_priv)) {
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
@@ -772,27 +804,25 @@ static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct drm_framebuffer *fb = plane_state->uapi.fb;
- struct drm_format_name_buf format_name;
struct drm_rect src, dst;
char rot_str[48];
src = drm_plane_state_src(&plane_state->uapi);
dst = drm_plane_state_dest(&plane_state->uapi);
- if (fb)
- drm_get_format_name(fb->format->format, &format_name);
-
plane_rotation(rot_str, sizeof(rot_str),
plane_state->uapi.rotation);
- seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
- fb ? fb->modifier : 0,
- fb ? fb->width : 0, fb ? fb->height : 0,
- plane_visibility(plane_state),
- DRM_RECT_FP_ARG(&src),
- DRM_RECT_ARG(&dst),
- rot_str);
+ seq_puts(m, "\t\tuapi: [FB:");
+ if (fb)
+ seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
+ &fb->format->format, fb->modifier, fb->width,
+ fb->height);
+ else
+ seq_puts(m, "0] n/a,0x0,0x0,");
+ seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
+ ", rotation=%s\n", plane_visibility(plane_state),
+ DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
if (plane_state->planar_linked_plane)
seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
@@ -805,19 +835,17 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
char rot_str[48];
if (!fb)
return;
- drm_get_format_name(fb->format->format, &format_name);
-
plane_rotation(rot_str, sizeof(rot_str),
plane_state->hw.rotation);
- seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb->base.id, format_name.str,
+ seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
+ DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, &fb->format->format,
fb->modifier, fb->width, fb->height,
yesno(plane_state->uapi.visible),
DRM_RECT_FP_ARG(&plane_state->uapi.src),
@@ -1066,8 +1094,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+ seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
+ pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
@@ -1162,7 +1190,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
struct skl_ddb_entry *entry;
struct intel_crtc *crtc;
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
@@ -1233,9 +1261,6 @@ static void drrs_status_per_crtc(struct seq_file *m,
/* disable_drrs() will make drrs->dp NULL */
if (!drrs->dp) {
seq_puts(m, "Idleness DRRS: Disabled\n");
- if (dev_priv->psr.enabled)
- seq_puts(m,
- "\tAs PSR is enabled, DRRS is not enabled\n");
mutex_unlock(&drrs->mutex);
return;
}
@@ -1314,7 +1339,7 @@ static int i915_lpsp_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- switch (INTEL_GEN(i915)) {
+ switch (DISPLAY_VER(i915)) {
case 12:
case 11:
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
@@ -1591,7 +1616,7 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_GEN(dev_priv) >= 9 ||
+ if (DISPLAY_VER(dev_priv) >= 9 ||
IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv) ||
IS_G4X(dev_priv))
@@ -1611,7 +1636,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
const u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
latencies = dev_priv->wm.pri_latency;
@@ -1626,7 +1651,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
const u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
latencies = dev_priv->wm.spr_latency;
@@ -1641,7 +1666,7 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
const u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
latencies = dev_priv->wm.cur_latency;
@@ -1655,7 +1680,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev_priv);
@@ -1734,7 +1759,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
struct drm_i915_private *dev_priv = m->private;
u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
latencies = dev_priv->wm.pri_latency;
@@ -1749,7 +1774,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
struct drm_i915_private *dev_priv = m->private;
u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
latencies = dev_priv->wm.spr_latency;
@@ -1764,7 +1789,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
struct drm_i915_private *dev_priv = m->private;
u16 *latencies;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
latencies = dev_priv->wm.cur_latency;
@@ -1961,7 +1986,7 @@ static int i915_drrs_ctl_set(void *data, u64 val)
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
- if (INTEL_GEN(dev_priv) < 7)
+ if (DISPLAY_VER(dev_priv) < 7)
return -ENODEV;
for_each_intel_crtc(dev, crtc) {
@@ -2169,19 +2194,40 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
- if (connector->status != connector_status_connected)
- return -ENODEV;
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ if (!connector->encoder || connector->status != connector_status_connected) {
+ ret = -ENODEV;
+ goto out;
+ }
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
intel_hdcp_info(m, intel_connector);
- return 0;
+out:
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
seq_puts(m, "LPSP: incapable\n"))
@@ -2198,7 +2244,7 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- switch (INTEL_GEN(i915)) {
+ switch (DISPLAY_VER(i915)) {
case 12:
/*
* Actually TGL can drive LPSP on port till DDI_C
@@ -2357,6 +2403,12 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}
+ if (HAS_PSR(dev_priv) &&
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_psr_status", 0444, root,
+ connector, &i915_psr_status_fops);
+ }
+
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
@@ -2364,15 +2416,12 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
connector, &i915_hdcp_sink_capability_fops);
}
- if (INTEL_GEN(dev_priv) >= 10 &&
- ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- !to_intel_connector(connector)->mst_port) ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+ if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) &&
+     ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+       !to_intel_connector(connector)->mst_port) ||
+      connector->connector_type == DRM_MODE_CONNECTOR_eDP))
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);
/* Legacy panels don't lpsp on any platform */
- if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
+ if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) &&
(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c11c37c65d86..99126caf5747 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -408,7 +408,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
if (power_well->desc->hsw.has_fuses) {
enum skl_power_gate pg;
- pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
@@ -441,7 +441,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
if (power_well->desc->hsw.has_fuses) {
enum skl_power_gate pg;
- pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
gen9_wait_for_power_well_fuses(dev_priv, pg);
}
@@ -484,7 +484,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, regs->driver,
val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (INTEL_GEN(dev_priv) < 12) {
+ if (DISPLAY_VER(dev_priv) < 12) {
val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
val | ICL_LANE_ENABLE_AUX);
@@ -550,7 +550,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
+ if (IS_DISPLAY_VER(dev_priv, 11) && dig_port->tc_legacy_port)
return;
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
@@ -619,14 +619,14 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* exit sequence.
*/
timeout_expected = is_tbt;
- if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
+ if (IS_DISPLAY_VER(dev_priv, 11) && dig_port->tc_legacy_port) {
icl_tc_cold_exit(dev_priv);
timeout_expected = true;
}
hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
- if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
+ if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
@@ -709,7 +709,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
+ if (IS_DISPLAY_VER(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
val |= intel_de_read(dev_priv, regs->bios);
@@ -804,10 +804,10 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
| DC_STATE_EN_DC9;
- else if (IS_GEN(dev_priv, 11))
+ else if (IS_DISPLAY_VER(dev_priv, 11))
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
@@ -1035,7 +1035,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
high_pg = ICL_DISP_PW_3;
else
high_pg = SKL_DISP_PW_2;
@@ -1192,7 +1192,7 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
if (IS_GEN9_LP(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
/*
* DMC retains HW context only for port A, the other combo
* PHY's HW context for port B is lost after DC transitions,
@@ -2886,24 +2886,24 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_G) | \
- BIT_ULL(POWER_DOMAIN_AUX_H) | \
- BIT_ULL(POWER_DOMAIN_AUX_I) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2921,18 +2921,12 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
-#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
-#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
-#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
+#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
+#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
+#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
+#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
+#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
+#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
#define TGL_AUX_A_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
@@ -2941,44 +2935,34 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
-#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D))
-#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E))
-#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F))
-#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_G))
-#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_H))
-#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_I))
-#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
-#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
-#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
-#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
-#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
-#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
+
+#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
+#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
+#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
+#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
+#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5)
+#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6)
+
+#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
+#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
+#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
+#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
+#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5)
+#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6)
#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_G) | \
- BIT_ULL(POWER_DOMAIN_AUX_H) | \
- BIT_ULL(POWER_DOMAIN_AUX_I) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
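Each *_POWER_DOMAINS macro is a u64 bitmask indexed by enum intel_display_power_domain, hence the BIT_ULL() wrapping of every entry. A minimal sketch of how such a mask is queried (the helper name is hypothetical; desc->domains holds the mask assigned in the power well tables below):

static bool example_well_provides(const struct i915_power_well_desc *desc,
				  enum intel_display_power_domain domain)
{
	return desc->domains & BIT_ULL(domain);
}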
#define RKL_PW_4_POWER_DOMAINS ( \
@@ -2994,10 +2978,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
@@ -4145,8 +4129,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
}
},
{
- .name = "DDI D TC1 IO",
- .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .name = "DDI IO TC1",
+ .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4155,8 +4139,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI E TC2 IO",
- .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .name = "DDI IO TC2",
+ .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4165,8 +4149,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI F TC3 IO",
- .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
+ .name = "DDI IO TC3",
+ .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4175,8 +4159,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI G TC4 IO",
- .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
+ .name = "DDI IO TC4",
+ .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4185,8 +4169,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI H TC5 IO",
- .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
+ .name = "DDI IO TC5",
+ .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4195,8 +4179,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI I TC6 IO",
- .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
+ .name = "DDI IO TC6",
+ .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4241,8 +4225,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX D TC1",
- .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .name = "AUX USBC1",
+ .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4252,8 +4236,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX E TC2",
- .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .name = "AUX USBC2",
+ .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4263,8 +4247,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX F TC3",
- .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
+ .name = "AUX USBC3",
+ .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4274,8 +4258,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX G TC4",
- .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
+ .name = "AUX USBC4",
+ .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4285,8 +4269,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX H TC5",
- .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
+ .name = "AUX USBC5",
+ .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4296,8 +4280,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX I TC6",
- .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
+ .name = "AUX USBC6",
+ .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4307,8 +4291,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX D TBT1",
- .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX TBT1",
+ .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4318,8 +4302,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX E TBT2",
- .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX TBT2",
+ .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4329,8 +4313,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX F TBT3",
- .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX TBT3",
+ .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4340,8 +4324,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX G TBT4",
- .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX TBT4",
+ .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4351,8 +4335,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX H TBT5",
- .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
+ .name = "AUX TBT5",
+ .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4362,8 +4346,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX I TBT6",
- .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
+ .name = "AUX TBT6",
+ .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4471,8 +4455,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
}
},
{
- .name = "DDI D TC1 IO",
- .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
+ .name = "DDI IO TC1",
+ .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4481,8 +4465,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
},
{
- .name = "DDI E TC2 IO",
- .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
+ .name = "DDI IO TC2",
+ .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4511,8 +4495,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
},
{
- .name = "AUX D TC1",
- .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
+ .name = "AUX USBC1",
+ .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4521,8 +4505,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
},
},
{
- .name = "AUX E TC2",
- .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
+ .name = "AUX USBC2",
+ .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -4551,9 +4535,9 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
if (IS_DG1(dev_priv))
max_dc = 3;
- else if (INTEL_GEN(dev_priv) >= 12)
+ else if (DISPLAY_VER(dev_priv) >= 12)
max_dc = 4;
- else if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_BC(dev_priv))
+ else if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv) || IS_GEN9_BC(dev_priv))
max_dc = 2;
else if (IS_GEN9_LP(dev_priv))
max_dc = 1;
@@ -4565,7 +4549,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
- mask = IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 11 ?
+ mask = IS_GEN9_LP(dev_priv) || DISPLAY_VER(dev_priv) >= 11 ?
DC_STATE_EN_DC9 : 0;
if (!dev_priv->params.disable_power_well)
@@ -4689,14 +4673,14 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
err = set_power_wells_mask(power_domains, tgl_power_wells,
BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
} else if (IS_ROCKETLAKE(dev_priv)) {
err = set_power_wells(power_domains, rkl_power_wells);
- } else if (IS_GEN(dev_priv, 12)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
- } else if (IS_GEN(dev_priv, 11)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CNL_WITH_PORT_F(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -4853,7 +4837,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
- if (IS_GEN(dev_priv, 12))
+ if (IS_DISPLAY_VER(dev_priv, 12))
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
@@ -5317,17 +5301,25 @@ struct buddy_page_mask {
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
+ { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
{}
};
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
{}
};
@@ -5339,9 +5331,10 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
int config, i;
- if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
- IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
- /* Wa_1409767108:tgl,dg1 */
+ if (IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_1409767108:tgl,dg1,adl-s */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
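
Each table above is terminated by the empty { } sentinel entry. As a minimal sketch of the sentinel-terminated lookup the init path performs over these tables (helper name and parameter types are assumptions, not part of the patch):

static u32 buddy_page_mask_lookup(const struct buddy_page_mask *table,
				  int num_channels,
				  enum intel_dram_type type)
{
	int i;

	/* Tables end in a zeroed sentinel entry. */
	for (i = 0; table[i].page_mask != 0; i++) {
		if (table[i].num_channels == num_channels &&
		    table[i].type == type)
			return table[i].page_mask;
	}

	return 0; /* no match: leave BW_BUDDY unprogrammed */
}
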
@@ -5379,7 +5372,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- /* Wa_14011294188:ehl,jsl,tgl,rkl */
+ /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
@@ -5403,7 +5396,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(dev_priv);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
gen12_dbuf_slices_config(dev_priv);
/* 5. Enable DBUF. */
@@ -5413,14 +5406,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
icl_mbus_init(dev_priv);
/* 7. Program arbiter BW_BUDDY registers */
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
tgl_bw_buddy_init(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
/* Wa_14011508470 */
- if (IS_GEN(dev_priv, 12)) {
+ if (IS_DISPLAY_VER(dev_priv, 12)) {
val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
@@ -5626,7 +5619,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- if (INTEL_GEN(i915) >= 11) {
+ if (DISPLAY_VER(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
cnl_display_core_init(i915, resume);
@@ -5787,7 +5780,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
- if (INTEL_GEN(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 11)
icl_display_core_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_display_core_uninit(i915);
@@ -5915,7 +5908,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+ if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
bxt_enable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
@@ -5928,7 +5921,7 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+ if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
@@ -5942,7 +5935,7 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
void intel_display_power_suspend(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11) {
+ if (DISPLAY_VER(i915) >= 11) {
icl_display_core_uninit(i915);
bxt_enable_dc9(i915);
} else if (IS_GEN9_LP(i915)) {
@@ -5955,7 +5948,7 @@ void intel_display_power_suspend(struct drm_i915_private *i915)
void intel_display_power_resume(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11) {
+ if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (i915->csr.dmc_payload) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index bc30c479be53..f3ca5d5c9778 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -41,6 +41,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_G_LANES,
POWER_DOMAIN_PORT_DDI_H_LANES,
POWER_DOMAIN_PORT_DDI_I_LANES,
+
+ POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_LANES_TC2,
+ POWER_DOMAIN_PORT_DDI_LANES_TC3,
+ POWER_DOMAIN_PORT_DDI_LANES_TC4,
+ POWER_DOMAIN_PORT_DDI_LANES_TC5,
+ POWER_DOMAIN_PORT_DDI_LANES_TC6,
+
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
@@ -50,6 +58,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
+
+ POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_IO_TC2,
+ POWER_DOMAIN_PORT_DDI_IO_TC3,
+ POWER_DOMAIN_PORT_DDI_IO_TC4,
+ POWER_DOMAIN_PORT_DDI_IO_TC5,
+ POWER_DOMAIN_PORT_DDI_IO_TC6,
+
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -64,6 +80,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_G,
POWER_DOMAIN_AUX_H,
POWER_DOMAIN_AUX_I,
+
+ POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_USBC5,
+ POWER_DOMAIN_AUX_USBC6,
+
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_C_TBT,
POWER_DOMAIN_AUX_D_TBT,
@@ -72,6 +96,14 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_G_TBT,
POWER_DOMAIN_AUX_H_TBT,
POWER_DOMAIN_AUX_I_TBT,
+
+ POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
+
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
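
The new TC/USBC/TBT names are aliases overlaid on the legacy letter-based enumerants rather than new values, so on tgl+ either naming selects the same power domain. Two illustrative compile-time checks (not in the patch; they would sit inside any function, e.g. an init path) that follow directly from the alias anchors above:

/* Implied by POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D above. */
BUILD_BUG_ON(POWER_DOMAIN_AUX_USBC2 != POWER_DOMAIN_AUX_E);
/* Implied by POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO. */
BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_IO_TC6 != POWER_DOMAIN_PORT_DDI_I_IO);
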
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 184ecbbcec99..e2e707c4dff5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -37,6 +37,7 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
@@ -84,20 +85,50 @@ enum intel_broadcast_rgb {
INTEL_BROADCAST_RGB_LIMITED,
};
+struct intel_fb_view {
+ /*
+ * The remap information used in the remapped and rotated views to
+ * create the DMA scatter-gather list for each FB color plane. This sg
+ * list is created along with the view type (gtt.type) specific
+ * i915_vma object and contains the list of FB object pages (reordered
+ * in the rotated view) that are visible in the view.
+ * In the normal view the FB object's backing store sg list is used
+ * directly and hence the remap information here is not used.
+ */
+ struct i915_ggtt_view gtt;
+
+ /*
+ * The GTT view (gtt.type) specific information for each FB color
+ * plane. In the normal GTT view all formats (up to 4 color planes),
+ * in the rotated and remapped GTT view all non-CCS formats (up to 2
+ * color planes) are supported.
+ *
+ * TODO: add support for CCS formats in the remapped GTT view.
+ *
+ * The view information shared by all FB color planes in the FB,
+ * like dst x/y and src/dst width, is stored separately in
+ * intel_plane_state.
+ */
+ struct i915_color_plane_view {
+ u32 offset;
+ unsigned int x, y;
+ /*
+ * Plane stride in:
+ * bytes for 0/180 degree rotation
+ * pixels for 90/270 degree rotation
+ */
+ unsigned int stride;
+ } color_plane[4];
+};
+
struct intel_framebuffer {
struct drm_framebuffer base;
struct intel_frontbuffer *frontbuffer;
- struct intel_rotation_info rot_info;
- /* for each plane in the normal GTT view */
- struct {
- unsigned int x, y;
- } normal[4];
- /* for each plane in the rotated GTT view for no-CCS formats */
- struct {
- unsigned int x, y;
- unsigned int pitch; /* pixels */
- } rotated[2];
+ /* Params to remap the FB pages and program the plane registers in each view. */
+ struct intel_fb_view normal_view;
+ struct intel_fb_view rotated_view;
+ struct intel_fb_view remapped_view;
};
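
Each framebuffer now carries one precomputed intel_fb_view per GTT view type. A minimal sketch of selecting a view by plane rotation (the helper is hypothetical; real selection would also have to fall back to the remapped view when the stride exceeds hardware limits):

static const struct intel_fb_view *
fb_view_for_rotation(const struct intel_framebuffer *fb,
		     unsigned int rotation)
{
	/* 90/270 degree rotation uses the reordered (rotated) sg list. */
	if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270))
		return &fb->rotated_view;

	/* The normal view maps the FB object's backing store directly. */
	return &fb->normal_view;
}
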
struct intel_fbdev {
@@ -219,10 +250,23 @@ struct intel_encoder {
* encoders have been disabled and suspended.
*/
void (*shutdown)(struct intel_encoder *encoder);
+ /*
+ * Enable/disable the clock to the port.
+ */
+ void (*enable_clock)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+ void (*disable_clock)(struct intel_encoder *encoder);
+ /*
+ * Returns whether the port clock is enabled.
+ */
+ bool (*is_clock_enabled)(struct intel_encoder *encoder);
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
+
+ /* VBT information for this encoder (may be NULL for older platforms) */
+ const struct intel_bios_encoder_data *devdata;
};
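
The clock hooks are optional per encoder, so a caller has to NULL-check them; a sketch of the guarded call pattern (assuming, as the hook layout suggests, that not every platform's encoders set them):

static void encoder_enable_clock(struct intel_encoder *encoder,
				 const struct intel_crtc_state *crtc_state)
{
	/* Guard: older platforms may leave the hook unset. */
	if (encoder->enable_clock)
		encoder->enable_clock(encoder, crtc_state);
}
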
struct intel_panel_bl_funcs {
@@ -373,6 +417,10 @@ struct intel_hdcp_shim {
int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
bool *capable);
+ /* Detects whether an HDCP 1.4 sink is connected in the MST topology */
+ int (*streams_type1_capable)(struct intel_connector *connector,
+ bool *capable);
+
/* Write HDCP2.2 messages */
int (*write_2_2_msg)(struct intel_digital_port *dig_port,
void *buf, size_t size);
@@ -563,21 +611,11 @@ struct intel_plane_state {
enum drm_scaling_filter scaling_filter;
} hw;
- struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long flags;
#define PLANE_HAS_FENCE BIT(0)
- struct {
- u32 offset;
- /*
- * Plane stride in:
- * bytes for 0/180 degree rotation
- * pixels for 90/270 degree rotation
- */
- u32 stride;
- int x, y;
- } color_plane[4];
+ struct intel_fb_view view;
/* plane control register */
u32 ctl;
@@ -714,9 +752,9 @@ struct intel_pipe_wm {
struct skl_wm_level {
u16 min_ddb_alloc;
- u16 plane_res_b;
- u8 plane_res_l;
- bool plane_en;
+ u16 blocks;
+ u8 lines;
+ bool enable;
bool ignore_lines;
bool can_sagv;
};
@@ -725,7 +763,10 @@ struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
struct skl_wm_level trans_wm;
- struct skl_wm_level sagv_wm0;
+ struct {
+ struct skl_wm_level wm0;
+ struct skl_wm_level trans_wm;
+ } sagv;
bool is_planar;
};
@@ -1159,6 +1200,13 @@ struct intel_crtc_state {
u8 pipeline_full;
u16 flipline, vmin, vmax;
} vrr;
+
+ /* Stream Splitter for eDP MSO */
+ struct {
+ bool enable;
+ u8 link_count;
+ u8 pixel_overlap;
+ } splitter;
};
enum intel_pipe_crc_source {
@@ -1414,6 +1462,44 @@ struct intel_pps {
struct edp_power_seq pps_delays;
};
+struct intel_psr {
+ /* Mutex for PSR state of the transcoder */
+ struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x04
+#define I915_PSR_DEBUG_IRQ 0x10
+
+ u32 debug;
+ bool sink_support;
+ bool source_support;
+ bool enabled;
+ enum pipe pipe;
+ enum transcoder transcoder;
+ bool active;
+ struct work_struct work;
+ unsigned int busy_frontbuffer_bits;
+ bool sink_psr2_support;
+ bool link_standby;
+ bool colorimetry_support;
+ bool psr2_enabled;
+ bool psr2_sel_fetch_enabled;
+ u8 sink_sync_latency;
+ ktime_t last_entry_attempt;
+ ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
+ u16 su_x_granularity;
+ bool dc3co_enabled;
+ u32 dc3co_exit_delay;
+ struct delayed_work dc3co_work;
+ struct drm_dp_vsc_sdp vsc;
+};
+
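
The debug field packs a mode selector in the low nibble plus independent flag bits above it; a small sketch of decoding it with the masks defined above (helper names are illustrative):

static bool psr_debug_forces_disable(const struct intel_psr *psr)
{
	/* Low nibble selects the mode; 0x10 is a separate IRQ flag. */
	return (psr->debug & I915_PSR_DEBUG_MODE_MASK) == I915_PSR_DEBUG_DISABLE;
}

static bool psr_debug_wants_irq(const struct intel_psr *psr)
{
	return psr->debug & I915_PSR_DEBUG_IRQ;
}
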
struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@@ -1448,6 +1534,8 @@ struct intel_dp {
int max_link_lane_count;
/* Max rate for the current link */
int max_link_rate;
+ int mso_link_count;
+ int mso_pixel_overlap;
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
@@ -1516,6 +1604,8 @@ struct intel_dp {
bool hobl_active;
struct intel_dp_pcon_frl frl;
+
+ struct intel_psr psr;
};
enum lspcon_vendor {
@@ -1704,6 +1794,18 @@ intel_attached_dig_port(struct intel_connector *connector)
return enc_to_dig_port(intel_attached_encoder(connector));
}
+static inline struct intel_hdmi *
+enc_to_intel_hdmi(struct intel_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->hdmi;
+}
+
+static inline struct intel_hdmi *
+intel_attached_hdmi(struct intel_connector *connector)
+{
+ return enc_to_intel_hdmi(intel_attached_encoder(connector));
+}
+
static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
@@ -1752,6 +1854,17 @@ dp_to_i915(struct intel_dp *intel_dp)
return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
}
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
+{
+ if (!intel_encoder_is_dp(encoder))
+ return false;
+
+ return CAN_PSR(enc_to_intel_dp(encoder));
+}
+
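
With PSR state moved into struct intel_dp, CAN_PSR() now takes the intel_dp rather than the whole device. A usage sketch (the counting function is illustrative; for_each_intel_encoder is the driver's existing iterator):

static int count_psr_capable_encoders(struct drm_i915_private *i915)
{
	struct intel_encoder *encoder;
	int count = 0;

	for_each_intel_encoder(&i915->drm, encoder) {
		/* Rejects non-DP encoders before touching PSR state. */
		if (intel_encoder_can_psr(encoder))
			count++;
	}

	return count;
}
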
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -1893,4 +2006,20 @@ static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
return dev_priv->fdi_pll_freq;
}
+static inline bool is_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+}
+
+static inline bool is_gen12_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+}
+
#endif /* __INTEL_DISPLAY_TYPES_H__ */
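
Since the gen12 set is a strict subset of the full CCS set, a legacy (pre-gen12) CCS test falls out by difference; an illustrative helper, not part of the patch:

static inline bool is_legacy_ccs_modifier(u64 modifier)
{
	/* True only for I915_FORMAT_MOD_Y_TILED_CCS and Yf_TILED_CCS. */
	return is_ccs_modifier(modifier) && !is_gen12_ccs_modifier(modifier);
}
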
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 775d89b6c3fc..6a2dee8cef1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -39,6 +39,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include "g4x_dp.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -50,6 +51,7 @@
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dpll.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
@@ -81,52 +83,6 @@
#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
-struct dp_link_dpll {
- int clock;
- struct dpll dpll;
-};
-
-static const struct dp_link_dpll g4x_dpll[] = {
- { 162000,
- { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
- { 270000,
- { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
-};
-
-static const struct dp_link_dpll pch_dpll[] = {
- { 162000,
- { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
- { 270000,
- { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
-};
-
-static const struct dp_link_dpll vlv_dpll[] = {
- { 162000,
- { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
- { 270000,
- { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
-};
-
-/*
- * CHV supports eDP 1.4 that have more link rates.
- * Below only provides the fixed rate but exclude variable rate.
- */
-static const struct dp_link_dpll chv_dpll[] = {
- /*
- * CHV requires to program fractional division for m2.
- * m2 is stored in fixed point format using formula below
- * (m2_int << 22) | m2_fraction
- */
- { 162000, /* m2_int = 32, m2_fraction = 1677722 */
- { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
- { 270000, /* m2_int = 27, m2_fraction = 0 */
- { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
-};
-
-const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
-{
- return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
-}
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
@@ -150,8 +106,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static void intel_dp_link_down(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
@@ -260,8 +214,8 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- return INTEL_GEN(dev_priv) >= 12 ||
- (INTEL_GEN(dev_priv) == 11 &&
+ return DISPLAY_VER(dev_priv) >= 12 ||
+ (IS_DISPLAY_VER(dev_priv, 11) &&
encoder->port != PORT_A);
}
@@ -338,10 +292,10 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm,
intel_dp->source_rates || intel_dp->num_source_rates);
- if (INTEL_GEN(dev_priv) >= 10) {
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (IS_GEN(dev_priv, 10))
+ if (IS_DISPLAY_VER(dev_priv, 10))
max_rate = cnl_max_source_rate(intel_dp);
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
@@ -529,7 +483,7 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 11)
return 7680 * 8;
else
return 6144 * 8;
@@ -788,10 +742,10 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_H_ILLEGAL;
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
+ if (mode->hdisplay != fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
+ if (mode->vdisplay != fixed_mode->vdisplay)
return MODE_PANEL;
target_clock = fixed_mode->clock;
@@ -822,7 +776,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
*/
- if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+ if (DISPLAY_VER(dev_priv) >= 10 &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
if (intel_dp_is_edp(intel_dp)) {
dsc_max_output_bpp =
@@ -877,39 +831,6 @@ bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
return max_rate >= 810000;
}
-static void
-intel_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct dp_link_dpll *divisor = NULL;
- int i, count = 0;
-
- if (IS_G4X(dev_priv)) {
- divisor = g4x_dpll;
- count = ARRAY_SIZE(g4x_dpll);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- divisor = pch_dpll;
- count = ARRAY_SIZE(pch_dpll);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- divisor = chv_dpll;
- count = ARRAY_SIZE(chv_dpll);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- divisor = vlv_dpll;
- count = ARRAY_SIZE(vlv_dpll);
- }
-
- if (divisor && count) {
- for (i = 0; i < count; i++) {
- if (pipe_config->port_clock == divisor[i].clock) {
- pipe_config->dpll = divisor[i].dpll;
- pipe_config->clock_set = true;
- break;
- }
- }
- }
-}
-
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
@@ -992,10 +913,10 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/* On TGL, FEC is supported on all Pipes */
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+ if (IS_DISPLAY_VER(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
return true;
return false;
@@ -1314,7 +1235,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return -EINVAL;
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
else
dsc_max_bpc = min_t(u8, 10,
@@ -1553,7 +1474,7 @@ static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
{
if (IS_G4X(dev_priv))
return false;
- if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
+ if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
return false;
return true;
@@ -1663,12 +1584,10 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
vsc->sdp_type = DP_SDP_VSC;
- if (dev_priv->psr.psr2_enabled) {
- if (dev_priv->psr.colorimetry_support &&
+ if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.colorimetry_support &&
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
/* [PSR2, +Colorimetry] */
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
@@ -1724,6 +1643,7 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pixel_clock;
if (pipe_config->vrr.enable)
return;
@@ -1742,10 +1662,18 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
return;
pipe_config->has_drrs = true;
- intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
- intel_connector->panel.downclock_mode->clock,
+
+ pixel_clock = intel_connector->panel.downclock_mode->clock;
+ if (pipe_config->splitter.enable)
+ pixel_clock /= pipe_config->splitter.link_count;
+
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
pipe_config->port_clock, &pipe_config->dp_m2_n2,
constant_n, pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
}
int
@@ -1820,6 +1748,26 @@ intel_dp_compute_config(struct intel_encoder *encoder,
output_bpp = intel_dp_output_bpp(pipe_config->output_format,
pipe_config->pipe_bpp);
+ if (intel_dp->mso_link_count) {
+ int n = intel_dp->mso_link_count;
+ int overlap = intel_dp->mso_pixel_overlap;
+
+ pipe_config->splitter.enable = true;
+ pipe_config->splitter.link_count = n;
+ pipe_config->splitter.pixel_overlap = overlap;
+
+ drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
+ n, overlap);
+
+ adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
+ adjusted_mode->crtc_clock /= n;
+ }
+
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
@@ -1827,8 +1775,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n,
constant_n, pipe_config->fec_enable);
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;
+
if (!HAS_DDI(dev_priv))
- intel_dp_set_clock(encoder, pipe_config);
+ g4x_dp_set_clock(encoder, pipe_config);
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config);
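
The splitter math above divides the full-panel timings across the MSO links and then re-inflates gmch_m so the M/N ratio still describes the full-rate stream. A worked example with illustrative numbers (not taken from the patch):

/*
 *   n = 2 links, overlap = 20 px, 5120 px wide panel, 600000 kHz clock:
 *
 *   crtc_hdisplay = 5120 / 2 + 20 = 2580   per-link active width
 *   crtc_clock    = 600000 / 2   = 300000  per-link pixel clock (kHz)
 *
 * intel_link_compute_m_n() sees the halved clock, so multiplying
 * dp_m_n.gmch_m back by n keeps M/N consistent with the full-rate
 * stream the sink reassembles from both links.
 */
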
@@ -1848,90 +1800,6 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
intel_dp->lane_count = lane_count;
}
-static void intel_dp_prepare(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-
- intel_dp_set_link_params(intel_dp,
- pipe_config->port_clock,
- pipe_config->lane_count);
-
- /*
- * There are four kinds of DP registers:
- *
- * IBX PCH
- * SNB CPU
- * IVB CPU
- * CPT PCH
- *
- * IBX PCH and CPU are the same for almost everything,
- * except that the CPU DP PLL is configured in this
- * register
- *
- * CPT PCH is quite different, having many bits moved
- * to the TRANS_DP_CTL register instead. That
- * configuration happens (oddly) in ilk_pch_enable
- */
-
- /* Preserve the BIOS-computed detected bit. This is
- * supposed to be read-only.
- */
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
-
- /* Handle DP bits in common between all three register formats */
- intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
-
- /* Split out the IBX/CPU vs CPT settings */
-
- if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
-
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- intel_dp->DP |= DP_ENHANCED_FRAMING;
-
- intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp;
-
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
-
- trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- trans_dp |= TRANS_DP_ENH_FRAMING;
- else
- trans_dp &= ~TRANS_DP_ENH_FRAMING;
- intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
- } else {
- if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
- intel_dp->DP |= DP_COLOR_RANGE_16_235;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
- intel_dp->DP |= DP_LINK_TRAIN_OFF;
-
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- intel_dp->DP |= DP_ENHANCED_FRAMING;
-
- if (IS_CHERRYVIEW(dev_priv))
- intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
- else
- intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
- }
-}
-
-
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -1963,89 +1831,6 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
intel_panel_disable_backlight(old_conn_state);
}
-static void assert_dp_port(struct intel_dp *intel_dp, bool state)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
-
- I915_STATE_WARN(cur_state != state,
- "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- onoff(state), onoff(cur_state));
-}
-#define assert_dp_port_disabled(d) assert_dp_port((d), false)
-
-static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
-{
- bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
-
- I915_STATE_WARN(cur_state != state,
- "eDP PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
-#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
-
-static void ilk_edp_pll_on(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
-{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
- assert_dp_port_disabled(intel_dp);
- assert_edp_pll_disabled(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
- pipe_config->port_clock);
-
- intel_dp->DP &= ~DP_PLL_FREQ_MASK;
-
- if (pipe_config->port_clock == 162000)
- intel_dp->DP |= DP_PLL_FREQ_162MHZ;
- else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
- udelay(500);
-
- /*
- * [DevILK] Work around required when enabling DP PLL
- * while a pipe is enabled going to FDI:
- * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
- * 2. Program DP PLL enable
- */
- if (IS_GEN(dev_priv, 5))
- intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
-
- intel_dp->DP |= DP_PLL_ENABLE;
-
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
- udelay(200);
-}
-
-static void ilk_edp_pll_off(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
- assert_dp_port_disabled(intel_dp);
- assert_edp_pll_enabled(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
-
- intel_dp->DP &= ~DP_PLL_ENABLE;
-
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
- udelay(200);
-}
-
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
/*
@@ -2148,160 +1933,6 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
-static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
- enum port port, enum pipe *pipe)
-{
- enum pipe p;
-
- for_each_pipe(dev_priv, p) {
- u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
-
- if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
- *pipe = p;
- return true;
- }
- }
-
- drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
- port_name(port));
-
- /* must initialize pipe to something for the asserts */
- *pipe = PIPE_A;
-
- return false;
-}
-
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t dp_reg, enum port port,
- enum pipe *pipe)
-{
- bool ret;
- u32 val;
-
- val = intel_de_read(dev_priv, dp_reg);
-
- ret = val & DP_PORT_EN;
-
- /* asserts want to know the pipe even if the port is disabled */
- if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
- ret &= cpt_dp_port_selected(dev_priv, port, pipe);
- else if (IS_CHERRYVIEW(dev_priv))
- *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
- else
- *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
-
- return ret;
-}
-
-static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_wakeref_t wakeref;
- bool ret;
-
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
- encoder->port, pipe);
-
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
-
- return ret;
-}
-
-static void intel_dp_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- u32 tmp, flags = 0;
- enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-
- if (encoder->type == INTEL_OUTPUT_EDP)
- pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
- else
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
-
- tmp = intel_de_read(dev_priv, intel_dp->output_reg);
-
- pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
-
- if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = intel_de_read(dev_priv,
- TRANS_DP_CTL(crtc->pipe));
-
- if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
- flags |= DRM_MODE_FLAG_PHSYNC;
- else
- flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
- flags |= DRM_MODE_FLAG_PVSYNC;
- else
- flags |= DRM_MODE_FLAG_NVSYNC;
- } else {
- if (tmp & DP_SYNC_HS_HIGH)
- flags |= DRM_MODE_FLAG_PHSYNC;
- else
- flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (tmp & DP_SYNC_VS_HIGH)
- flags |= DRM_MODE_FLAG_PVSYNC;
- else
- flags |= DRM_MODE_FLAG_NVSYNC;
- }
-
- pipe_config->hw.adjusted_mode.flags |= flags;
-
- if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
- pipe_config->limited_color_range = true;
-
- pipe_config->lane_count =
- ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
-
- intel_dp_get_m_n(crtc, pipe_config);
-
- if (port == PORT_A) {
- if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
- pipe_config->port_clock = 162000;
- else
- pipe_config->port_clock = 270000;
- }
-
- pipe_config->hw.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
-
- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
- /*
- * This is a big fat ugly hack.
- *
- * Some machines in UEFI boot mode provide us a VBT that has 18
- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
- * unknown we fail to light up. Yet the same BIOS boots up with
- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
- * max, not what it tells us to use.
- *
- * Note: This will still be broken if the eDP panel is not lit
- * up by the BIOS, and thus we can't get the mode at module
- * load.
- */
- drm_dbg_kms(&dev_priv->drm,
- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
- }
-}
-
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);
@@ -2359,7 +1990,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
return false;
}
- if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
+ if (CAN_PSR(intel_dp)) {
drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
crtc_state->uapi.mode_changed = true;
return false;
@@ -2368,122 +1999,6 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
return true;
}
-static void intel_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- intel_dp->link_trained = false;
-
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
-
- /* Make sure the panel is off before trying to change the mode. But also
- * ensure that we have vdd while we switch off the panel. */
- intel_pps_vdd_on(intel_dp);
- intel_edp_backlight_off(old_conn_state);
- intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- intel_pps_off(intel_dp);
- intel_dp->frl.is_trained = false;
- intel_dp->frl.trained_rate_gbps = 0;
-}
-
-static void g4x_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
-}
-
-static void vlv_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
-}
-
-static void g4x_post_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- enum port port = encoder->port;
-
- /*
- * Bspec does not list a specific disable sequence for g4x DP.
- * Follow the ilk+ sequence (disable pipe before the port) for
- * g4x DP as it does not suffer from underruns like the normal
- * g4x modeset sequence (disable pipe after the port).
- */
- intel_dp_link_down(encoder, old_crtc_state);
-
- /* Only ilk+ has port A */
- if (port == PORT_A)
- ilk_edp_pll_off(intel_dp, old_crtc_state);
-}
-
-static void vlv_post_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void chv_post_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- intel_dp_link_down(encoder, old_crtc_state);
-
- vlv_dpio_get(dev_priv);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
-}
-
-static void
-cpt_set_link_train(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- u8 dp_train_pat)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 *DP = &intel_dp->DP;
-
- *DP &= ~DP_LINK_TRAIN_MASK_CPT;
-
- switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
- case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF_CPT;
- break;
- case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1_CPT;
- break;
- case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
- break;
- case DP_TRAINING_PATTERN_3:
- drm_dbg_kms(&dev_priv->drm,
- "TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
- break;
- }
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-}
-
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -2558,10 +2073,6 @@ static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
-#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
-#define PCON_CONCURRENT_MODE (1 > 0)
-#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
-#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
@@ -2595,10 +2106,12 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -ETIMEDOUT;
max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
- ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
+ ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
+ DP_PCON_ENABLE_SEQUENTIAL_LINK);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
+ ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
+ DP_PCON_FRL_LINK_TRAIN_NORMAL);
if (ret < 0)
return ret;
ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
@@ -2642,15 +2155,20 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- /* Always go for FRL training if supported */
- if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+ /*
+ * Always go for FRL training if:
+ * - PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
+ * - sink is HDMI 2.1
+ */
+ if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) ||
+ !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
intel_dp->frl.is_trained)
return;
if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
int ret, mode;
- drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
+ drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
@@ -2758,61 +2276,6 @@ intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
-static void
-g4x_set_link_train(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- u8 dp_train_pat)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 *DP = &intel_dp->DP;
-
- *DP &= ~DP_LINK_TRAIN_MASK;
-
- switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
- case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF;
- break;
- case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1;
- break;
- case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2;
- break;
- case DP_TRAINING_PATTERN_3:
- drm_dbg_kms(&dev_priv->drm,
- "TPS3 not supported, using TPS2 instead\n");
- *DP |= DP_LINK_TRAIN_PAT_2;
- break;
- }
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-}
-
-static void intel_dp_enable_port(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- /* enable with pattern 1 (as per spec) */
-
- intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- DP_TRAINING_PATTERN_1);
-
- /*
- * Magic for VLV/CHV. We _must_ first set up the register
- * without actually enabling the port, and then do another
- * write to enable the port. Otherwise link training will
- * fail when the power sequencer is freshly used for this port.
- */
- intel_dp->DP |= DP_PORT_EN;
- if (crtc_state->has_audio)
- intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-}
-
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -2881,592 +2344,6 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
enableddisabled(tmp ? true : false));
}
-static void intel_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
- enum pipe pipe = crtc->pipe;
- intel_wakeref_t wakeref;
-
- if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
- return;
-
- with_intel_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_pps_init(encoder, pipe_config);
-
- intel_dp_enable_port(intel_dp, pipe_config);
-
- intel_pps_vdd_on_unlocked(intel_dp);
- intel_pps_on_unlocked(intel_dp);
- intel_pps_vdd_off_unlocked(intel_dp, true);
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- unsigned int lane_mask = 0x0;
-
- if (IS_CHERRYVIEW(dev_priv))
- lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
-
- vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
- lane_mask);
- }
-
- intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- intel_dp_configure_protocol_converter(intel_dp, pipe_config);
- intel_dp_check_frl_training(intel_dp);
- intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
- intel_dp_start_link_train(intel_dp, pipe_config);
- intel_dp_stop_link_train(intel_dp, pipe_config);
-
- if (pipe_config->has_audio) {
- drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
- pipe_name(pipe));
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
- }
-}
-
-static void g4x_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- intel_enable_dp(state, encoder, pipe_config, conn_state);
- intel_edp_backlight_on(pipe_config, conn_state);
-}
-
-static void vlv_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- intel_edp_backlight_on(pipe_config, conn_state);
-}
-
-static void g4x_pre_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- enum port port = encoder->port;
-
- intel_dp_prepare(encoder, pipe_config);
-
- /* Only ilk+ has port A */
- if (port == PORT_A)
- ilk_edp_pll_on(intel_dp, pipe_config);
-}
-
-static void vlv_pre_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- vlv_phy_pre_encoder_enable(encoder, pipe_config);
-
- intel_enable_dp(state, encoder, pipe_config, conn_state);
-}
-
-static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- intel_dp_prepare(encoder, pipe_config);
-
- vlv_phy_pre_pll_enable(encoder, pipe_config);
-}
-
-static void chv_pre_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- chv_phy_pre_encoder_enable(encoder, pipe_config);
-
- intel_enable_dp(state, encoder, pipe_config, conn_state);
-
- /* Second common lane will stay alive on its own now */
- chv_phy_release_cl2_override(encoder);
-}
-
-static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- intel_dp_prepare(encoder, pipe_config);
-
- chv_phy_pre_pll_enable(encoder, pipe_config);
-}
-
-static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- chv_phy_post_pll_disable(encoder, old_crtc_state);
-}
-
-static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
-}
-
-static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-}
-
-static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
-{
- return DP_TRAIN_PRE_EMPH_LEVEL_2;
-}
-
-static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
-{
- return DP_TRAIN_PRE_EMPH_LEVEL_3;
-}
-
-static void vlv_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- unsigned long demph_reg_value, preemph_reg_value,
- uniqtranscale_reg_value;
- u8 train_set = intel_dp->train_set[0];
-
- switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
- case DP_TRAIN_PRE_EMPH_LEVEL_0:
- preemph_reg_value = 0x0004000;
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- demph_reg_value = 0x2B405555;
- uniqtranscale_reg_value = 0x552AB83A;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- demph_reg_value = 0x2B404040;
- uniqtranscale_reg_value = 0x5548B83A;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- demph_reg_value = 0x2B245555;
- uniqtranscale_reg_value = 0x5560B83A;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- demph_reg_value = 0x2B405555;
- uniqtranscale_reg_value = 0x5598DA3A;
- break;
- default:
- return;
- }
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_1:
- preemph_reg_value = 0x0002000;
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- demph_reg_value = 0x2B404040;
- uniqtranscale_reg_value = 0x5552B83A;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- demph_reg_value = 0x2B404848;
- uniqtranscale_reg_value = 0x5580B83A;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- demph_reg_value = 0x2B404040;
- uniqtranscale_reg_value = 0x55ADDA3A;
- break;
- default:
- return;
- }
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_2:
- preemph_reg_value = 0x0000000;
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- demph_reg_value = 0x2B305555;
- uniqtranscale_reg_value = 0x5570B83A;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- demph_reg_value = 0x2B2B4040;
- uniqtranscale_reg_value = 0x55ADDA3A;
- break;
- default:
- return;
- }
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_3:
- preemph_reg_value = 0x0006000;
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- demph_reg_value = 0x1B405555;
- uniqtranscale_reg_value = 0x55ADDA3A;
- break;
- default:
- return;
- }
- break;
- default:
- return;
- }
-
- vlv_set_phy_signal_level(encoder, crtc_state,
- demph_reg_value, preemph_reg_value,
- uniqtranscale_reg_value, 0);
-}
-
-static void chv_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- u32 deemph_reg_value, margin_reg_value;
- bool uniq_trans_scale = false;
- u8 train_set = intel_dp->train_set[0];
-
- switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
- case DP_TRAIN_PRE_EMPH_LEVEL_0:
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- deemph_reg_value = 128;
- margin_reg_value = 52;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- deemph_reg_value = 128;
- margin_reg_value = 77;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- deemph_reg_value = 128;
- margin_reg_value = 102;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- deemph_reg_value = 128;
- margin_reg_value = 154;
- uniq_trans_scale = true;
- break;
- default:
- return;
- }
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_1:
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- deemph_reg_value = 85;
- margin_reg_value = 78;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- deemph_reg_value = 85;
- margin_reg_value = 116;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- deemph_reg_value = 85;
- margin_reg_value = 154;
- break;
- default:
- return;
- }
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_2:
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- deemph_reg_value = 64;
- margin_reg_value = 104;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- deemph_reg_value = 64;
- margin_reg_value = 154;
- break;
- default:
- return;
- }
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_3:
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- deemph_reg_value = 43;
- margin_reg_value = 154;
- break;
- default:
- return;
- }
- break;
- default:
- return;
- }
-
- chv_set_phy_signal_level(encoder, crtc_state,
- deemph_reg_value, margin_reg_value,
- uniq_trans_scale);
-}
-
-static u32 g4x_signal_levels(u8 train_set)
-{
- u32 signal_levels = 0;
-
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
- default:
- signal_levels |= DP_VOLTAGE_0_4;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
- signal_levels |= DP_VOLTAGE_0_6;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
- signal_levels |= DP_VOLTAGE_0_8;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
- signal_levels |= DP_VOLTAGE_1_2;
- break;
- }
- switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
- case DP_TRAIN_PRE_EMPH_LEVEL_0:
- default:
- signal_levels |= DP_PRE_EMPHASIS_0;
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_1:
- signal_levels |= DP_PRE_EMPHASIS_3_5;
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_2:
- signal_levels |= DP_PRE_EMPHASIS_6;
- break;
- case DP_TRAIN_PRE_EMPH_LEVEL_3:
- signal_levels |= DP_PRE_EMPHASIS_9_5;
- break;
- }
- return signal_levels;
-}
-
-static void
-g4x_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
- u32 signal_levels;
-
- signal_levels = g4x_signal_levels(train_set);
-
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
- signal_levels);
-
- intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
- intel_dp->DP |= signal_levels;
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-}
-
-/* SNB CPU eDP voltage swing and pre-emphasis control */
-static u32 snb_cpu_edp_signal_levels(u8 train_set)
-{
- u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
-
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
- }
-}
-
-static void
-snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
- u32 signal_levels;
-
- signal_levels = snb_cpu_edp_signal_levels(train_set);
-
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
- signal_levels);
-
- intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
- intel_dp->DP |= signal_levels;
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-}
-
-/* IVB CPU eDP voltage swing and pre-emphasis control */
-static u32 ivb_cpu_edp_signal_levels(u8 train_set)
-{
- u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
-
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return EDP_LINK_TRAIN_400MV_0DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- return EDP_LINK_TRAIN_400MV_6DB_IVB;
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return EDP_LINK_TRAIN_600MV_0DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
-
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- return EDP_LINK_TRAIN_800MV_0DB_IVB;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
-
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return EDP_LINK_TRAIN_500MV_0DB_IVB;
- }
-}
-
-static void
-ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
- u32 signal_levels;
-
- signal_levels = ivb_cpu_edp_signal_levels(train_set);
-
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
- signal_levels);
-
- intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- intel_dp->DP |= signal_levels;
-
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-}
-
-static char dp_training_pattern_name(u8 train_pat)
-{
- switch (train_pat) {
- case DP_TRAINING_PATTERN_1:
- case DP_TRAINING_PATTERN_2:
- case DP_TRAINING_PATTERN_3:
- return '0' + train_pat;
- case DP_TRAINING_PATTERN_4:
- return '4';
- default:
- MISSING_CASE(train_pat);
- return '?';
- }
-}
-
-void
-intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- u8 dp_train_pat)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
-
- if (train_pat != DP_TRAINING_PATTERN_DISABLE)
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
- encoder->base.base.id, encoder->base.name,
- dp_training_pattern_name(train_pat));
-
- intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
-}
-
-static void
-intel_dp_link_down(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- enum port port = encoder->port;
- u32 DP = intel_dp->DP;
-
- if (drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, intel_dp->output_reg) &
- DP_PORT_EN) == 0))
- return;
-
- drm_dbg_kms(&dev_priv->drm, "\n");
-
- if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
- } else {
- DP &= ~DP_LINK_TRAIN_MASK;
- DP |= DP_LINK_TRAIN_PAT_IDLE;
- }
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- /*
- * HW workaround for IBX, we need to move the port
- * to transcoder A after disabling it to allow the
- * matching HDMI port to be enabled on transcoder A.
- */
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
- /*
- * We get CPU/PCH FIFO underruns on the other pipe when
- * doing the workaround. Sweep them under the rug.
- */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
- /* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
- DP_LINK_TRAIN_PAT_1;
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
-
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- }
-
- msleep(intel_dp->pps.panel_power_down_delay);
-
- intel_dp->DP = DP;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t wakeref;
-
- with_intel_pps_lock(intel_dp, wakeref)
- intel_dp->pps.active_pipe = INVALID_PIPE;
- }
-}
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
@@ -3517,6 +2394,64 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
}
}
+static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int n = intel_dp->mso_link_count;
+ int overlap = intel_dp->mso_pixel_overlap;
+
+ if (!mode || !n)
+ return;
+
+ mode->hdisplay = (mode->hdisplay - overlap) * n;
+ mode->hsync_start = (mode->hsync_start - overlap) * n;
+ mode->hsync_end = (mode->hsync_end - overlap) * n;
+ mode->htotal = (mode->htotal - overlap) * n;
+ mode->clock *= n;
+
+ drm_mode_set_name(mode);
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] using generated MSO mode: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(mode);
+}
+
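/*
 * A minimal userspace sketch of the expansion above, assuming a
 * hypothetical 2-link (n = 2) panel segment with no pixel overlap;
 * the struct and the timing values are illustrative stand-ins for
 * struct drm_display_mode.
 */
#include <assert.h>

struct mso_mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int clock; /* kHz */
};

static void mso_expand(struct mso_mode *m, int n, int overlap)
{
	m->hdisplay = (m->hdisplay - overlap) * n;
	m->hsync_start = (m->hsync_start - overlap) * n;
	m->hsync_end = (m->hsync_end - overlap) * n;
	m->htotal = (m->htotal - overlap) * n;
	m->clock *= n;
}

int main(void)
{
	/* one 1920-wide segment becomes a 3840-wide composed mode */
	struct mso_mode m = { 1920, 1968, 2000, 2080, 138650 };

	mso_expand(&m, 2, 0);
	assert(m.hdisplay == 3840 && m.htotal == 4160);
	assert(m.clock == 277300);
	return 0;
}
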
+static void intel_edp_mso_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 mso;
+
+ if (intel_dp->edp_dpcd[0] < DP_EDP_14)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
+ drm_err(&i915->drm, "Failed to read MSO cap\n");
+ return;
+ }
+
+ /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
+ mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
+ if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
+ drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
+ mso = 0;
+ }
+
+ if (mso) {
+ drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
+ mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
+ if (!HAS_MSO(i915)) {
+ drm_err(&i915->drm, "No source MSO support, disabling\n");
+ mso = 0;
+ }
+ }
+
+ intel_dp->mso_link_count = mso;
+ intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
+}
+
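/*
 * A sketch of the validity check above, with illustrative values:
 * given 4 DPRX lanes, a link count of 2 yields the 2x2 layout the
 * driver logs, while odd or oversized counts are rejected (0 means
 * plain SST).
 */
#include <assert.h>
#include <stdbool.h>

static bool mso_link_count_valid(unsigned int mso, unsigned int max_lanes)
{
	/* valid configurations are SST (0) or MSO 2x1, 2x2, 4x1 */
	return mso % 2 == 0 && mso <= max_lanes;
}

int main(void)
{
	assert(mso_link_count_valid(2, 4));  /* 2 links x 2 lanes each */
	assert(mso_link_count_valid(4, 4));  /* 4 links x 1 lane each */
	assert(!mso_link_count_valid(3, 4)); /* odd link count */
	assert(!mso_link_count_valid(4, 2)); /* exceeds lane count */
	return 0;
}
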
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
@@ -3591,7 +2526,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
/* Read the eDP DSC DPCD registers */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
intel_dp_get_dsc_sink_cap(intel_dp);
/*
@@ -3600,6 +2535,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
intel_edp_init_source_oui(intel_dp, true);
+ intel_edp_mso_init(intel_dp);
+
return true;
}
@@ -4537,7 +3474,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
for (;;) {
- u8 esi[DP_DPRX_ESI_LEN] = {};
+ /*
+ * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
+ * pass in "esi+10" to drm_dp_channel_eq_ok(), which
+ * takes a 6-byte array. So we actually need 16 bytes
+ * here.
+ *
+ * Somebody who knows what the limits actually are
+ * should check this, but for now this is at least
+ * harmless and avoids a valid compiler warning about
+ * using more of the array than we have allocated.
+ */
+ u8 esi[DP_DPRX_ESI_LEN + 2] = {};
bool handled;
int retry;
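
/*
 * A compile-time restatement of the sizing argument in the comment
 * above, assuming DP_DPRX_ESI_LEN is 14 as stated; the offset and
 * chunk macro names are illustrative, not drm definitions.
 */
#define DP_DPRX_ESI_LEN 14
#define ESI_LINK_STATUS_OFFSET 10 /* the "esi + 10" in the caller */
#define DP_LINK_STATUS_CHUNK 6    /* drm_dp_channel_eq_ok() input */

_Static_assert(ESI_LINK_STATUS_OFFSET + DP_LINK_STATUS_CHUNK <=
	       DP_DPRX_ESI_LEN + 2,
	       "esi[] must cover the 6-byte window at offset 10");

int main(void)
{
	return 0;
}
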
@@ -4768,7 +3716,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
to_intel_crtc_state(crtc->base.state);
/* retrain on the MST master transcoder */
- if (INTEL_GEN(dev_priv) >= 12 &&
+ if (DISPLAY_VER(dev_priv) >= 12 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
!intel_dp_mst_is_master_trans(crtc_state))
continue;
@@ -4872,7 +3820,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder,
to_intel_crtc_state(crtc->base.state);
/* test on the MST master transcoder */
- if (INTEL_GEN(dev_priv) >= 12 &&
+ if (DISPLAY_VER(dev_priv) >= 12 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
!intel_dp_mst_is_master_trans(crtc_state))
continue;
@@ -4908,64 +3856,6 @@ void intel_dp_phy_test(struct intel_encoder *encoder)
"Acquiring modeset locks failed with %i\n", ret);
}
-/*
- * If the display is now connected, check link status;
- * there have been known issues of link loss triggering
- * a long pulse.
- *
- * Some sinks (e.g. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
-static enum intel_hotplug_state
-intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_modeset_acquire_ctx ctx;
- enum intel_hotplug_state state;
- int ret;
-
- if (intel_dp->compliance.test_active &&
- intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
- intel_dp_phy_test(encoder);
- /* just do the PHY test and nothing else */
- return INTEL_HOTPLUG_UNCHANGED;
- }
-
- state = intel_encoder_hotplug(encoder, connector);
-
- drm_modeset_acquire_init(&ctx, 0);
-
- for (;;) {
- ret = intel_dp_retrain_link(encoder, &ctx);
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- continue;
- }
-
- break;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- drm_WARN(encoder->base.dev, ret,
- "Acquiring modeset locks failed with %i\n", ret);
-
- /*
- * Keeping it consistent with intel_ddi_hotplug() and
- * intel_hdmi_hotplug().
- */
- if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
- state = INTEL_HOTPLUG_RETRY;
-
- return state;
-}
-
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -5147,68 +4037,6 @@ edp_detect(struct intel_dp *intel_dp)
return connector_status_connected;
}
-static bool ibx_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
-
- return intel_de_read(dev_priv, SDEISR) & bit;
-}
-
-static bool g4x_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
- break;
- case HPD_PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
- break;
- case HPD_PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
-}
-
-static bool gm45_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
-}
-
-static bool ilk_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
-
- return intel_de_read(dev_priv, DEISR) & bit;
-}
-
/*
* intel_digital_port_connected - is the specified port connected?
* @encoder: intel_encoder
@@ -5305,7 +4133,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
* ILK doesn't seem capable of DP YCbCr output. The
* displayed image is severely corrupted. SNB+ is fine.
*/
- if (IS_GEN(i915, 5))
+ if (IS_IRONLAKE(i915))
return;
is_branch = drm_dp_is_branch(intel_dp->dpcd);
@@ -5323,7 +4151,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
- if (INTEL_GEN(i915) >= 11) {
+ if (DISPLAY_VER(i915) >= 11) {
/* Let PCON convert from RGB->YCbCr if possible */
if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
intel_dp->dfp.rgb_to_ycbcr = true;
@@ -5441,7 +4269,7 @@ intel_dp_detect(struct drm_connector *connector,
}
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -5546,19 +4374,18 @@ static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct edid *edid;
+ int num_modes = 0;
edid = intel_connector->detect_edid;
if (edid) {
- int ret = intel_connector_update_modes(connector, edid);
+ num_modes = intel_connector_update_modes(connector, edid);
if (intel_vrr_is_capable(connector))
drm_connector_set_vrr_capable_property(connector,
true);
- if (ret)
- return ret;
}
- /* if eDP has no EDID, fall back to fixed mode */
+ /* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -5567,10 +4394,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
intel_connector->panel.fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
- return 1;
+ num_modes++;
}
}
+ if (num_modes)
+ return num_modes;
+
if (!edid) {
struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
struct drm_display_mode *mode;
@@ -5580,11 +4410,11 @@ static int intel_dp_get_modes(struct drm_connector *connector)
intel_dp->downstream_ports);
if (mode) {
drm_mode_probed_add(connector, mode);
- return 1;
+ num_modes++;
}
}
- return 0;
+ return num_modes;
}
static int
@@ -5648,14 +4478,6 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
intel_dp_aux_fini(intel_dp);
}
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
- intel_dp_encoder_flush_work(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(enc_to_dig_port(to_intel_encoder(encoder)));
-}
-
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
@@ -5670,39 +4492,6 @@ void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
intel_pps_wait_power_cycle(intel_dp);
}
-static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum pipe pipe;
-
- if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
- encoder->port, &pipe))
- return pipe;
-
- return INVALID_PIPE;
-}
-
-void intel_dp_encoder_reset(struct drm_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
-
- if (!HAS_DDI(dev_priv))
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
-
- intel_dp->reset_link_params = true;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t wakeref;
-
- with_intel_pps_lock(intel_dp, wakeref)
- intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
- }
-
- intel_pps_encoder_reset(intel_dp);
-}
-
static int intel_modeset_tile_group(struct intel_atomic_state *state,
int tile_group_id)
{
@@ -5826,7 +4615,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
return 0;
if (!intel_connector_needs_modeset(state, conn))
@@ -5860,11 +4649,6 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.atomic_check = intel_dp_connector_atomic_check,
};
-static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .reset = intel_dp_encoder_reset,
- .destroy = intel_dp_encoder_destroy,
-};
-
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
@@ -5914,10 +4698,10 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
* eDP not supported on g4x. So bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
- if (INTEL_GEN(dev_priv) < 5)
+ if (DISPLAY_VER(dev_priv) < 5)
return false;
- if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
+ if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
return true;
return intel_bios_is_port_edp(dev_priv, port);
@@ -5938,7 +4722,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_broadcast_rgb_property(connector);
if (HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 6, 10);
- else if (INTEL_GEN(dev_priv) >= 5)
+ else if (DISPLAY_VER(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
@@ -5949,7 +4733,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_dp_colorspace_property(connector);
}
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
+ if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11)
drm_object_attach_property(&connector->base,
connector->dev->mode_config.hdr_output_metadata_property,
0);
@@ -6030,7 +4814,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
switch (index) {
case DRRS_HIGH_RR:
intel_dp_set_m_n(crtc_state, M1_N1);
@@ -6043,7 +4827,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm,
"Unsupported refreshrate type\n");
}
- } else if (INTEL_GEN(dev_priv) > 6) {
+ } else if (DISPLAY_VER(dev_priv) > 6) {
i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
u32 val;
@@ -6371,7 +5155,7 @@ intel_dp_drrs_init(struct intel_connector *connector,
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
- if (INTEL_GEN(dev_priv) <= 6) {
+ if (DISPLAY_VER(dev_priv) <= 6) {
drm_dbg_kms(&dev_priv->drm,
"DRRS supported for Gen7 and above\n");
return NULL;
@@ -6457,6 +5241,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (fixed_mode)
downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
+ /* multiply the mode clock and horizontal timings for MSO */
+ intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
+ intel_edp_mso_mode_fixup(intel_connector, downclock_mode);
+
/* fallback to VBT if available for eDP */
if (!fixed_mode)
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
@@ -6639,6 +5427,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
+ intel_psr_init(intel_dp);
+
return true;
fail:
@@ -6647,137 +5437,6 @@ fail:
return false;
}
-bool intel_dp_init(struct drm_i915_private *dev_priv,
- i915_reg_t output_reg,
- enum port port)
-{
- struct intel_digital_port *dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *intel_connector;
-
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
- if (!dig_port)
- return false;
-
- intel_connector = intel_connector_alloc();
- if (!intel_connector)
- goto err_connector_alloc;
-
- intel_encoder = &dig_port->base;
- encoder = &intel_encoder->base;
-
- mutex_init(&dig_port->hdcp_mutex);
-
- if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
- "DP %c", port_name(port)))
- goto err_encoder_init;
-
- intel_encoder->hotplug = intel_dp_hotplug;
- intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->get_hw_state = intel_dp_get_hw_state;
- intel_encoder->get_config = intel_dp_get_config;
- intel_encoder->sync_state = intel_dp_sync_state;
- intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
- intel_encoder->update_pipe = intel_panel_update_backlight;
- intel_encoder->suspend = intel_dp_encoder_suspend;
- intel_encoder->shutdown = intel_dp_encoder_shutdown;
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
- intel_encoder->pre_enable = chv_pre_enable_dp;
- intel_encoder->enable = vlv_enable_dp;
- intel_encoder->disable = vlv_disable_dp;
- intel_encoder->post_disable = chv_post_disable_dp;
- intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
- intel_encoder->pre_enable = vlv_pre_enable_dp;
- intel_encoder->enable = vlv_enable_dp;
- intel_encoder->disable = vlv_disable_dp;
- intel_encoder->post_disable = vlv_post_disable_dp;
- } else {
- intel_encoder->pre_enable = g4x_pre_enable_dp;
- intel_encoder->enable = g4x_enable_dp;
- intel_encoder->disable = g4x_disable_dp;
- intel_encoder->post_disable = g4x_post_disable_dp;
- }
-
- if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A))
- dig_port->dp.set_link_train = cpt_set_link_train;
- else
- dig_port->dp.set_link_train = g4x_set_link_train;
-
- if (IS_CHERRYVIEW(dev_priv))
- dig_port->dp.set_signal_levels = chv_set_signal_levels;
- else if (IS_VALLEYVIEW(dev_priv))
- dig_port->dp.set_signal_levels = vlv_set_signal_levels;
- else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
- else if (IS_GEN(dev_priv, 6) && port == PORT_A)
- dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
- else
- dig_port->dp.set_signal_levels = g4x_set_signal_levels;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
- (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
- dig_port->dp.preemph_max = intel_dp_preemph_max_3;
- dig_port->dp.voltage_max = intel_dp_voltage_max_3;
- } else {
- dig_port->dp.preemph_max = intel_dp_preemph_max_2;
- dig_port->dp.voltage_max = intel_dp_voltage_max_2;
- }
-
- dig_port->dp.output_reg = output_reg;
- dig_port->max_lanes = 4;
-
- intel_encoder->type = INTEL_OUTPUT_DP;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- if (IS_CHERRYVIEW(dev_priv)) {
- if (port == PORT_D)
- intel_encoder->pipe_mask = BIT(PIPE_C);
- else
- intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
- } else {
- intel_encoder->pipe_mask = ~0;
- }
- intel_encoder->cloneable = 0;
- intel_encoder->port = port;
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
-
- dig_port->hpd_pulse = intel_dp_hpd_pulse;
-
- if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- dig_port->connected = gm45_digital_port_connected;
- else
- dig_port->connected = g4x_digital_port_connected;
- } else {
- if (port == PORT_A)
- dig_port->connected = ilk_digital_port_connected;
- else
- dig_port->connected = ibx_digital_port_connected;
- }
-
- if (port != PORT_A)
- intel_infoframe_init(dig_port);
-
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- if (!intel_dp_init_connector(dig_port, intel_connector))
- goto err_init_connector;
-
- return true;
-
-err_init_connector:
- drm_encoder_cleanup(encoder);
-err_encoder_init:
- kfree(intel_connector);
-err_connector_alloc:
- kfree(dig_port);
- return false;
-}
-
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index d80839139bfb..8db5062f6c4a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -37,11 +37,6 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_dp_min_bpp(enum intel_output_format output_format);
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t dp_reg, enum port port,
- enum pipe *pipe);
-bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
- enum port port);
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -56,7 +51,6 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
-void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
@@ -87,10 +81,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
-void
-intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- u8 dp_train_pat);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
@@ -136,7 +126,6 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
void intel_dp_check_frl_training(struct intel_dp *intel_dp);
void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 10fe17b7280d..7e83bc2cc34a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -128,7 +128,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
to_i915(dig_port->base.base.dev);
u32 precharge, timeout;
- if (IS_GEN(dev_priv, 6))
+ if (IS_SANDYBRIDGE(dev_priv))
precharge = 3;
else
precharge = 5;
@@ -654,10 +654,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -668,7 +668,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -677,7 +677,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
@@ -685,7 +685,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
aux_ch - AUX_CH_USBC1 + '1',
encoder->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 651884390137..4f8337c7fd2e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
break;
case INTEL_BACKLIGHT_DISPLAY_DDI:
try_intel_interface = true;
- try_vesa_interface = true;
break;
default:
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 4dba5bb15af5..90868e156c69 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -294,37 +294,39 @@ struct hdcp2_dp_msg_data {
bool msg_detectable;
u32 timeout;
u32 timeout2; /* Added for non-paired situation */
+ /* Timeout to read entire msg */
+ u32 msg_read_timeout;
};
static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
- { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0, 0 },
{ HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
- false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0, HDCP_2_2_DP_CERT_READ_TIMEOUT_MS },
{ HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
- false, 0, 0 },
+ false, 0, 0, 0 },
{ HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
- false, 0, 0 },
+ false, 0, 0, 0 },
{ HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS, HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS },
{ HDCP_2_2_AKE_SEND_PAIRING_INFO,
DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
- HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
- { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0, HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS },
+ { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0, 0 },
{ HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
- false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0, 0 },
{ HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
- 0, 0 },
+ 0, 0, 0 },
{ HDCP_2_2_REP_SEND_RECVID_LIST,
DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0, 0 },
{ HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
- 0, 0 },
+ 0, 0, 0 },
{ HDCP_2_2_REP_STREAM_MANAGE,
DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
- 0, 0 },
+ 0, 0, 0 },
{ HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
- false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
{ HDCP_2_2_ERRATA_DP_STREAM_TYPE,
@@ -478,6 +480,23 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
return size;
}
+static int
+get_rxinfo_hdcp_1_dev_downstream(struct intel_digital_port *dig_port, bool *hdcp_1_x)
+{
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ int ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ *hdcp_1_x = HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]);
+ return 0;
+}
+
static
ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
{
@@ -513,6 +532,8 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ ktime_t msg_end;
+ bool msg_expired;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
@@ -539,6 +560,11 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+ /* Whole-message read timeout, armed when the first chunk is read */
+ if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0)
+ msg_end = ktime_add_ms(ktime_get_raw(),
+ hdcp2_msg_data->msg_read_timeout);
+
ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
(void *)byte, len);
if (ret < 0) {
@@ -551,6 +577,16 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
byte += ret;
offset += ret;
}
+
+ if (hdcp2_msg_data->msg_read_timeout > 0) {
+ msg_expired = ktime_after(ktime_get_raw(), msg_end);
+ if (msg_expired) {
+ drm_dbg_kms(&i915->drm, "msg_id %d, entire msg read timeout(mSec): %d\n",
+ msg_id, hdcp2_msg_data->msg_read_timeout);
+ return -ETIMEDOUT;
+ }
+ }
+
byte = buf;
*byte = msg_id;
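
/*
 * A userspace sketch of the whole-message deadline used above: the
 * deadline is stamped before the first chunked read and checked once
 * after the last chunk. clock_gettime(CLOCK_MONOTONIC) stands in for
 * ktime_get_raw()/ktime_add_ms()/ktime_after().
 */
#include <stdbool.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static bool read_msg_within(int chunks, long long timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	while (chunks--)
		; /* one AUX read of up to 16 payload bytes goes here */

	return now_ms() <= deadline; /* false maps to -ETIMEDOUT */
}

int main(void)
{
	return read_msg_within(4, 100) ? 0 : 1;
}
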
@@ -626,6 +662,27 @@ int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
return 0;
}
+static
+int intel_dp_mst_streams_type1_capable(struct intel_connector *connector,
+ bool *capable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret;
+ bool hdcp_1_x;
+
+ ret = get_rxinfo_hdcp_1_dev_downstream(dig_port, &hdcp_1_x);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "[%s:%d] failed to read RxInfo ret=%d\n",
+ connector->base.name, connector->base.base.id, ret);
+ return ret;
+ }
+
+ *capable = !hdcp_1_x;
+ return 0;
+}
+
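/*
 * A sketch of the RxInfo decode feeding the helper above, assuming
 * bit 0 of the second RxInfo byte flags an HDCP 1.x device
 * downstream (the macro name is illustrative): Type 1 streams are
 * only possible when no such device is present.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define HDCP1_DEVICE_DOWNSTREAM(x) ((x) & 0x1) /* assumed bit layout */

static bool streams_type1_capable(const uint8_t rx_info[2])
{
	return !HDCP1_DEVICE_DOWNSTREAM(rx_info[1]);
}

int main(void)
{
	const uint8_t clean[2] = { 0x00, 0x00 };
	const uint8_t mixed[2] = { 0x00, 0x01 };

	assert(streams_type1_capable(clean));
	assert(!streams_type1_capable(mixed));
	return 0;
}
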
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -698,30 +755,6 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return 0;
}
-static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
- struct intel_connector *connector)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct drm_dp_query_stream_enc_status_ack_reply reply;
- struct intel_dp *intel_dp = &dig_port->dp;
- int ret;
-
- ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
- connector->port, &reply);
- if (ret) {
- drm_dbg_kms(&i915->drm,
- "[%s:%d] failed QSES ret=%d\n",
- connector->base.name, connector->base.base.id, ret);
- return false;
- }
-
- drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
- connector->base.name, connector->base.base.id,
- reply.auth_completed, reply.encryption_enabled);
-
- return reply.auth_completed && reply.encryption_enabled;
-}
-
static int
intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
bool enable)
@@ -757,11 +790,6 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return 0;
}
-/*
- * DP v2.0 I.3.3 ignore the stream signature L' in QSES reply msg reply.
- * I.3.5 MST source device may use a QSES msg to query downstream status
- * for a particular stream.
- */
static
int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
@@ -781,7 +809,7 @@ int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
return ret;
}
- return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
+ return 0;
}
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
@@ -803,6 +831,7 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
.check_2_2_link = intel_dp_mst_hdcp2_check_link,
.hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .streams_type1_capable = intel_dp_mst_streams_type1_capable,
.protocol = HDCP_PROTOCOL_DP,
};
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index be6ac0dd846e..02a003fd48fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -26,12 +26,13 @@
#include "intel_dp_link_training.h"
static void
-intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
+intel_dp_dump_link_status(struct drm_device *drm,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
{
-
- DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
- link_status[0], link_status[1], link_status[2],
- link_status[3], link_status[4], link_status[5]);
+ drm_dbg_kms(drm,
+ "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
}
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
@@ -95,7 +96,7 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (INTEL_GEN(i915) < 10)
+ if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
return false;
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
@@ -366,6 +367,39 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
+static char dp_training_pattern_name(u8 train_pat)
+{
+ switch (train_pat) {
+ case DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ return '0' + train_pat;
+ case DP_TRAINING_PATTERN_4:
+ return '4';
+ default:
+ MISSING_CASE(train_pat);
+ return '?';
+ }
+}
+
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
+
+ if (train_pat != DP_TRAINING_PATTERN_DISABLE)
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ encoder->base.base.id, encoder->base.name,
+ dp_training_pattern_name(train_pat));
+
+ intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
+}
+
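/*
 * A standalone check of the TPS naming above, assuming the DPCD
 * TRAINING_PATTERN_SET encoding where patterns 1-3 use values 1-3
 * but TPS4 uses value 7, which is why the '0' + train_pat trick
 * needs an explicit case for pattern 4.
 */
#include <assert.h>

#define TRAINING_PATTERN_1 1
#define TRAINING_PATTERN_2 2
#define TRAINING_PATTERN_3 3
#define TRAINING_PATTERN_4 7 /* distinct encoding per DP 1.4 */

static char training_pattern_name(unsigned int pat)
{
	switch (pat) {
	case TRAINING_PATTERN_1:
	case TRAINING_PATTERN_2:
	case TRAINING_PATTERN_3:
		return '0' + pat;
	case TRAINING_PATTERN_4:
		return '4';
	default:
		return '?';
	}
}

int main(void)
{
	assert(training_pattern_name(TRAINING_PATTERN_2) == '2');
	assert(training_pattern_name(TRAINING_PATTERN_4) == '4');
	assert(training_pattern_name(5) == '?');
	return 0;
}
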
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
@@ -680,7 +714,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
crtc_state->lane_count)) {
- intel_dp_dump_link_status(link_status);
+ intel_dp_dump_link_status(&i915->drm, link_status);
drm_dbg_kms(&i915->drm,
"Clock recovery check failed, cannot "
"continue channel equalization\n");
@@ -707,7 +741,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
- intel_dp_dump_link_status(link_status);
+ intel_dp_dump_link_status(&i915->drm, link_status);
drm_dbg_kms(&i915->drm,
"Channel equalization failed 5 times\n");
}
@@ -769,7 +803,7 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
out:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s",
+ "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
intel_connector->base.base.id,
intel_connector->base.name,
ret ? "passed" : "failed",
@@ -848,7 +882,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
- return;
+ /* Still continue with enabling the port and link training. */
+ lttpr_count = 0;
if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 9cb7c28027f0..9d24d594368c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat);
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index b4621ed0127e..2daa3f67791e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -39,6 +39,7 @@
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
+#include "skl_scaler.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -176,7 +177,7 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
u8 transcoders = 0;
int i;
- if (INTEL_GEN(dev_priv) < 12)
+ if (DISPLAY_VER(dev_priv) < 12)
return 0;
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
@@ -227,7 +228,7 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
- if (INTEL_GEN(dev_priv) < 12)
+ if (DISPLAY_VER(dev_priv) < 12)
return 0;
if (!intel_connector_needs_modeset(state, &connector->base))
@@ -389,7 +390,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
drm_WARN_ON(&dev_priv->drm,
- INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
intel_crtc_vblank_off(old_crtc_state);
@@ -413,7 +414,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
skl_scaler_disable(old_crtc_state);
else
ilk_pfit_disable(old_crtc_state);
@@ -439,7 +440,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* From older GENs spec: "Configure Transcoder Clock Select to direct
* no clock to the transcoder"
*/
- if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
+ if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -487,7 +488,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
drm_WARN_ON(&dev_priv->drm,
- INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
@@ -520,7 +521,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
*/
- if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
+ if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_pipe_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
@@ -590,7 +591,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
- intel_ddi_get_config(&dig_port->base, pipe_config);
+ dig_port->base.get_config(&dig_port->base, pipe_config);
}
static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
@@ -830,7 +831,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (INTEL_GEN(dev_priv) <= 12) {
+ if (DISPLAY_VER(dev_priv) <= 12) {
ret = intel_dp_init_hdcp(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
@@ -944,10 +945,10 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
return 0;
- if (INTEL_GEN(i915) < 12 && port == PORT_A)
+ if (DISPLAY_VER(i915) < 12 && port == PORT_A)
return 0;
- if (INTEL_GEN(i915) < 11 && port == PORT_E)
+ if (DISPLAY_VER(i915) < 11 && port == PORT_E)
return 0;
intel_dp->mst_mgr.cbs = &mst_cbs;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 7ba7f315aaee..3e3c5eed1600 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -3,11 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
#include <linux/kernel.h>
+#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_panel.h"
+#include "intel_sideband.h"
struct intel_limit {
struct {
@@ -845,7 +847,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -859,7 +861,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc_state->dpll_hw_state.dpll_md = dpll_md;
@@ -924,7 +926,7 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
to_intel_atomic_state(crtc_state->uapi.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- INTEL_GEN(dev_priv) >= 11) {
+ DISPLAY_VER(dev_priv) >= 11) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
@@ -1344,7 +1346,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
@@ -1356,8 +1358,515 @@ intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
else if (IS_PINEVIEW(dev_priv))
dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
- else if (!IS_GEN(dev_priv, 2))
+ else if (!IS_DISPLAY_VER(dev_priv, 2))
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
else
dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
}
+
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
+void i9xx_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg = DPLL(crtc->pipe);
+ u32 dpll = crtc_state->dpll_hw_state.dpll;
+ int i;
+
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (i9xx_has_pps(dev_priv))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ /*
+ * Apparently we need to have VGA mode enabled prior to changing
+ * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+ * dividers, even though the register value does change.
+ */
+ intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ } else {
+ /*
+ * The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ intel_de_write(dev_priv, reg, dpll);
+ }
+
+ /* We do this three times for luck */
+ for (i = 0; i < 3; i++) {
+ intel_de_write(dev_priv, reg, dpll);
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150); /* wait for warmup */
+ }
+}
+
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg_val;
+
+ /*
+ * PLLB opamp always calibrates to max value of 0x3f, force enable it
+ * and set it to a reasonable value instead.
+ */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ reg_val |= 0x00000030;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0x8c000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0xb0000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+}
+
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+}
+
+void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_panel_unlocked(dev_priv, pipe);
+
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _vlv_enable_pll(crtc, pipe_config);
+
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+}
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+}
+
+void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_panel_unlocked(dev_priv, pipe);
+
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _chv_enable_pll(crtc, pipe_config);
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+ }
+}
+
+void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 mdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 coreclk, reg_val;
+
+ /* Enable Refclk */
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+
+ bestn = pipe_config->dpll.n;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
+
+ /* See eDP HDMI DPIO driver vbios notes doc */
+
+ /* PLL B needs special handling */
+ if (pipe == PIPE_B)
+ vlv_pllb_recal_opamp(dev_priv, pipe);
+
+ /* Set up Tx target for periodic Rcomp update */
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+
+ /* Disable target IRef on PLL */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+
+ /* Disable fast lock */
+ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+
+ /* Set idtafcrecal before PLL is enabled */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= (bestn << DPIO_N_SHIFT);
+ mdiv |= (1 << DPIO_K_SHIFT);
+
+ /*
+ * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+ * but we don't support that).
+ * Note: don't use the DAC post divider as it seems unstable.
+ */
+ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ /* Set HBR and RBR LPF coefficients */
+ if (pipe_config->port_clock == 162000 ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x009f0003);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x00d0000f);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ /* Use SSC source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ } else { /* HDMI or VGA */
+ /* Use bend source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ }
+
+ coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ coreclk |= 0x01000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 loopfilter, tribuf_calcntr;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ u32 dpio_val;
+ int vco;
+
+ /* Enable Refclk and SSC */
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ bestn = pipe_config->dpll.n;
+ bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
+ bestm1 = pipe_config->dpll.m1;
+ bestm2 = pipe_config->dpll.m2 >> 22;
+ bestp1 = pipe_config->dpll.p1;
+ bestp2 = pipe_config->dpll.p2;
+ vco = pipe_config->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
+
+ vlv_dpio_get(dev_priv);
+
+ /* p1 and p2 divider */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ 5 << DPIO_CHV_S1_DIV_SHIFT |
+ bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+ bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+ 1 << DPIO_CHV_K_DIV_SHIFT);
+
+ /* Feedback post-divider - m2 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+ /* Feedback refclk divider - n and m1 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ DPIO_CHV_M1_DIV_BY_2 |
+ 1 << DPIO_CHV_N_DIV_SHIFT);
+
+ /* M2 fraction division */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+ /* M2 fraction division enable */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+
+ /* Loop filter */
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+
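+ /* Program the TDC target count selected together with the loop filter above */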
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
+ /* AFC Recal */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ DPIO_AFC_RECAL);
+
+ vlv_dpio_put(dev_priv);
+}
+
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ */
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc_state *pipe_config;
+
+ pipe_config = intel_crtc_state_alloc(crtc);
+ if (!pipe_config)
+ return -ENOMEM;
+
+ pipe_config->cpu_transcoder = (enum transcoder)pipe;
+ pipe_config->pixel_multiplier = 1;
+ pipe_config->dpll = *dpll;
+
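+ /* Run the normal compute/prepare/enable sequence, just without enabling the pipe */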
+ if (IS_CHERRYVIEW(dev_priv)) {
+ chv_compute_dpll(crtc, pipe_config);
+ chv_prepare_pll(crtc, pipe_config);
+ chv_enable_pll(crtc, pipe_config);
+ } else {
+ vlv_compute_dpll(crtc, pipe_config);
+ vlv_prepare_pll(crtc, pipe_config);
+ vlv_enable_pll(crtc, pipe_config);
+ }
+
+ kfree(pipe_config);
+
+ return 0;
+}
+
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+
+ val = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+
+ val = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+
+ vlv_dpio_get(dev_priv);
+
+ /* Disable 10bit clock to display controller */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val &= ~DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* i830 always needs its pipes and pipe PLLs enabled, so never disable them */
+ if (IS_I830(dev_priv))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. To be used in cases where the PLL was
+ * previously force-enabled with vlv_force_pll_on() while @pipe itself
+ * was not enabled.
+ */
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
+ else
+ vlv_disable_pll(dev_priv, pipe);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index caf4615092e1..7ff4b0d29ed1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -10,6 +10,7 @@ struct dpll;
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
+enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
@@ -20,4 +21,21 @@ void vlv_compute_dpll(struct intel_crtc *crtc,
void chv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+void vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
+void vlv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+void chv_prepare_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index f6ad257a260e..1ae158d12c07 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -176,7 +176,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
return;
mutex_lock(&dev_priv->dpll.lock);
- drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
+ drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
if (!pll->active_mask) {
drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
drm_WARN_ON(&dev_priv->drm, pll->on);
@@ -198,7 +198,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int pipe_mask = BIT(crtc->pipe);
unsigned int old_mask;
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
@@ -207,16 +207,16 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
- if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
- drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
+ drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
goto out;
- pll->active_mask |= crtc_mask;
+ pll->active_mask |= pipe_mask;
drm_dbg_kms(&dev_priv->drm,
- "enable %s (active %x, on? %d) for crtc %d\n",
+ "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ crtc->base.base.id, crtc->base.name);
if (old_mask) {
drm_WARN_ON(&dev_priv->drm, !pll->on);
@@ -244,28 +244,30 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ unsigned int pipe_mask = BIT(crtc->pipe);
/* PCH only available on ILK+ */
- if (INTEL_GEN(dev_priv) < 5)
+ if (DISPLAY_VER(dev_priv) < 5)
return;
if (pll == NULL)
return;
mutex_lock(&dev_priv->dpll.lock);
- if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
+ if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
+ "%s not used by [CRTC:%d:%s]\n", pll->info->name,
+ crtc->base.base.id, crtc->base.name))
goto out;
drm_dbg_kms(&dev_priv->drm,
- "disable %s (active %x, on? %d) for crtc %d\n",
+ "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ crtc->base.base.id, crtc->base.name);
assert_shared_dpll_enabled(dev_priv, pll);
drm_WARN_ON(&dev_priv->drm, !pll->on);
- pll->active_mask &= ~crtc_mask;
+ pll->active_mask &= ~pipe_mask;
if (pll->active_mask)
goto out;
@@ -296,7 +298,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
- if (shared_dpll[i].crtc_mask == 0) {
+ if (shared_dpll[i].pipe_mask == 0) {
if (!unused_pll)
unused_pll = pll;
continue;
@@ -306,10 +308,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
&shared_dpll[i].hw_state,
sizeof(*pll_state)) == 0) {
drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
- shared_dpll[i].crtc_mask,
+ shared_dpll[i].pipe_mask,
pll->active_mask);
return pll;
}
@@ -338,13 +340,13 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- if (shared_dpll[id].crtc_mask == 0)
+ if (shared_dpll[id].pipe_mask == 0)
shared_dpll[id].hw_state = *pll_state;
drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
pipe_name(crtc->pipe));
- shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
+ shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
}
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
@@ -354,7 +356,7 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
struct intel_shared_dpll_state *shared_dpll;
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+ shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
}
static void intel_put_dpll(struct intel_atomic_state *state,
@@ -3015,7 +3017,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
switch (dev_priv->dpll.ref_clks.nssc) {
default:
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
@@ -3110,7 +3112,7 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
DPLL_CFGCR1_KDIV(pll_params->kdiv) |
DPLL_CFGCR1_PDIV(pll_params->pdiv);
- if (INTEL_GEN(i915) >= 12)
+ if (DISPLAY_VER(i915) >= 12)
pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
@@ -3220,7 +3222,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
- bool is_dkl = INTEL_GEN(dev_priv) >= 12;
+ bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
memset(pll_state, 0, sizeof(*pll_state));
@@ -3420,7 +3422,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
ref_clock = dev_priv->dpll.ref_clks.nssc;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
@@ -3559,7 +3561,13 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ dpll_mask =
+ BIT(DPLL_ID_DG1_DPLL3) |
+ BIT(DPLL_ID_DG1_DPLL2) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ } else if (IS_DG1(dev_priv)) {
if (port == PORT_D || port == PORT_E) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL2) |
@@ -3865,7 +3873,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
+ } else if (IS_DG1(dev_priv)) {
hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
} else if (IS_ROCKETLAKE(dev_priv)) {
@@ -3873,7 +3884,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
RKL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
RKL_DPLL_CFGCR1(id));
- } else if (INTEL_GEN(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3921,13 +3932,16 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg;
- if (IS_DG1(dev_priv)) {
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
+ cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
+ } else if (IS_DG1(dev_priv)) {
cfgcr0_reg = DG1_DPLL_CFGCR0(id);
cfgcr1_reg = DG1_DPLL_CFGCR1(id);
} else if (IS_ROCKETLAKE(dev_priv)) {
cfgcr0_reg = RKL_DPLL_CFGCR0(id);
cfgcr1_reg = RKL_DPLL_CFGCR1(id);
- } else if (INTEL_GEN(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
} else {
@@ -4158,7 +4172,7 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
icl_pll_power_enable(dev_priv, pll, enable_reg);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
dkl_pll_write(dev_priv, pll);
else
icl_mg_pll_write(dev_priv, pll);
@@ -4185,7 +4199,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
* paths should already be setting the appropriate voltage, hence we do
- * nothign here.
+ * nothing here.
*/
val = intel_de_read(dev_priv, enable_reg);
@@ -4384,6 +4398,22 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
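+/* ADL-S reuses the ICL combo PHY PLL code; only the CFGCR register offsets differ */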
+static const struct dpll_info adls_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
+ { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr adls_pll_mgr = {
+ .dpll_info = adls_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -4397,15 +4427,17 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_DG1(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ dpll_mgr = &adls_pll_mgr;
+ else if (IS_DG1(dev_priv))
dpll_mgr = &dg1_pll_mgr;
else if (IS_ROCKETLAKE(dev_priv))
dpll_mgr = &rkl_pll_mgr;
- else if (INTEL_GEN(dev_priv) >= 12)
+ else if (DISPLAY_VER(dev_priv) >= 12)
dpll_mgr = &tgl_pll_mgr;
else if (IS_JSL_EHL(dev_priv))
dpll_mgr = &ehl_pll_mgr;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
@@ -4567,27 +4599,30 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
POWER_DOMAIN_DPLL_DC_OFF);
}
- pll->state.crtc_mask = 0;
+ pll->state.pipe_mask = 0;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
+ pll->state.pipe_mask |= BIT(crtc->pipe);
}
- pll->active_mask = pll->state.crtc_mask;
+ pll->active_mask = pll->state.pipe_mask;
drm_dbg_kms(&i915->drm,
- "%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
+ "%s hw state readout: pipe_mask 0x%x, on %i\n",
+ pll->info->name, pll->state.pipe_mask, pll->on);
}
-void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
- int i;
-
if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
i915->dpll.mgr->update_ref_clks(i915);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
for (i = 0; i < i915->dpll.num_shared_dpll; i++)
readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2eb7618ef957..7fd031a70cfd 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -241,9 +241,9 @@ struct intel_dpll_hw_state {
*/
struct intel_shared_dpll_state {
/**
- * @crtc_mask: mask of CRTC using this DPLL, active or not
+ * @pipe_mask: mask of pipes using this DPLL, active or not
*/
- unsigned crtc_mask;
+ u8 pipe_mask;
/**
* @hw_state: hardware configuration for the DPLL stored in
@@ -351,9 +351,9 @@ struct intel_shared_dpll {
struct intel_shared_dpll_state state;
/**
- * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
+ * @active_mask: mask of active pipes (i.e. DPMS on) using this DPLL
*/
- unsigned active_mask;
+ u8 active_mask;
/**
* @on: is the PLL actually active? Disabled during modeset
@@ -410,6 +410,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 566fa72427b3..857126822a88 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -293,7 +293,7 @@ void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
goto out;
}
- buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
drm_err(&i915->drm, "Command buffer creation failed\n");
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index eed037ec0b29..c2a2cd1f84dc 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -203,7 +203,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- if (INTEL_GEN(dev_priv) < 11)
+ if (DISPLAY_VER(dev_priv) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -380,7 +380,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -425,7 +425,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 slave_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct device *dev = &drm_dev->pdev->dev;
+ struct device *dev = drm_dev->dev;
struct acpi_device *acpi_dev;
struct list_head resource_list;
struct i2c_adapter_lookup lookup;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
new file mode 100644
index 000000000000..fca41ac5b8e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_framebuffer.h>
+
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+
+#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
+
+bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (!is_ccs_modifier(fb->modifier))
+ return false;
+
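+ /* The main surfaces occupy the first half of the format's planes, their CCS surfaces the second half */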
+ return plane >= fb->format->num_planes / 2;
+}
+
+bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+}
+
+bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
+ plane == 2;
+}
+
+bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return is_ccs_plane(fb, plane);
+
+ return plane == 1;
+}
+
+bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ color_plane == 1;
+}
+
+bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
+{
+ return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+ is_gen12_ccs_plane(fb, color_plane);
+}
+
+int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
+
+ return fb->format->num_planes / 2 + main_plane;
+}
+
+int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+ drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
+
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
+ return ccs_plane - fb->format->num_planes / 2;
+}
+
+int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+
+ if (is_ccs_modifier(fb->modifier))
+ return main_to_ccs_plane(fb, main_plane);
+ else if (DISPLAY_VER(i915) < 11 &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int intel_tile_size(const struct drm_i915_private *i915)
+{
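+ /* gen2 uses 2KiB GTT tiles, everything else 4KiB */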
+ return IS_DISPLAY_VER(i915, 2) ? 2048 : 4096;
+}
+
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+{
+ if (is_gen12_ccs_plane(fb, color_plane))
+ return 1;
+
+ return intel_tile_size(to_i915(fb->dev)) /
+ intel_tile_width_bytes(fb, color_plane);
+}
+
+/* Return the tile dimensions in pixel units */
+static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
+ unsigned int *tile_width,
+ unsigned int *tile_height)
+{
+ unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ *tile_width = tile_width_bytes / cpp;
+ *tile_height = intel_tile_height(fb, color_plane);
+}
+
+unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane)
+{
+ unsigned int tile_width, tile_height;
+
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ return fb->pitches[color_plane] * tile_height;
+}
+
+unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
+{
+ if (IS_I830(i915))
+ return 16 * 1024;
+ else if (IS_I85X(i915))
+ return 256;
+ else if (IS_I845G(i915) || IS_I865G(i915))
+ return 32;
+ else
+ return 4 * 1024;
+}
+
+void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ int main_plane;
+
+ if (color_plane == 0) {
+ *hsub = 1;
+ *vsub = 1;
+
+ return;
+ }
+
+ /*
+ * TODO: Deduce the subsampling from the char block for all CCS
+ * formats and planes.
+ */
+ if (!is_gen12_ccs_plane(fb, color_plane)) {
+ *hsub = fb->format->hsub;
+ *vsub = fb->format->vsub;
+
+ return;
+ }
+
+ main_plane = skl_ccs_to_main_plane(fb, color_plane);
+ *hsub = drm_format_info_block_width(fb->format, color_plane) /
+ drm_format_info_block_width(fb->format, main_plane);
+
+ /*
+ * The min stride check in the core framebuffer_check() function
+ * assumes that format->hsub applies to every plane except for the
+ * first plane. That's incorrect for the CCS AUX plane of the first
+ * plane, but for the above check to pass we must define the block
+ * width with that subsampling applied to it. Adjust the width here
+ * accordingly, so we can calculate the actual subsampling factor.
+ */
+ if (main_plane == 0)
+ *hsub *= fb->format->hsub;
+
+ *vsub = 32;
+}
+
+static void intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
+{
+ int main_plane = is_ccs_plane(fb, color_plane) ?
+ skl_ccs_to_main_plane(fb, color_plane) : 0;
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+
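+ /* Apply the main surface's subsampling first, then the plane's own factor relative to its main surface */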
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
+ *w = fb->width / main_hsub / hsub;
+ *h = fb->height / main_vsub / vsub;
+}
+
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
+{
+ unsigned int pitch_pixels = pitch_tiles * tile_width;
+ unsigned int tiles;
+
+ WARN_ON(old_offset & (tile_size - 1));
+ WARN_ON(new_offset & (tile_size - 1));
+ WARN_ON(new_offset > old_offset);
+
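+ /* Convert the tile-aligned byte delta into whole tiles and fold them into x/y */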
+ tiles = (old_offset - new_offset) / tile_size;
+
+ *y += tiles / pitch_tiles * tile_height;
+ *x += tiles % pitch_tiles * tile_width;
+
+ /* minimize x in case it got needlessly big */
+ *y += *x / pitch_pixels * tile_height;
+ *x %= pitch_pixels;
+
+ return new_offset;
+}
+
+static u32 intel_adjust_aligned_offset(int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation,
+ unsigned int pitch,
+ u32 old_offset, u32 new_offset)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ drm_WARN_ON(&i915->drm, new_offset > old_offset);
+
+ if (!is_surface_linear(fb, color_plane)) {
+ unsigned int tile_size, tile_width, tile_height;
+ unsigned int pitch_tiles;
+
+ tile_size = intel_tile_size(i915);
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ if (drm_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
+
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ old_offset, new_offset);
+ } else {
+ old_offset += *y * pitch + *x * cpp;
+
+ *y = (old_offset - new_offset) / pitch;
+ *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+ }
+
+ return new_offset;
+}
+
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset)
+{
+ return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
+ state->hw.rotation,
+ state->view.color_plane[color_plane].stride,
+ old_offset, new_offset);
+}
+
+/*
+ * Computes the aligned offset to the base tile and adjusts
+ * x, y. Bytes per pixel is assumed to be a power-of-two.
+ *
+ * In the 90/270 rotated case, x and y are assumed
+ * to be already rotated to match the rotated GTT view, and
+ * pitch is the tile_height aligned framebuffer height.
+ *
+ * This function is used when computing the derived information
+ * under intel_framebuffer, so using any of that information
+ * here is not allowed. Anything under drm_framebuffer can be
+ * used. This is why the user has to pass in the pitch since it
+ * is specified in the rotated orientation.
+ */
+static u32 intel_compute_aligned_offset(struct drm_i915_private *i915,
+ int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int pitch,
+ unsigned int rotation,
+ u32 alignment)
+{
+ unsigned int cpp = fb->format->cpp[color_plane];
+ u32 offset, offset_aligned;
+
+ if (!is_surface_linear(fb, color_plane)) {
+ unsigned int tile_size, tile_width, tile_height;
+ unsigned int tile_rows, tiles, pitch_tiles;
+
+ tile_size = intel_tile_size(i915);
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ if (drm_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
+
+ tile_rows = *y / tile_height;
+ *y %= tile_height;
+
+ tiles = *x / tile_width;
+ *x %= tile_width;
+
+ offset = (tile_rows * pitch_tiles + tiles) * tile_size;
+
+ offset_aligned = offset;
+ if (alignment)
+ offset_aligned = rounddown(offset_aligned, alignment);
+
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
+ } else {
+ offset = *y * pitch + *x * cpp;
+ offset_aligned = offset;
+ if (alignment) {
+ offset_aligned = rounddown(offset_aligned, alignment);
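+ /* Fold the bytes between offset_aligned and offset back into x/y */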
+ *y = (offset % alignment) / pitch;
+ *x = ((offset % alignment) - *y * pitch) / cpp;
+ } else {
+ *y = *x = 0;
+ }
+ }
+
+ return offset_aligned;
+}
+
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
+ const struct drm_framebuffer *fb = state->hw.fb;
+ unsigned int rotation = state->hw.rotation;
+ int pitch = state->view.color_plane[color_plane].stride;
+ u32 alignment;
+
+ if (intel_plane->id == PLANE_CURSOR)
+ alignment = intel_cursor_alignment(i915);
+ else
+ alignment = intel_surf_alignment(fb, color_plane);
+
+ return intel_compute_aligned_offset(i915, x, y, fb, color_plane,
+ pitch, rotation, alignment);
+}
+
+/* Convert the fb->offset[] into x/y offsets */
+static int intel_fb_offset_to_xy(int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+ unsigned int height;
+ u32 alignment;
+
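+ /* Tiled offsets must be tile size aligned; gen12 semiplanar UV planes need a whole tile row of alignment */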
+ if (DISPLAY_VER(i915) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ alignment = intel_tile_row_size(fb, color_plane);
+ else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ alignment = intel_tile_size(i915);
+ else
+ alignment = 0;
+
+ if (alignment != 0 && fb->offsets[color_plane] % alignment) {
+ drm_dbg_kms(&i915->drm,
+ "Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
+ return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ drm_dbg_kms(&i915->drm,
+ "Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
+
+ *x = 0;
+ *y = 0;
+
+ intel_adjust_aligned_offset(x, y,
+ fb, color_plane, DRM_MODE_ROTATE_0,
+ fb->pitches[color_plane],
+ fb->offsets[color_plane], 0);
+
+ return 0;
+}
+
+static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane, int x, int y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+ const struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ int main_plane;
+ int hsub, vsub;
+ int tile_width, tile_height;
+ int ccs_x, ccs_y;
+ int main_x, main_y;
+
+ if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
+ intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ tile_width *= hsub;
+ tile_height *= vsub;
+
+ ccs_x = (x * hsub) % tile_width;
+ ccs_y = (y * vsub) % tile_height;
+
+ main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
+ main_x = intel_fb->normal_view.color_plane[main_plane].x % tile_width;
+ main_y = intel_fb->normal_view.color_plane[main_plane].y % tile_height;
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ drm_dbg_kms(&i915->drm,
+ "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal_view.color_plane[main_plane].x,
+ intel_fb->normal_view.color_plane[main_plane].y,
+ x, y);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int i;
+
+ /* We don't want to deal with remapping with cursors */
+ if (plane->id == PLANE_CURSOR)
+ return false;
+
+ /*
+ * The display engine limits already match/exceed the
+ * render engine limits, so not much point in remapping.
+ * Would also need to deal with the fence POT alignment
+ * and gen2 2KiB GTT tile size.
+ */
+ if (DISPLAY_VER(i915) < 4)
+ return false;
+
+ /*
+ * The new CCS hash mode isn't compatible with remapping as
+ * the virtual address of the pages affects the compressed data.
+ */
+ if (is_ccs_modifier(fb->modifier))
+ return false;
+
+ /* Linear needs a page aligned stride for remapping */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ unsigned int alignment = intel_tile_size(i915) - 1;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->pitches[i] & alignment)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
+{
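+ /* Always false for now; a hook for platforms that will need power-of-two padded remap strides */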
+ return false;
+}
+
+static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
+{
+ if (drm_rotation_90_or_270(rotation))
+ return fb->rotated_view.color_plane[color_plane].stride;
+ else if (intel_fb_needs_pot_stride_remap(fb))
+ return fb->remapped_view.color_plane[color_plane].stride;
+ else
+ return fb->normal_view.color_plane[color_plane].stride;
+}
+
+static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride, max_stride;
+
+ /*
+ * No remapping for invisible planes since we don't have
+ * an actual source viewport to remap.
+ */
+ if (!plane_state->uapi.visible)
+ return false;
+
+ if (!intel_plane_can_remap(plane_state))
+ return false;
+
+ /*
+ * FIXME: aux plane limits on gen9+ are
+ * unclear in Bspec, for now no checking.
+ */
+ stride = intel_fb_pitch(fb, 0, rotation);
+ max_stride = plane->max_stride(plane, fb->base.format->format,
+ fb->base.modifier, rotation);
+
+ return stride > max_stride;
+}
+
+static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane,
+ int plane_width, int *x, int *y)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
+ int ret;
+
+ ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane);
+ if (ret) {
+ drm_dbg_kms(fb->base.dev,
+ "bad fb plane %d offset: 0x%x\n",
+ color_plane, fb->base.offsets[color_plane]);
+ return ret;
+ }
+
+ ret = intel_fb_check_ccs_xy(&fb->base, color_plane, *x, *y);
+ if (ret)
+ return ret;
+
+ /*
+ * The fence (if used) is aligned to the start of the object
+ * so having the framebuffer wrap around across the edge of the
+ * fenced region doesn't really work. We have no API to configure
+ * the fence start offset within the object (nor could we probably
+ * on gen2/3). So it's easier to just require that the
+ * fb layout agrees with the fence layout. We already check that the
+ * fb stride matches the fence stride elsewhere.
+ */
+ if (color_plane == 0 && i915_gem_object_is_tiled(obj) &&
+ (*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) {
+ drm_dbg_kms(fb->base.dev,
+ "bad fb plane %d offset: 0x%x\n",
+ color_plane, fb->base.offsets[color_plane]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u32 calc_plane_aligned_offset(const struct intel_framebuffer *fb, int color_plane, int *x, int *y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int tile_size = intel_tile_size(i915);
+ u32 offset;
+
+ offset = intel_compute_aligned_offset(i915, x, y, &fb->base, color_plane,
+ fb->base.pitches[color_plane],
+ DRM_MODE_ROTATE_0,
+ tile_size);
+
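+ /* Callers want the offset in tile units, not bytes */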
+ return offset / tile_size;
+}
+
+struct fb_plane_view_dims {
+ unsigned int width, height;
+ unsigned int tile_width, tile_height;
+};
+
+static void init_plane_view_dims(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int width, unsigned int height,
+ struct fb_plane_view_dims *dims)
+{
+ dims->width = width;
+ dims->height = height;
+
+ intel_tile_dims(&fb->base, color_plane, &dims->tile_width, &dims->tile_height);
+}
+
+static unsigned int
+plane_view_src_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims)
+{
+ return DIV_ROUND_UP(fb->base.pitches[color_plane],
+ dims->tile_width * fb->base.format->cpp[color_plane]);
+}
+
+static unsigned int
+plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int pitch_tiles)
+{
+ if (intel_fb_needs_pot_stride_remap(fb))
+ return roundup_pow_of_two(pitch_tiles);
+ else
+ return pitch_tiles;
+}
+
+static unsigned int
+plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int x)
+{
+ return DIV_ROUND_UP(x + dims->width, dims->tile_width);
+}
+
+static unsigned int
+plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int y)
+{
+ return DIV_ROUND_UP(y + dims->height, dims->tile_height);
+}
+
+#define assign_chk_ovf(i915, var, val) ({ \
+ drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \
+ (var) = (val); \
+})
+
+static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ u32 obj_offset, u32 gtt_offset, int x, int y,
+ struct intel_fb_view *view)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ struct intel_remapped_plane_info *remap_info = &view->gtt.remapped.plane[color_plane];
+ struct i915_color_plane_view *color_plane_info = &view->color_plane[color_plane];
+ unsigned int tile_width = dims->tile_width;
+ unsigned int tile_height = dims->tile_height;
+ unsigned int tile_size = intel_tile_size(i915);
+ struct drm_rect r;
+ u32 size;
+
+ assign_chk_ovf(i915, remap_info->offset, obj_offset);
+ assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims));
+ assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x));
+ assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y));
+
+ if (view->gtt.type == I915_GGTT_VIEW_ROTATED) {
+ check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
+
+ assign_chk_ovf(i915, remap_info->dst_stride,
+ plane_view_dst_stride_tiles(fb, color_plane, remap_info->height));
+
+ /* rotate the x/y offsets to match the GTT view */
+ drm_rect_init(&r, x, y, dims->width, dims->height);
+ drm_rect_rotate(&r,
+ remap_info->width * tile_width,
+ remap_info->height * tile_height,
+ DRM_MODE_ROTATE_270);
+
+ color_plane_info->x = r.x1;
+ color_plane_info->y = r.y1;
+
+ color_plane_info->stride = remap_info->dst_stride * tile_height;
+
+ size = remap_info->dst_stride * remap_info->width;
+
+ /* rotate the tile dimensions to match the GTT view */
+ swap(tile_width, tile_height);
+ } else {
+ drm_WARN_ON(&i915->drm, view->gtt.type != I915_GGTT_VIEW_REMAPPED);
+
+ check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
+
+ assign_chk_ovf(i915, remap_info->dst_stride,
+ plane_view_dst_stride_tiles(fb, color_plane, remap_info->width));
+
+ color_plane_info->x = x;
+ color_plane_info->y = y;
+
+ color_plane_info->stride = remap_info->dst_stride * tile_width *
+ fb->base.format->cpp[color_plane];
+
+ size = remap_info->dst_stride * remap_info->height;
+ }
+
+ /*
+ * We only keep the x/y offsets, so push all of the gtt offset into
+ * the x/y offsets. x,y will hold the first pixel of the framebuffer
+ * plane from the start of the remapped/rotated gtt mapping.
+ */
+ intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y,
+ tile_width, tile_height,
+ tile_size, remap_info->dst_stride,
+ gtt_offset * tile_size, 0);
+
+ return size;
+}
+
+#undef assign_chk_ovf
+
+/* Return number of tiles @color_plane needs. */
+static unsigned int
+calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int x, int y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int tiles;
+
+ if (is_surface_linear(&fb->base, color_plane)) {
+ unsigned int size;
+
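+ /* Linear: compute the byte footprint and round up to whole tiles */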
+ size = (y + dims->height) * fb->base.pitches[color_plane] +
+ x * fb->base.format->cpp[color_plane];
+ tiles = DIV_ROUND_UP(size, intel_tile_size(i915));
+ } else {
+ tiles = plane_view_src_stride_tiles(fb, color_plane, dims) *
+ plane_view_height_tiles(fb, color_plane, dims, y);
+ /*
+ * If the plane isn't horizontally tile aligned,
+ * we need one more tile.
+ */
+ if (x != 0)
+ tiles++;
+ }
+
+ return tiles;
+}
+
+static void intel_fb_view_init(struct intel_fb_view *view, enum i915_ggtt_view_type view_type)
+{
+ memset(view, 0, sizeof(*view));
+ view->gtt.type = view_type;
+}
+
+int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 gtt_offset_rotated = 0;
+ u32 gtt_offset_remapped = 0;
+ unsigned int max_size = 0;
+ int i, num_planes = fb->format->num_planes;
+ unsigned int tile_size = intel_tile_size(i915);
+
+ intel_fb_view_init(&intel_fb->normal_view, I915_GGTT_VIEW_NORMAL);
+ intel_fb_view_init(&intel_fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ intel_fb_view_init(&intel_fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+
+ for (i = 0; i < num_planes; i++) {
+ struct fb_plane_view_dims view_dims;
+ unsigned int width, height;
+ unsigned int cpp, size;
+ u32 offset;
+ int x, y;
+ int ret;
+
+ /*
+ * Plane 2 of Render Compression with Clear Color fb modifier
+ * is consumed by the driver and not passed to DE. Skip the
+ * arithmetic related to alignment and offset calculation.
+ */
+ if (is_gen12_ccs_cc_plane(fb, i)) {
+ if (IS_ALIGNED(fb->offsets[i], PAGE_SIZE))
+ continue;
+ else
+ return -EINVAL;
+ }
+
+ cpp = fb->format->cpp[i];
+ intel_fb_plane_dims(&width, &height, fb, i);
+
+ ret = convert_plane_offset_to_xy(intel_fb, i, width, &x, &y);
+ if (ret)
+ return ret;
+
+ init_plane_view_dims(intel_fb, i, width, height, &view_dims);
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the normal gtt mapping.
+ */
+ intel_fb->normal_view.color_plane[i].x = x;
+ intel_fb->normal_view.color_plane[i].y = y;
+ intel_fb->normal_view.color_plane[i].stride = intel_fb->base.pitches[i];
+
+ offset = calc_plane_aligned_offset(intel_fb, i, &x, &y);
+
+ /* Y or Yf modifiers required for 90/270 rotation */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED)
+ gtt_offset_rotated += calc_plane_remap_info(intel_fb, i, &view_dims,
+ offset, gtt_offset_rotated, x, y,
+ &intel_fb->rotated_view);
+
+ if (intel_fb_needs_pot_stride_remap(intel_fb))
+ gtt_offset_remapped += calc_plane_remap_info(intel_fb, i, &view_dims,
+ offset, gtt_offset_remapped, x, y,
+ &intel_fb->remapped_view);
+
+ size = calc_plane_normal_size(intel_fb, i, &view_dims, x, y);
+ /* how many tiles the bo needs in total */
+ max_size = max(max_size, offset + size);
+ }
+
+ if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+ drm_dbg_kms(&i915->drm,
+ "fb too big for bo (need %llu bytes, have %zu bytes)\n",
+ mul_u32_u32(max_size, tile_size), obj->base.size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 =
+ to_i915(plane_state->uapi.plane->dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ unsigned int rotation = plane_state->hw.rotation;
+ int i, num_planes = fb->format->num_planes;
+ unsigned int src_x, src_y;
+ unsigned int src_w, src_h;
+ u32 gtt_offset = 0;
+
+ intel_fb_view_init(&plane_state->view,
+ drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED :
+ I915_GGTT_VIEW_REMAPPED);
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
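+ /* intel_plane_can_remap() rejects CCS, so this should never trigger */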
+ drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier));
+
+ /* Make src coordinates relative to the viewport */
+ drm_rect_translate(&plane_state->uapi.src,
+ -(src_x << 16), -(src_y << 16));
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->uapi.src,
+ src_w << 16, src_h << 16,
+ DRM_MODE_ROTATE_270);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int hsub = i ? fb->format->hsub : 1;
+ unsigned int vsub = i ? fb->format->vsub : 1;
+ struct fb_plane_view_dims view_dims;
+ unsigned int width, height;
+ unsigned int x, y;
+ u32 offset;
+
+ x = src_x / hsub;
+ y = src_y / vsub;
+ width = src_w / hsub;
+ height = src_h / vsub;
+
+ init_plane_view_dims(intel_fb, i, width, height, &view_dims);
+
+ /*
+ * First pixel of the src viewport from the
+ * start of the normal gtt mapping.
+ */
+ x += intel_fb->normal_view.color_plane[i].x;
+ y += intel_fb->normal_view.color_plane[i].y;
+
+ offset = calc_plane_aligned_offset(intel_fb, i, &x, &y);
+
+ gtt_offset += calc_plane_remap_info(intel_fb, i, &view_dims,
+ offset, gtt_offset, x, y,
+ &plane_state->view);
+ }
+}
+
+void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
+ struct intel_fb_view *view)
+{
+ if (drm_rotation_90_or_270(rotation))
+ *view = fb->rotated_view;
+ else if (intel_fb_needs_pot_stride_remap(fb))
+ *view = fb->remapped_view;
+ else
+ *view = fb->normal_view;
+}
+
+static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride, max_stride;
+
+ /*
+ * We ignore stride for all invisible planes that
+ * can be remapped. Otherwise we could end up
+ * with a false positive when the remapping didn't
+ * kick in due to the plane being invisible.
+ */
+ if (intel_plane_can_remap(plane_state) &&
+ !plane_state->uapi.visible)
+ return 0;
+
+ /* FIXME other color planes? */
+ stride = plane_state->view.color_plane[0].stride;
+ max_stride = plane->max_stride(plane, fb->format->format,
+ fb->modifier, rotation);
+
+ if (stride > max_stride) {
+ DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
+ fb->base.id, stride,
+ plane->base.base.id, plane->base.name, max_stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state)
+{
+ const struct intel_framebuffer *fb =
+ to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
+
+ if (!fb)
+ return 0;
+
+ if (intel_plane_needs_remap(plane_state)) {
+ intel_plane_remap_gtt(plane_state);
+
+ /*
+ * Sometimes even remapping can't overcome
+ * the stride limitations :( Can happen with
+ * big plane sizes and suitably misaligned
+ * offsets.
+ */
+ return intel_plane_check_stride(plane_state);
+ }
+
+ intel_fb_fill_view(fb, rotation, &plane_state->view);
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->uapi.src,
+ fb->base.width << 16, fb->base.height << 16,
+ DRM_MODE_ROTATE_270);
+
+ return intel_plane_check_stride(plane_state);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
new file mode 100644
index 000000000000..6acf792a8c44
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_H__
+#define __INTEL_FB_H__
+
+#include <linux/types.h>
+
+struct drm_framebuffer;
+
+struct drm_i915_private;
+
+struct intel_fb_view;
+struct intel_framebuffer;
+struct intel_plane_state;
+
+bool is_ccs_plane(const struct drm_framebuffer *fb, int plane);
+bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane);
+bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane);
+bool is_aux_plane(const struct drm_framebuffer *fb, int plane);
+bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane);
+
+bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane);
+
+int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane);
+int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
+int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
+
+unsigned int intel_tile_size(const struct drm_i915_private *i915);
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);
+unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane);
+
+unsigned int intel_cursor_alignment(const struct drm_i915_private *i915);
+
+void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane);
+
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane);
+
+int intel_fill_fb_info(struct drm_i915_private *i915, struct drm_framebuffer *fb);
+void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
+ struct intel_fb_view *view);
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+
+#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 5fd4fa4805ef..957252b695d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -67,9 +67,9 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (IS_GEN(dev_priv, 7))
+ if (IS_DISPLAY_VER(dev_priv, 7))
lines = min(lines, 2048);
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (DISPLAY_VER(dev_priv) >= 8)
lines = min(lines, 2560);
/* Hardware needs the full buffer stride, not just the active area. */
@@ -109,7 +109,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
cfb_pitch = params->fb.stride;
/* FBC_CTL wants 32B or 64B units */
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -118,7 +118,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
intel_de_write(dev_priv, FBC_TAG(i), 0);
- if (IS_GEN(dev_priv, 4)) {
+ if (IS_DISPLAY_VER(dev_priv, 4)) {
u32 fbc_ctl2;
/* Set it up... */
@@ -222,9 +222,9 @@ static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 6)
+ if (DISPLAY_VER(dev_priv) >= 6)
snb_fbc_recompress(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (DISPLAY_VER(dev_priv) >= 4)
i965_fbc_recompress(dev_priv);
else
i8xx_fbc_recompress(dev_priv);
@@ -255,16 +255,16 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN(dev_priv, 5))
+ if (IS_IRONLAKE(dev_priv))
dpfc_ctl |= params->fence_id;
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_SANDYBRIDGE(dev_priv)) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
params->fence_y_offset);
}
} else {
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_SANDYBRIDGE(dev_priv)) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
@@ -354,7 +354,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 5)
+ if (DISPLAY_VER(dev_priv) >= 5)
return ilk_fbc_is_active(dev_priv);
else if (IS_GM45(dev_priv))
return g4x_fbc_is_active(dev_priv);
@@ -371,9 +371,9 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
fbc->active = true;
fbc->activated = true;
- if (INTEL_GEN(dev_priv) >= 7)
+ if (DISPLAY_VER(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 5)
+ else if (DISPLAY_VER(dev_priv) >= 5)
ilk_fbc_activate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_activate(dev_priv);
@@ -389,7 +389,7 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
fbc->active = false;
- if (INTEL_GEN(dev_priv) >= 5)
+ if (DISPLAY_VER(dev_priv) >= 5)
ilk_fbc_deactivate(dev_priv);
else if (IS_GM45(dev_priv))
g4x_fbc_deactivate(dev_priv);
@@ -426,7 +426,7 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
+ if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
return BIT_ULL(28);
else
return BIT_ULL(32);
@@ -473,7 +473,7 @@ again:
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
- if (ret && INTEL_GEN(dev_priv) <= 4) {
+ if (ret && DISPLAY_VER(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
@@ -504,7 +504,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->threshold = ret;
- if (INTEL_GEN(dev_priv) >= 5)
+ if (DISPLAY_VER(dev_priv) >= 5)
intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
@@ -590,14 +590,14 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
if (stride < 512)
return false;
- if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
+ if (IS_DISPLAY_VER(dev_priv, 2) || IS_DISPLAY_VER(dev_priv, 3))
return stride == 4096 || stride == 8192;
- if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
+ if (IS_DISPLAY_VER(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
return false;
/* Display WA #1105: skl,bxt,kbl,cfl,glk */
- if (IS_GEN(dev_priv, 9) &&
+ if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
return false;
@@ -617,7 +617,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
@@ -631,10 +631,10 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
static bool rotation_is_valid(struct drm_i915_private *dev_priv,
u32 pixel_format, unsigned int rotation)
{
- if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+ if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
drm_rotation_90_or_270(rotation))
return false;
- else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+ else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
rotation != DRM_MODE_ROTATE_0)
return false;
@@ -653,13 +653,13 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 10) {
max_w = 5120;
max_h = 4096;
- } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
- } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+ } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) {
max_w = 4096;
max_h = 2048;
} else {
@@ -680,7 +680,7 @@ static bool tiling_is_valid(struct drm_i915_private *dev_priv,
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
return true;
return false;
case I915_FORMAT_MOD_X_TILED:
@@ -716,8 +716,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
*/
cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- cache->plane.adjusted_x = plane_state->color_plane[0].x;
- cache->plane.adjusted_y = plane_state->color_plane[0].y;
+ cache->plane.adjusted_x = plane_state->view.color_plane[0].x;
+ cache->plane.adjusted_y = plane_state->view.color_plane[0].y;
cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
@@ -725,7 +725,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.modifier = fb->modifier;
/* FIXME is this correct? */
- cache->fb.stride = plane_state->color_plane[0].stride;
+ cache->fb.stride = plane_state->view.color_plane[0].stride;
if (drm_rotation_90_or_270(plane_state->hw.rotation))
cache->fb.stride *= fb->format->cpp[0];
@@ -844,7 +844,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
+ if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -903,14 +903,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (INTEL_GEN(dev_priv) >= 9 &&
+ if (DISPLAY_VER(dev_priv) >= 9 &&
(fbc->state_cache.plane.adjusted_y & 3)) {
fbc->no_fbc_reason = "plane Y offset is misaligned";
return false;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (INTEL_GEN(dev_priv) >= 11 &&
+ if (DISPLAY_VER(dev_priv) >= 11 &&
(cache->plane.src_h + cache->plane.adjusted_y) % 4) {
fbc->no_fbc_reason = "plane height + offset is non-modulo of 4";
return false;
@@ -1036,7 +1036,7 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
* if at least one frame has already passed.
*/
if (fbc->activated &&
- (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ DISPLAY_VER(dev_priv) >= 10)
need_vblank_wait = true;
fbc->activated = false;
}
@@ -1445,7 +1445,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
+ if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9)
return 1;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 84f853f113b9..ccd00e65a5fe 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -167,7 +167,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
@@ -211,7 +211,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, false,
&view, false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index b2eb96ae10a2..d719cd9c5b73 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -3,6 +3,8 @@
* Copyright © 2020 Intel Corporation
*/
#include "intel_atomic.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
@@ -371,7 +373,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_SANDYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -550,6 +552,142 @@ train_done:
drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+void hsw_fdi_link_train(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 temp, i, rx_ctl_val;
+ int n_entries;
+
+ intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries);
+
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ *
+ * WaFDIAutoLinkSetTimingOverrride:hsw
+ */
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE |
+ FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ /* Start the training, iterating through the available voltages and emphasis,
+ * testing each value twice. */
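+ /* i.e. iterations i = 0,1 use DDI_BUF_TRANS_SELECT(0), i = 2,3 use
+ * DDI_BUF_TRANS_SELECT(1), and so on up to entry n_entries - 1. */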
+ for (i = 0; i < n_entries * 2; i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal, the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+
+ udelay(600);
+
+ /* Program PCH FDI Receiver TU */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+
+ /* Wait for FDI auto training time */
+ udelay(5);
+
+ temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI link training done on step %d\n", i);
+ break;
+ }
+
+ /*
+ * Leave things enabled even if we failed to train FDI.
+ * Results in less fireworks from the state checker.
+ */
+ if (i == n_entries * 2 - 1) {
+ drm_err(&dev_priv->drm, "FDI link training failed!\n");
+ break;
+ }
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+
+ temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ }
+
+ /* Enable normal pixel sending for FDI */
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+}
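+
+/*
+ * A minimal, hypothetical caller sketch (for illustration only; the real
+ * invocation sits in the DDI pre-enable path for the PCH-attached CRT
+ * output):
+ *
+ *	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ *		hsw_fdi_link_train(encoder, crtc_state);
+ */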
+
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -672,9 +810,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 5)) {
+ if (IS_IRONLAKE(dev_priv)) {
dev_priv->display.fdi_link_train = ilk_fdi_link_train;
- } else if (IS_GEN(dev_priv, 6)) {
+ } else if (IS_SANDYBRIDGE(dev_priv)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index a9cd21663eb8..af01d2c173a8 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -9,6 +9,7 @@
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_encoder;
#define I915_DISPLAY_CONFIG_RETRY 1
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
@@ -18,5 +19,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc);
void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+void hsw_fdi_link_train(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 813a4f7033e1..9605a1064366 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -269,11 +269,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (HAS_GMCH(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN_RANGE(dev_priv, 5, 6))
+ else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
ilk_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN(dev_priv, 7))
+ else if (IS_DISPLAY_VER(dev_priv, 7))
ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (DISPLAY_VER(dev_priv) >= 8)
bdw_set_fifo_underrun_reporting(dev, pipe, enable);
return old;
@@ -432,7 +432,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv))
i9xx_check_fifo_underruns(crtc);
- else if (IS_GEN(dev_priv, 7))
+ else if (IS_DISPLAY_VER(dev_priv, 7))
ivb_check_fifo_underruns(crtc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 7b38eee9980f..6fc6965b6133 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -224,6 +224,8 @@ static void frontbuffer_release(struct kref *ref)
struct drm_i915_gem_object *obj = front->obj;
struct i915_vma *vma;
+ drm_WARN_ON(obj->base.dev, atomic_read(&front->bits));
+
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
i915_vma_clear_scanout(vma);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index b0d71bbbf2ad..8ddc20daef64 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -392,7 +392,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
{
- return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ return DISPLAY_VER(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
}
@@ -840,7 +840,7 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
*/
int intel_gmbus_setup(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gmbus *bus;
unsigned int pin;
int ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index ae1371c36a32..d8570e14fe60 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -32,6 +32,21 @@ static int intel_conn_to_vcpi(struct intel_connector *connector)
return connector->port ? connector->port->vcpi.vcpi : 0;
}
+static bool
+intel_streams_type1_capable(struct intel_connector *connector)
+{
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ bool capable = false;
+
+ if (!shim)
+ return capable;
+
+ if (shim->streams_type1_capable)
+ shim->streams_type1_capable(connector, &capable);
+
+ return capable;
+}
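+
+/*
+ * Note: a shim without a streams_type1_capable() hook is treated as not
+ * type1-capable, which makes the MST path below fall back to enforcing
+ * Type 0 content for every stream on the port.
+ */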
+
/*
* intel_hdcp_required_content_stream selects the highest common possible HDCP
* content_type for all streams in DP MST topology because security f/w doesn't
@@ -70,7 +85,7 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
if (conn_dig_port != dig_port)
continue;
- if (!enforce_type0 && !intel_hdcp2_capable(connector))
+ if (!enforce_type0 && !intel_streams_type1_capable(connector))
enforce_type0 = true;
data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
@@ -318,7 +333,7 @@ static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
switch (cpu_transcoder) {
case TRANSCODER_A:
return HDCP_TRANSA_REP_PRESENT |
@@ -1089,7 +1104,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
return INTEL_INFO(dev_priv)->display.has_hdcp &&
- (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
+ (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
}
static int
@@ -1706,6 +1721,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
enum port port = dig_port->base.port;
@@ -1715,7 +1731,8 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
LINK_ENCRYPTION_STATUS)) {
drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
connector->base.name, connector->base.base.id);
- return -EPERM;
+ ret = -EPERM;
+ goto link_recover;
}
if (hdcp->shim->stream_2_2_encryption) {
@@ -1729,6 +1746,15 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
transcoder_name(hdcp->stream_transcoder));
}
+ return 0;
+
+link_recover:
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
+
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
+
return ret;
}
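
/*
 * On the failure path above the port is fully deauthenticated and
 * data->k is reset, so a subsequent enable attempt restarts HDCP 2.2
 * authentication from scratch instead of reusing stale stream state.
 */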
@@ -1885,7 +1911,8 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
}
}
- ret = hdcp2_enable_stream_encryption(connector);
+ if (!ret)
+ ret = hdcp2_enable_stream_encryption(connector);
return ret;
}
@@ -1927,7 +1954,8 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
return 0;
}
-static int _intel_hdcp2_disable(struct intel_connector *connector)
+static int
+_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -1948,7 +1976,7 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
transcoder_name(hdcp->stream_transcoder));
- if (dig_port->num_hdcp_streams > 0)
+ if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
return 0;
}
@@ -1991,6 +2019,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
"HDCP2.2 link stopped the encryption, %x\n",
intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
+ _intel_hdcp2_disable(connector, true);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
true);
@@ -2030,7 +2059,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
connector->base.name, connector->base.base.id);
}
- ret = _intel_hdcp2_disable(connector);
+ ret = _intel_hdcp2_disable(connector, true);
if (ret) {
drm_err(&dev_priv->drm,
"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
@@ -2137,7 +2166,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
- if (INTEL_GEN(dev_priv) < 12)
+ if (DISPLAY_VER(dev_priv) < 12)
data->fw_ddi = intel_get_mei_fw_ddi_index(port);
else
/*
@@ -2176,8 +2205,7 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
- return (INTEL_GEN(dev_priv) >= 10 ||
- IS_GEMINILAKE(dev_priv) ||
+ return (DISPLAY_VER(dev_priv) >= 10 ||
IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv));
@@ -2288,7 +2316,7 @@ int intel_hdcp_enable(struct intel_connector *connector,
hdcp->stream_transcoder = INVALID_TRANSCODER;
}
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
/*
@@ -2340,7 +2368,7 @@ int intel_hdcp_disable(struct intel_connector *connector)
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
if (hdcp->hdcp2_encrypted)
- ret = _intel_hdcp2_disable(connector);
+ ret = _intel_hdcp2_disable(connector, false);
else if (hdcp->hdcp_encrypted)
ret = _intel_hdcp_disable(connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 95919d325b0b..d69f0a6dc26d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -41,21 +41,15 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
-#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_dpio_phy.h"
-#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
-#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
-#include "intel_sdvo.h"
-#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
@@ -86,19 +80,6 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
"HDMI transcoder function enabled, expecting disabled\n");
}
-struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dig_port =
- container_of(&encoder->base, struct intel_digital_port,
- base.base);
- return &dig_port->hdmi;
-}
-
-static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector)
-{
- return enc_to_intel_hdmi(intel_attached_encoder(connector));
-}
-
static u32 g4x_infoframe_index(unsigned int type)
{
switch (type) {
@@ -200,7 +181,7 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
return VIDEO_DIP_GMP_DATA_SIZE;
else
return VIDEO_DIP_DATA_SIZE;
@@ -583,7 +564,7 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
return val & mask;
@@ -839,7 +820,7 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
- if (!(INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ if (DISPLAY_VER(dev_priv) < 10)
return true;
if (!crtc_state->has_infoframe)
@@ -1789,379 +1770,16 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.protocol = HDCP_PROTOCOL_HDMI,
};
-static void intel_hdmi_prepare(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- u32 hdmi_val;
-
- intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-
- hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
- hdmi_val |= HDMI_COLOR_RANGE_16_235;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
-
- if (crtc_state->pipe_bpp > 24)
- hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
- else
- hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
-
- if (crtc_state->has_hdmi_sink)
- hdmi_val |= HDMI_MODE_SELECT_HDMI;
-
- if (HAS_PCH_CPT(dev_priv))
- hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
- else if (IS_CHERRYVIEW(dev_priv))
- hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
- else
- hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
-
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-}
-
-static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- intel_wakeref_t wakeref;
- bool ret;
-
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
-
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
-
- return ret;
-}
-
-static void intel_hdmi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 tmp, flags = 0;
- int dotclock;
-
- pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
-
- tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
-
- if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
- flags |= DRM_MODE_FLAG_PHSYNC;
- else
- flags |= DRM_MODE_FLAG_NHSYNC;
-
- if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
- flags |= DRM_MODE_FLAG_PVSYNC;
- else
- flags |= DRM_MODE_FLAG_NVSYNC;
-
- if (tmp & HDMI_MODE_SELECT_HDMI)
- pipe_config->has_hdmi_sink = true;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
- if (pipe_config->infoframes.enable)
- pipe_config->has_infoframe = true;
-
- if (tmp & HDMI_AUDIO_ENABLE)
- pipe_config->has_audio = true;
-
- if (!HAS_PCH_SPLIT(dev_priv) &&
- tmp & HDMI_COLOR_RANGE_16_235)
- pipe_config->limited_color_range = true;
-
- pipe_config->hw.adjusted_mode.flags |= flags;
-
- if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
- dotclock = pipe_config->port_clock * 2 / 3;
- else
- dotclock = pipe_config->port_clock;
-
- if (pipe_config->pixel_multiplier)
- dotclock /= pipe_config->pixel_multiplier;
-
- pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
-
- pipe_config->lane_count = 4;
-
- intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
-
- intel_read_infoframe(encoder, pipe_config,
- HDMI_INFOFRAME_TYPE_AVI,
- &pipe_config->infoframes.avi);
- intel_read_infoframe(encoder, pipe_config,
- HDMI_INFOFRAME_TYPE_SPD,
- &pipe_config->infoframes.spd);
- intel_read_infoframe(encoder, pipe_config,
- HDMI_INFOFRAME_TYPE_VENDOR,
- &pipe_config->infoframes.hdmi);
-}
-
-static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-
- drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
- drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
- pipe_name(crtc->pipe));
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
-}
-
-static void g4x_enable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 temp;
-
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
-
- temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
-
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- if (pipe_config->has_audio)
- intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
-}
-
-static void ibx_enable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 temp;
-
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
-
- temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
-
- /*
- * HW workaround, need to write this twice for issue
- * that may result in first write getting masked.
- */
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- /*
- * HW workaround, need to toggle enable bit off and on
- * for 12bpc with pixel repeat.
- *
- * FIXME: BSpec says this should be done at the end of
- * the modeset sequence, so not sure if this isn't too soon.
- */
- if (pipe_config->pipe_bpp > 24 &&
- pipe_config->pixel_multiplier > 1) {
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
- temp & ~SDVO_ENABLE);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- /*
- * HW workaround, need to write this twice for issue
- * that may result in first write getting masked.
- */
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- }
-
- if (pipe_config->has_audio)
- intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
-}
-
-static void cpt_enable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- enum pipe pipe = crtc->pipe;
- u32 temp;
-
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
-
- temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
-
- /*
- * WaEnableHDMI8bpcBefore12bpc:snb,ivb
- *
- * The procedure for 12bpc is as follows:
- * 1. disable HDMI clock gating
- * 2. enable HDMI with 8bpc
- * 3. enable HDMI with 12bpc
- * 4. enable HDMI clock gating
- */
-
- if (pipe_config->pipe_bpp > 24) {
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
-
- temp &= ~SDVO_COLOR_FORMAT_MASK;
- temp |= SDVO_COLOR_FORMAT_8bpc;
- }
-
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- if (pipe_config->pipe_bpp > 24) {
- temp &= ~SDVO_COLOR_FORMAT_MASK;
- temp |= HDMI_COLOR_FORMAT_12bpc;
-
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
- }
-
- if (pipe_config->has_audio)
- intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
-}
-
-static void vlv_enable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
-}
-
-static void intel_disable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct intel_digital_port *dig_port =
- hdmi_to_dig_port(intel_hdmi);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- u32 temp;
-
- temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
-
- temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- /*
- * HW workaround for IBX, we need to move the port
- * to transcoder A after disabling it to allow the
- * matching DP port to be enabled on transcoder A.
- */
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
- /*
- * We get CPU/PCH FIFO underruns on the other pipe when
- * doing the workaround. Sweep them under the rug.
- */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
- temp &= ~SDVO_PIPE_SEL_MASK;
- temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
- /*
- * HW workaround, need to write this twice for issue
- * that may result in first write getting masked.
- */
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- temp &= ~SDVO_ENABLE;
- intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
- intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
-
- intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
- }
-
- dig_port->set_infoframes(encoder,
- false,
- old_crtc_state, old_conn_state);
-
- intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
-}
-
-static void g4x_disable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
-
- intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
-}
-
-static void pch_disable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder,
- old_crtc_state, old_conn_state);
-}
-
-static void pch_post_disable_hdmi(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
-}
-
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
max_tmds_clock = 594000;
- else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv))
max_tmds_clock = 300000;
- else if (INTEL_GEN(dev_priv) >= 5)
+ else if (DISPLAY_VER(dev_priv) >= 5)
max_tmds_clock = 225000;
else
max_tmds_clock = 165000;
@@ -2233,6 +1851,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_OK;
}
+static int intel_hdmi_port_clock(int clock, int bpc)
+{
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
+ return clock * bpc / 8;
+}
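+
+/*
+ * Worked example: a 297000 kHz TMDS mode needs 297000 * 12 / 8 = 445500 kHz
+ * of port clock at 12bpc, and 297000 * 10 / 8 = 371250 kHz at 10bpc.
+ */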
+
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -2264,17 +1892,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock /= 2;
/* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
+ true, has_hdmi_sink);
if (has_hdmi_sink) {
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK && !HAS_GMCH(dev_priv))
- status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
- if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
- status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+ if (status != MODE_OK && DISPLAY_VER(dev_priv) >= 11)
+ status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
true, has_hdmi_sink);
}
if (status != MODE_OK)
@@ -2336,7 +1965,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (HAS_GMCH(dev_priv))
return false;
- if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+ if (bpc == 10 && DISPLAY_VER(dev_priv) < 11)
return false;
/*
@@ -2348,7 +1977,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && IS_GEN(dev_priv, 11) &&
+ bpc == 10 && IS_DISPLAY_VER(dev_priv, 11) &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2382,16 +2011,6 @@ intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
return intel_pch_panel_fitting(crtc_state, conn_state);
}
-static int intel_hdmi_port_clock(int clock, int bpc)
-{
- /*
- * Need to adjust the port link by:
- * 1.5x for 12bpc
- * 1.25x for 10bpc
- */
- return clock * bpc / 8;
-}
-
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int clock)
@@ -2545,8 +2164,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && (INTEL_GEN(dev_priv) >= 10 ||
- IS_GEMINILAKE(dev_priv))) {
+ if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -2704,7 +2322,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (INTEL_GEN(dev_priv) >= 11 &&
+ if (DISPLAY_VER(dev_priv) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2755,125 +2373,6 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
return intel_connector_update_modes(connector, edid);
}
-static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct intel_digital_port *dig_port =
- enc_to_dig_port(encoder);
-
- intel_hdmi_prepare(encoder, pipe_config);
-
- dig_port->set_infoframes(encoder,
- pipe_config->has_infoframe,
- pipe_config, conn_state);
-}
-
-static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- vlv_phy_pre_encoder_enable(encoder, pipe_config);
-
- /* HDMI 1.0V-2dB */
- vlv_set_phy_signal_level(encoder, pipe_config,
- 0x2b245f5f, 0x00002000,
- 0x5578b83a, 0x2b247878);
-
- dig_port->set_infoframes(encoder,
- pipe_config->has_infoframe,
- pipe_config, conn_state);
-
- g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
-
- vlv_wait_port_ready(dev_priv, dig_port, 0x0);
-}
-
-static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- intel_hdmi_prepare(encoder, pipe_config);
-
- vlv_phy_pre_pll_enable(encoder, pipe_config);
-}
-
-static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- intel_hdmi_prepare(encoder, pipe_config);
-
- chv_phy_pre_pll_enable(encoder, pipe_config);
-}
-
-static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- chv_phy_post_pll_disable(encoder, old_crtc_state);
-}
-
-static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- /* Reset lanes to avoid HDMI flicker (VLV w/a) */
- vlv_phy_reset_lanes(encoder, old_crtc_state);
-}
-
-static void chv_hdmi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- vlv_dpio_get(dev_priv);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
-}
-
-static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- chv_phy_pre_encoder_enable(encoder, pipe_config);
-
- /* FIXME: Program the support xxx V-dB */
- /* Use 800mV-0dB */
- chv_set_phy_signal_level(encoder, pipe_config, 128, 102, false);
-
- dig_port->set_infoframes(encoder,
- pipe_config->has_infoframe,
- pipe_config, conn_state);
-
- g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
-
- vlv_wait_port_ready(dev_priv, dig_port, 0x0);
-
- /* Second common lane will stay alive on its own now */
- chv_phy_release_cl2_override(encoder);
-}
-
static struct i2c_adapter *
intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
{
@@ -2948,10 +2447,6 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.atomic_check = intel_digital_connector_atomic_check,
};
-static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- .destroy = intel_encoder_destroy,
-};
-
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
@@ -2964,7 +2459,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
drm_object_attach_property(&connector->base,
connector->dev->mode_config.hdr_output_metadata_property, 0);
@@ -3137,11 +2632,45 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return GMBUS_PIN_1_BXT + phy;
}
+static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+
+ drm_WARN_ON(&i915->drm, port == PORT_A);
+
+ /*
+ * Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
+ * final two outputs use type-c pins, even though they're actually
+ * combo outputs. With CMP, the traditional DDI A-D pins are used for
+ * all outputs.
+ */
+ if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+ return GMBUS_PIN_1_BXT + phy;
+}
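+
+/*
+ * For example, with a TGP PCH the combo output on PHY_C lands on
+ * GMBUS_PIN_9_TC1_ICP and PHY_D on GMBUS_PIN_10_TC2_ICP, while PHY_B
+ * keeps GMBUS_PIN_2_BXT (GMBUS_PIN_1_BXT + 1).
+ */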
+
static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
return intel_port_to_phy(dev_priv, port) + 1;
}
+static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ drm_WARN_ON(&dev_priv->drm, port == PORT_B || port == PORT_C);
+
+ /*
+ * Pin mapping for ADL-S requires TC pins for all combo phy outputs
+ * except the first combo output.
+ */
+ if (phy == PHY_A)
+ return GMBUS_PIN_1_BXT;
+
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
+}
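+
+/*
+ * For example, PHY_B maps to GMBUS_PIN_9_TC1_ICP and PHY_E to
+ * GMBUS_PIN_12_TC4_ICP, while only PHY_A keeps the legacy combo pin.
+ */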
+
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -3179,10 +2708,14 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ if (HAS_PCH_ADP(dev_priv))
+ ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEN9_BC(dev_priv) && HAS_PCH_TGP(dev_priv))
+ ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
@@ -3259,7 +2792,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
"Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
+ if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
if (drm_WARN(dev, dig_port->max_lanes < 4,
@@ -3281,7 +2814,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -3323,119 +2856,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
}
-static enum intel_hotplug_state
-intel_hdmi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
-{
- enum intel_hotplug_state state;
-
- state = intel_encoder_hotplug(encoder, connector);
-
- /*
- * On many platforms the HDMI live state signal is known to be
- * unreliable, so we can't use it to detect if a sink is connected or
- * not. Instead we detect if it's connected based on whether we can
- * read the EDID or not. That in turn has a problem during disconnect,
- * since the HPD interrupt may be raised before the DDC lines get
- * disconnected (due to how the required length of DDC vs. HPD
- * connector pins are specified) and so we'll still be able to get a
- * valid EDID. To solve this schedule another detection cycle if this
- * time around we didn't detect any change in the sink's connection
- * status.
- */
- if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
- state = INTEL_HOTPLUG_RETRY;
-
- return state;
-}
-
-void intel_hdmi_init(struct drm_i915_private *dev_priv,
- i915_reg_t hdmi_reg, enum port port)
-{
- struct intel_digital_port *dig_port;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
-
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
- if (!dig_port)
- return;
-
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
- kfree(dig_port);
- return;
- }
-
- intel_encoder = &dig_port->base;
-
- mutex_init(&dig_port->hdcp_mutex);
-
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
- "HDMI %c", port_name(port));
-
- intel_encoder->hotplug = intel_hdmi_hotplug;
- intel_encoder->compute_config = intel_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- intel_encoder->disable = pch_disable_hdmi;
- intel_encoder->post_disable = pch_post_disable_hdmi;
- } else {
- intel_encoder->disable = g4x_disable_hdmi;
- }
- intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
- intel_encoder->get_config = intel_hdmi_get_config;
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
- intel_encoder->pre_enable = chv_hdmi_pre_enable;
- intel_encoder->enable = vlv_enable_hdmi;
- intel_encoder->post_disable = chv_hdmi_post_disable;
- intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
- intel_encoder->pre_enable = vlv_hdmi_pre_enable;
- intel_encoder->enable = vlv_enable_hdmi;
- intel_encoder->post_disable = vlv_hdmi_post_disable;
- } else {
- intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev_priv))
- intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev_priv))
- intel_encoder->enable = ibx_enable_hdmi;
- else
- intel_encoder->enable = g4x_enable_hdmi;
- }
-
- intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- intel_encoder->port = port;
- if (IS_CHERRYVIEW(dev_priv)) {
- if (port == PORT_D)
- intel_encoder->pipe_mask = BIT(PIPE_C);
- else
- intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
- } else {
- intel_encoder->pipe_mask = ~0;
- }
- intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
- /*
- * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
- * to work on real hardware. And since g4x can send infoframes to
- * only one port anyway, nothing is lost by allowing it.
- */
- if (IS_G4X(dev_priv))
- intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
-
- dig_port->hdmi.hdmi_reg = hdmi_reg;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
- dig_port->max_lanes = 4;
-
- intel_infoframe_init(dig_port);
-
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- intel_hdmi_init_connector(dig_port, intel_connector);
-}
-
/*
* intel_hdmi_dsc_get_slice_height - get the dsc slice_height
* @vactive: Vactive of a display mode
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index fa1a9b030850..b43a180d007e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -23,11 +23,8 @@ struct drm_connector_state;
union hdmi_infoframe;
enum port;
-void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
- enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 1c939f9c9bc9..7f3c638c8950 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -80,6 +80,7 @@ static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
struct platform_device_info pinfo = {};
struct resource *rsc;
struct platform_device *platdev;
@@ -99,9 +100,9 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
- rsc[1].start = pci_resource_start(dev->pdev, 0) +
+ rsc[1].start = pci_resource_start(pdev, 0) +
I915_HDMI_LPE_AUDIO_BASE;
- rsc[1].end = pci_resource_start(dev->pdev, 0) +
+ rsc[1].end = pci_resource_start(pdev, 0) +
I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index c6c7c0b9989b..f31a368f34c5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -136,12 +136,12 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
- if (INTEL_GEN(dev_priv) < 5)
+ if (DISPLAY_VER(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (INTEL_GEN(dev_priv) < 4) {
+ if (DISPLAY_VER(dev_priv) < 4) {
tmp = intel_de_read(dev_priv, PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
@@ -179,7 +179,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->t4 = val * 1000;
- if (INTEL_GEN(dev_priv) <= 4 &&
+ if (DISPLAY_VER(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
@@ -280,7 +280,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg.
*/
- if (IS_GEN(dev_priv, 4)) {
+ if (IS_DISPLAY_VER(dev_priv, 4)) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -415,7 +415,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
int ret;
/* Should never happen!! */
- if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
+ if (DISPLAY_VER(dev_priv) < 4 && intel_crtc->pipe == 0) {
drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -915,7 +915,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (INTEL_GEN(dev_priv) < 4)
+ if (DISPLAY_VER(dev_priv) < 4)
intel_encoder->pipe_mask = BIT(PIPE_B);
else
intel_encoder->pipe_mask = ~0;
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 4f77cf849171..dfd724e506b5 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -247,7 +247,7 @@ static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 main_function, sub_function, scic;
u16 swsci_val;
u32 dslp;
@@ -807,7 +807,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
if (!name || !*name)
return -ENOENT;
- ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
+ ret = request_firmware(&fw, name, dev_priv->drm.dev);
if (ret) {
drm_err(&dev_priv->drm,
"Requesting VBT firmware \"%s\" failed (%d)\n",
@@ -840,7 +840,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index f455040fa989..e5dadde422f7 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -203,7 +203,7 @@ struct intel_overlay {
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
bool enable)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u8 val;
/* WA_OVERLAY_CLKGATE:alm */
@@ -550,7 +550,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt
{
u32 sw;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
@@ -755,6 +755,32 @@ static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
return cmd;
}
+static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo)
+{
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ int ret;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(new_bo, &ww);
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0,
+ NULL, PIN_MAPPABLE);
+ ret = PTR_ERR_OR_ZERO(vma);
+ }
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
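+
+/*
+ * The -EDEADLK handling above is the standard i915 ww-mutex pattern:
+ * i915_gem_ww_ctx_backoff() drops the contended locks and the pin is
+ * retried, guaranteeing forward progress without lock-order deadlocks.
+ */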
+
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct drm_intel_overlay_put_image *params)
@@ -776,12 +802,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- vma = i915_gem_object_pin_to_display_plane(new_bo,
- 0, NULL, PIN_MAPPABLE);
+ vma = intel_overlay_pin_fb(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
}
+
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
@@ -794,7 +820,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable)
oconfig |= OCONF_GAMMA2_ENABLE;
- if (IS_GEN(dev_priv, 4))
+ if (IS_DISPLAY_VER(dev_priv, 4))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -913,7 +939,7 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
@@ -1028,7 +1054,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
+ if (IS_DISPLAY_VER(dev_priv, 4) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1255,7 +1281,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_GEN(dev_priv, 2)) {
+ if (!IS_DISPLAY_VER(dev_priv, 2)) {
attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
@@ -1279,7 +1305,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
goto out_unlock;
if (overlay->active) {
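The intel_overlay_pin_fb() helper added earlier in this file wraps the pin in the i915 ww (wound/wait) locking dance: take the object lock, and if the acquire loses to another context (-EDEADLK), back off and retry the whole sequence. Below is a minimal standalone sketch of that shape; every type and function here is a stand-in for illustration, not the kernel API.

#include <stdio.h>

#define EDEADLK 35

/* Stand-in for i915_gem_ww_ctx; tracks attempts so the demo can
 * simulate losing one lock race before succeeding. */
struct ww_ctx { int attempts; };

static int object_lock(struct ww_ctx *ww)
{
	/* Pretend the first attempt deadlocks against another context. */
	return ww->attempts++ == 0 ? -EDEADLK : 0;
}

static int ww_backoff(struct ww_ctx *ww)
{
	/* Drop all locks taken so far, then allow another pass. */
	return 0;
}

int main(void)
{
	struct ww_ctx ww = { 0 };
	int ret;

retry:
	ret = object_lock(&ww);
	if (ret == -EDEADLK) {
		ret = ww_backoff(&ww);
		if (!ret)
			goto retry;	/* retry the whole locking sequence */
	}
	printf("done after %d attempt(s), ret=%d\n", ww.attempts, ret);
	return 0;
}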
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 5fdf52643150..10022d1575e1 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -405,7 +405,7 @@ int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
i965_scale_aspect(crtc_state, &pfit_control);
else
i9xx_scale_aspect(crtc_state, &pfit_control,
@@ -419,7 +419,7 @@ int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE |
@@ -435,7 +435,7 @@ int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
out:
@@ -445,7 +445,7 @@ out:
}
/* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
+ if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
crtc_state->gmch_pfit.control = pfit_control;
@@ -590,13 +590,13 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse
u32 val;
val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (INTEL_GEN(dev_priv) < 4)
+ if (DISPLAY_VER(dev_priv) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
+ pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
val *= lbpc;
}
@@ -664,10 +664,10 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
- pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
+ pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
}
- if (IS_GEN(dev_priv, 4)) {
+ if (IS_DISPLAY_VER(dev_priv, 4)) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
@@ -1040,7 +1040,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
@@ -1728,7 +1728,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (IS_DISPLAY_VER(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev_priv))
@@ -2178,7 +2178,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
} else {
panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
- } else if (IS_GEN(dev_priv, 4)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 4)) {
panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
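The combination-mode hunks above factor the requested brightness into a coarse LBPC multiplier (written to PCI config space with pci_write_config_byte()) and a fine PWM duty cycle, and i9xx_get_backlight() multiplies them back together. A standalone demo of that arithmetic, with made-up values; this is an illustration of the split, not driver code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pwm_level_max = 0x1fff;	/* assumed max duty cycle */
	uint32_t level = 4000;			/* requested brightness */

	/* Same split as i9xx_set_backlight(): lbpc in 1..0xff. */
	uint8_t lbpc = level * 0xfe / pwm_level_max + 1;
	uint32_t duty = level / lbpc;

	/* Reading back, as in i9xx_get_backlight(): level = duty * lbpc. */
	printf("lbpc=%u duty=%u effective=%u\n",
	       (unsigned)lbpc, (unsigned)duty, (unsigned)(duty * lbpc));
	return 0;
}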
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index a9a5df2fee4d..7c8e0d76207f 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -409,15 +409,15 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
- else if (INTEL_GEN(dev_priv) < 5)
+ else if (DISPLAY_VER(dev_priv) < 5)
return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_GEN_RANGE(dev_priv, 5, 6))
+ else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
return ilk_pipe_crc_ctl_reg(source, val);
- else if (INTEL_GEN(dev_priv) < 9)
+ else if (DISPLAY_VER(dev_priv) < 9)
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else
return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
@@ -539,15 +539,15 @@ static int
intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
const enum intel_pipe_crc_source source)
{
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
return i8xx_crc_source_valid(dev_priv, source);
- else if (INTEL_GEN(dev_priv) < 5)
+ else if (DISPLAY_VER(dev_priv) < 5)
return i9xx_crc_source_valid(dev_priv, source);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_crc_source_valid(dev_priv, source);
- else if (IS_GEN_RANGE(dev_priv, 5, 6))
+ else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
return ilk_crc_source_valid(dev_priv, source);
- else if (INTEL_GEN(dev_priv) < 9)
+ else if (DISPLAY_VER(dev_priv) < 9)
return ivb_crc_source_valid(dev_priv, source);
else
return skl_crc_source_valid(dev_priv, source);
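These ladders show the typical INTEL_GEN() to DISPLAY_VER() conversion: display versions 5 and 6 correspond exactly to Ironlake and Sandy Bridge, so the old IS_GEN_RANGE(5, 6) check is now spelled out as two platform checks. A toy standalone sketch of dispatching a handler by a plain display-version integer (the VLV/CHV platform special case is omitted, and the handlers are stubs):

#include <stdio.h>

typedef const char *(*crc_ctl_fn)(void);

static const char *i8xx(void) { return "i8xx"; }
static const char *i9xx(void) { return "i9xx"; }
static const char *ilk(void)  { return "ilk"; }
static const char *ivb(void)  { return "ivb"; }
static const char *skl(void)  { return "skl"; }

/* Same shape as get_new_crc_ctl_reg(), keyed on an integer version. */
static crc_ctl_fn pick(int display_ver)
{
	if (display_ver == 2)
		return i8xx;
	else if (display_ver < 5)
		return i9xx;
	else if (display_ver < 7)	/* Ironlake or Sandy Bridge */
		return ilk;
	else if (display_ver < 9)
		return ivb;
	else
		return skl;
}

int main(void)
{
	for (int v = 2; v <= 12; v++)
		printf("ver %2d -> %s\n", v, pick(v)());
	return 0;
}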
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index c4867a8020a5..c55da130773b 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -3,9 +3,11 @@
* Copyright © 2020 Intel Corporation
*/
+#include "g4x_dp.h"
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dpll.h"
#include "intel_pps.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
@@ -776,7 +778,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ilk_get_pp_control(intel_dp);
- if (IS_GEN(dev_priv, 5)) {
+ if (IS_IRONLAKE(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
intel_de_write(dev_priv, pp_ctrl_reg, pp);
@@ -784,7 +786,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN(dev_priv, 5))
+ if (!IS_IRONLAKE(dev_priv))
pp |= PANEL_POWER_RESET;
intel_de_write(dev_priv, pp_ctrl_reg, pp);
@@ -793,7 +795,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->pps.last_power_on = jiffies;
- if (IS_GEN(dev_priv, 5)) {
+ if (IS_IRONLAKE(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
intel_de_write(dev_priv, pp_ctrl_reg, pp);
intel_de_posting_read(dev_priv, pp_ctrl_reg);
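The three IS_IRONLAKE() hunks above bracket panel power-on with the PANEL_POWER_RESET bit cleared and then restored. Pulled out of context, the sequence looks like the sketch below; the register helpers and bit positions are stand-ins for the demo, not the real PP_CONTROL layout:

#include <stdio.h>
#include <stdint.h>

#define PANEL_POWER_ON    (1u << 0)	/* made-up bit positions */
#define PANEL_POWER_RESET (1u << 1)

static uint32_t pp_ctrl;		/* stands in for PP_CONTROL */

static uint32_t read_pp(void)    { return pp_ctrl; }
static void write_pp(uint32_t v) { pp_ctrl = v; printf("PP_CONTROL = %#x\n", (unsigned)v); }
static void wait_panel_on(void)  { /* the real driver polls PP_STATUS */ }

int main(void)
{
	int is_ironlake = 1;
	uint32_t pp = read_pp();

	if (is_ironlake) {		/* ILK: no reset around power up */
		pp &= ~PANEL_POWER_RESET;
		write_pp(pp);
	}

	pp |= PANEL_POWER_ON;
	if (!is_ironlake)
		pp |= PANEL_POWER_RESET;
	write_pp(pp);

	wait_panel_on();

	if (is_ironlake) {		/* restore the panel reset bit */
		pp |= PANEL_POWER_RESET;
		write_pp(pp);
	}
	return 0;
}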
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 850cb7f5b332..8ada4f829cab 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -32,6 +32,7 @@
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_universal_plane.h"
/**
* DOC: Panel Self Refresh (PSR/SRD)
@@ -80,9 +81,11 @@
* use page flips.
*/
-static bool psr_global_enabled(struct drm_i915_private *i915)
+static bool psr_global_enabled(struct intel_dp *intel_dp)
{
- switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
@@ -92,9 +95,9 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
}
}
-static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
+static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
- switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -103,27 +106,28 @@ static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
}
}
-static void psr_irq_control(struct drm_i915_private *dev_priv)
+static void psr_irq_control(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder trans_shift;
- u32 mask, val;
i915_reg_t imr_reg;
+ u32 mask, val;
/*
* gen12+ has registers relative to transcoder and one per transcoder
* using the same bit definition: handle it as TRANSCODER_EDP to force
* 0 shift in bit definition
*/
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
trans_shift = 0;
- imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
} else {
- trans_shift = dev_priv->psr.transcoder;
+ trans_shift = intel_dp->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
mask = EDP_PSR_ERROR(trans_shift);
- if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= EDP_PSR_POST_EXIT(trans_shift) |
EDP_PSR_PRE_ENTRY(trans_shift);
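The register selection above reflects two interrupt layouts: gen12+ has one PSR IMR per transcoder, so the bit definitions are used with a zero shift, while older parts pack every transcoder's bits into the single EDP_PSR_IMR, shifted per transcoder. A toy model of the two layouts; the bit positions are invented for the demo and do not match the real registers:

#include <stdio.h>
#include <stdint.h>

#define PSR_ERROR(shift)	(0x1u << ((shift) * 8))	/* invented layout */
#define PSR_POST_EXIT(shift)	(0x2u << ((shift) * 8))
#define PSR_PRE_ENTRY(shift)	(0x4u << ((shift) * 8))

int main(void)
{
	int display_ver = 11, transcoder = 1;

	/* Same decision as psr_irq_control(): gen12+ uses a
	 * per-transcoder register with zero shift, older hardware a
	 * shared register with a per-transcoder shift. */
	int trans_shift = display_ver >= 12 ? 0 : transcoder;

	uint32_t mask = PSR_ERROR(trans_shift) |
			PSR_POST_EXIT(trans_shift) |
			PSR_PRE_ENTRY(trans_shift);

	printf("shift=%d mask=%#x\n", trans_shift, (unsigned)mask);
	return 0;
}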
@@ -172,38 +176,39 @@ static void psr_event_print(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ ktime_t time_ns = ktime_get();
enum transcoder trans_shift;
i915_reg_t imr_reg;
- ktime_t time_ns = ktime_get();
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
trans_shift = 0;
- imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
} else {
- trans_shift = dev_priv->psr.transcoder;
+ trans_shift = intel_dp->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
+ intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
- dev_priv->psr.last_exit = time_ns;
+ intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
u32 val = intel_de_read(dev_priv,
PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ bool psr2_enabled = intel_dp->psr.psr2_enabled;
intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
@@ -217,7 +222,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+ intel_dp->psr.irq_aux_error = true;
/*
* If this interruption is not masked it will keep
@@ -231,7 +236,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
val |= EDP_PSR_ERROR(trans_shift);
intel_de_write(dev_priv, imr_reg, val);
- schedule_work(&dev_priv->psr.work);
+ schedule_work(&intel_dp->psr.work);
}
}
@@ -292,12 +297,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- if (dev_priv->psr.dp) {
- drm_warn(&dev_priv->drm,
- "More than one eDP panel found, PSR support should be extended\n");
- return;
- }
-
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@@ -318,13 +317,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
return;
}
- dev_priv->psr.sink_support = true;
- dev_priv->psr.sink_sync_latency =
+ intel_dp->psr.sink_support = true;
+ intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- dev_priv->psr.dp = intel_dp;
-
- if (INTEL_GEN(dev_priv) >= 9 &&
+ if (DISPLAY_VER(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
@@ -341,14 +338,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support = y_req && alpm;
+ intel_dp->psr.sink_psr2_support = y_req && alpm;
drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
- dev_priv->psr.sink_psr2_support ? "" : "not ");
+ intel_dp->psr.sink_psr2_support ? "" : "not ");
- if (dev_priv->psr.sink_psr2_support) {
- dev_priv->psr.colorimetry_support =
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.su_x_granularity =
+ intel_dp->psr.su_x_granularity =
intel_dp_get_su_x_granulartiy(intel_dp);
}
}
@@ -374,7 +371,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
intel_de_write(dev_priv,
- EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+ EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -385,7 +382,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
aux_ctl);
}
@@ -395,17 +392,17 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled) {
+ if (intel_dp->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
- if (dev_priv->psr.link_standby)
+ if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
@@ -419,7 +416,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->params.psr_safest_params) {
@@ -465,7 +462,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
* off-by-one issue that HW has in some cases.
*/
idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
idle_frames = 0xf;
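psr_compute_idle_frames() clamps from three sides: at least 6 frames (or more if the VBT asks), at least the sink's sync latency plus one, and at most the 4-bit register field. A standalone demo of the clamp with assumed inputs:

#include <stdio.h>

int main(void)
{
	int vbt_idle_frames = 4;	/* assumed VBT value */
	int sink_sync_latency = 8;	/* assumed DPCD-reported latency */

	int idle_frames = vbt_idle_frames > 6 ? vbt_idle_frames : 6;
	if (idle_frames < sink_sync_latency + 1)
		idle_frames = sink_sync_latency + 1;
	if (idle_frames > 0xf)		/* 4-bit register field */
		idle_frames = 0xf;

	printf("idle_frames=%d\n", idle_frames);	/* prints 9 */
	return 0;
}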
@@ -485,17 +482,17 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- if (dev_priv->psr.link_standby)
+ if (intel_dp->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
val |= intel_psr1_get_tp_time(intel_dp);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -527,13 +524,13 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
val |= intel_psr2_get_tp_time(intel_dp);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
/*
* TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
* values from BSpec. In order to setting an optimal power
@@ -544,42 +541,42 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
val |= TGL_EDP_PSR2_FAST_WAKE(7);
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
val |= EDP_PSR2_IO_BUFFER_WAKE(7);
val |= EDP_PSR2_FAST_WAKE(7);
}
- if (dev_priv->psr.psr2_sel_fetch_enabled) {
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
/* WA 1408330847 */
- if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK,
DIS_RAM_BYPASS_PSR2_MAN_TRACK);
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
PSR2_MAN_TRK_CTL_ENABLE);
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
}
/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
- if (INTEL_GEN(dev_priv) < 9)
+ if (DISPLAY_VER(dev_priv) < 9)
return false;
- else if (INTEL_GEN(dev_priv) >= 12)
+ else if (DISPLAY_VER(dev_priv) >= 12)
return trans == TRANSCODER_A;
else
return trans == TRANSCODER_EDP;
@@ -594,55 +591,58 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
-static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
-static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
- psr2_program_idle_frames(dev_priv, 0);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ psr2_program_idle_frames(intel_dp, 0);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
-static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
- struct intel_dp *intel_dp = dev_priv->psr.dp;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
+ psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
static void tgl_dc3co_disable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
/* If delayed work is pending, it is not idle */
- if (delayed_work_pending(&dev_priv->psr.dc3co_work))
+ if (delayed_work_pending(&intel_dp->psr.dc3co_work))
goto unlock;
- tgl_psr2_disable_dc3co(dev_priv);
+ tgl_psr2_disable_dc3co(intel_dp);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
-static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
- if (!dev_priv->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_enabled)
return;
- cancel_delayed_work(&dev_priv->psr.dc3co_work);
+ cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit, disallow dc3co */
- tgl_psr2_disable_dc3co(dev_priv);
+ tgl_psr2_disable_dc3co(intel_dp);
}
static void
@@ -654,6 +654,13 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 exit_scanlines;
+ /*
+	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
+ * TODO: when the issue is addressed, this restriction should be removed.
+ */
+ if (crtc_state->enable_psr2_sel_fetch)
+ return;
+
if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
@@ -684,7 +691,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_plane *plane;
int i;
- if (!dev_priv->params.enable_psr2_sel_fetch) {
+ if (!dev_priv->params.enable_psr2_sel_fetch &&
+ intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
return false;
@@ -715,9 +723,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
- if (!dev_priv->psr.sink_psr2_support)
+ if (!intel_dp->psr.sink_psr2_support)
return false;
+	/* JSL and EHL only support eDP 1.3 */
+ if (IS_JSL_EHL(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+ return false;
+ }
+
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not supported in transcoder %s\n",
@@ -725,7 +739,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!psr2_global_enabled(dev_priv)) {
+ if (!psr2_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
return false;
}
@@ -747,15 +761,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
max_bpp = 30;
- } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
psr_max_h = 4096;
psr_max_v = 2304;
max_bpp = 24;
- } else if (IS_GEN(dev_priv, 9)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
max_bpp = 24;
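This ladder encodes the PSR2 ceilings per display version: 5120x3200 at 30 bpp on version 12+, 4096x2304 at 24 bpp on versions 10 and 11 (which now also covers Geminilake, making the IS_GEMINILAKE() special case unnecessary), and 3640x2304 at 24 bpp on version 9. A standalone sketch of checking a mode against those limits:

#include <stdio.h>
#include <stdbool.h>

static bool psr2_mode_ok(int display_ver, int hdisp, int vdisp, int bpp)
{
	int max_h, max_v, max_bpp;

	if (display_ver >= 12) {
		max_h = 5120; max_v = 3200; max_bpp = 30;
	} else if (display_ver >= 10) {
		max_h = 4096; max_v = 2304; max_bpp = 24;
	} else if (display_ver == 9) {
		max_h = 3640; max_v = 2304; max_bpp = 24;
	} else {
		return false;	/* no PSR2 before display version 9 */
	}

	return hdisp <= max_h && vdisp <= max_v && bpp <= max_bpp;
}

int main(void)
{
	/* A 4K panel at 24 bpp fits on ver 10+ but is too wide for ver 9. */
	printf("ver 9:  %d\n", psr2_mode_ok(9, 3840, 2160, 24));
	printf("ver 12: %d\n", psr2_mode_ok(12, 3840, 2160, 24));
	return 0;
}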
@@ -774,10 +788,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* only need to validate the SU block width is a multiple of
* x granularity.
*/
- if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+ if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ crtc_hdisplay, intel_dp->psr.su_x_granularity);
return false;
}
@@ -806,7 +820,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -819,30 +832,15 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
if (crtc_state->vrr.enable)
return;
- if (!CAN_PSR(dev_priv))
+ if (!CAN_PSR(intel_dp))
return;
- if (intel_dp != dev_priv->psr.dp)
- return;
-
- if (!psr_global_enabled(dev_priv)) {
+ if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
return;
}
- /*
- * HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms have a instance of PSR registers per transcoder but
- * for now it only supports one instance of PSR, so lets keep it
- * hardcoded to PORT_A
- */
- if (dig_port->base.port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: Port not supported\n");
- return;
- }
-
- if (dev_priv->psr.sink_not_reliable) {
+ if (intel_dp->psr.sink_not_reliable) {
drm_dbg_kms(&dev_priv->drm,
"PSR sink implementation is not reliable\n");
return;
@@ -878,23 +876,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder transcoder = intel_dp->psr.transcoder;
- if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ if (transcoder_has_psr2(dev_priv, transcoder))
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+ intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
- drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
- lockdep_assert_held(&dev_priv->psr.lock);
+ intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ lockdep_assert_held(&intel_dp->psr.lock);
/* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_enabled)
+ if (intel_dp->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
- dev_priv->psr.active = true;
+ intel_dp->psr.active = true;
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -910,8 +909,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
- !IS_GEMINILAKE(dev_priv))) {
+ if (intel_dp->psr.psr2_enabled && IS_DISPLAY_VER(dev_priv, 9)) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = intel_de_read(dev_priv, reg);
@@ -931,13 +929,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
EDP_PSR_DEBUG_MASK_LPSP |
EDP_PSR_DEBUG_MASK_MAX_SLEEP;
- if (INTEL_GEN(dev_priv) < 11)
+ if (DISPLAY_VER(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
mask);
- psr_irq_control(dev_priv);
+ psr_irq_control(intel_dp);
if (crtc_state->dc3co_exitline) {
u32 val;
@@ -955,30 +953,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
- dev_priv->psr.psr2_sel_fetch_enabled ?
+ intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
}
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = dev_priv->psr.dp;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
u32 val;
- drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
- dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+ intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.busy_frontbuffer_bits = 0;
+ intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
/* DC5/DC6 requires at least 6 idle frames */
val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
- dev_priv->psr.dc3co_exit_delay = val;
- dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.dc3co_exit_delay = val;
+ intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -988,29 +986,29 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
* first time that PSR HW tries to activate so lets keep PSR disabled
* to avoid any rendering problems.
*/
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
val = intel_de_read(dev_priv,
- TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ TRANS_PSR_IIR(intel_dp->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
}
if (val) {
- dev_priv->psr.sink_not_reliable = true;
+ intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
"PSR interruption error set, not enabling PSR\n");
return;
}
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.psr2_enabled ? "2" : "1");
intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &dev_priv->psr.vsc);
- intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
+ &intel_dp->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
- dev_priv->psr.enabled = true;
+ intel_dp->psr.enabled = true;
intel_psr_activate(intel_dp);
}
@@ -1029,7 +1027,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
+ if (!CAN_PSR(intel_dp))
return;
if (!crtc_state->has_psr)
@@ -1037,46 +1035,47 @@ void intel_psr_enable(struct intel_dp *intel_dp,
drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
- mutex_lock(&dev_priv->psr.lock);
- intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
+ intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+ mutex_unlock(&intel_dp->psr.lock);
}
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
+static void intel_psr_exit(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- if (!dev_priv->psr.active) {
- if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ if (!intel_dp->psr.active) {
+ if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
}
- if (dev_priv->psr.psr2_enabled) {
- tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ if (intel_dp->psr.psr2_enabled) {
+ tgl_disallow_dc3co_on_psr2_exit(intel_dp);
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
val &= ~EDP_PSR2_ENABLE;
intel_de_write(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
} else {
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
val &= ~EDP_PSR_ENABLE;
intel_de_write(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
- dev_priv->psr.active = false;
+ intel_dp->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
@@ -1085,21 +1084,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
i915_reg_t psr_status;
u32 psr_status_mask;
- lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&intel_dp->psr.lock);
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
return;
drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
- if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+ if (intel_dp->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+ psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -1109,8 +1108,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
/* WA 1408330847 */
- if (dev_priv->psr.psr2_sel_fetch_enabled &&
- (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+ if (intel_dp->psr.psr2_sel_fetch_enabled &&
+ (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
@@ -1118,10 +1117,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
- if (dev_priv->psr.psr2_enabled)
+ if (intel_dp->psr.psr2_enabled)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
- dev_priv->psr.enabled = false;
+ intel_dp->psr.enabled = false;
}
/**
@@ -1139,20 +1138,22 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
return;
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
intel_psr_disable_locked(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
- cancel_work_sync(&dev_priv->psr.work);
- cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
+ mutex_unlock(&intel_dp->psr.lock);
+ cancel_work_sync(&intel_dp->psr.work);
+ cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
if (IS_TIGERLAKE(dev_priv))
/*
* Writes to CURSURFLIVE in TGL are causing IOMMU errors and
@@ -1166,8 +1167,8 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* So using this workaround until this issue is root caused
* and a better fix is found.
*/
- intel_psr_exit(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 9)
+ intel_psr_exit(intel_dp);
+ else if (DISPLAY_VER(dev_priv) >= 9)
/*
* Display WA #0884: skl+
* This documented WA for bxt can be safely applied
@@ -1177,13 +1178,13 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* but it makes more sense write to the current active
* pipe.
*/
- intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
else
/*
* A write to CURSURFLIVE do not cause HW tracking to exit PSR
* on older gens so doing the manual exit instead.
*/
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
}
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
@@ -1231,15 +1232,13 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct i915_psr *psr = &dev_priv->psr;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
!crtc_state->enable_psr2_sel_fetch)
return;
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
crtc_state->psr2_man_track_ctl);
}
@@ -1435,29 +1434,30 @@ void intel_psr_update(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
bool enable, psr2_enable;
- if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ if (!CAN_PSR(intel_dp))
return;
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
enable = crtc_state->has_psr;
psr2_enable = crtc_state->has_psr2;
- if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+ if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
+ crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
- psr_force_hw_tracking_exit(dev_priv);
- else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
+ psr_force_hw_tracking_exit(intel_dp);
+ else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
/*
* Activate PSR again after a force exit when enabling
* CRC in older gens
*/
- if (!dev_priv->psr.active &&
- !dev_priv->psr.busy_frontbuffer_bits)
- schedule_work(&dev_priv->psr.work);
+ if (!intel_dp->psr.active &&
+ !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
}
goto unlock;
@@ -1467,34 +1467,23 @@ void intel_psr_update(struct intel_dp *intel_dp,
intel_psr_disable_locked(intel_dp);
if (enable)
- intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
+ intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
- * @new_crtc_state: new CRTC state
+ * psr_wait_for_idle - wait for PSR1 to idle
+ * @intel_dp: Intel DP
* @out_value: PSR status in case of failure
*
- * This function is expected to be called from pipe_update_start() where it is
- * not expected to race with PSR enable or disable.
- *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
+ *
*/
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value)
+static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
- return 0;
-
- /* FIXME: Update this for PSR2 if we need to wait for idle */
- if (READ_ONCE(dev_priv->psr.psr2_enabled))
- return 0;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/*
* From bspec: Panel Self Refresh (BDW+)
@@ -1502,32 +1491,67 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
-
return __intel_wait_for_register(&dev_priv->uncore,
- EDP_PSR_STATUS(dev_priv->psr.transcoder),
+ EDP_PSR_STATUS(intel_dp->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
}
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ */
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!new_crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ new_crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 psr_status;
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+		/* PSR1 is enabled here; wait for it to idle */
+ if (psr_wait_for_idle(intel_dp, &psr_status))
+ drm_err(&dev_priv->drm,
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t reg;
u32 mask;
int err;
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
return false;
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+ if (intel_dp->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+ reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
@@ -1535,8 +1559,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
- mutex_lock(&dev_priv->psr.lock);
- return err == 0 && dev_priv->psr.enabled;
+ mutex_lock(&intel_dp->psr.lock);
+ return err == 0 && intel_dp->psr.enabled;
}
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
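The intel_psr_wait_for_idle() rewrite above replaces the single device-wide PSR instance with a walk over every PSR-capable encoder in the new CRTC state's encoder mask, taking each intel_dp's own psr.lock and waiting only on PSR1 instances. A condensed standalone analog of that per-encoder walk, with plain structs in place of the drm/i915 types:

#include <stdio.h>
#include <stdbool.h>

struct psr_state { bool enabled; bool psr2_enabled; };

static int wait_idle(struct psr_state *psr)
{
	return 0;	/* the real code polls EDP_PSR_STATUS here */
}

int main(void)
{
	struct psr_state encoders[] = {
		{ .enabled = true,  .psr2_enabled = false },	/* waited on */
		{ .enabled = true,  .psr2_enabled = true  },	/* skipped */
		{ .enabled = false, .psr2_enabled = false },	/* skipped */
	};
	unsigned int encoder_mask = 0x7;	/* all three in this CRTC state */

	for (unsigned int i = 0; i < 3; i++) {
		if (!(encoder_mask & (1u << i)))
			continue;
		/* mutex_lock(&intel_dp->psr.lock) in the driver */
		if (!encoders[i].enabled || encoders[i].psr2_enabled)
			continue;	/* only PSR1 needs the idle wait */
		if (wait_idle(&encoders[i]))
			fprintf(stderr, "PSR idle timed out on encoder %u\n", i);
		else
			printf("encoder %u idle\n", i);
		/* mutex_unlock(&intel_dp->psr.lock) */
	}
	return 0;
}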
@@ -1602,33 +1626,34 @@ retry:
return err;
}
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
u32 old_mode;
int ret;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
- mode > I915_PSR_DEBUG_FORCE_PSR1) {
+ mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
- ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+ ret = mutex_lock_interruptible(&intel_dp->psr.lock);
if (ret)
return ret;
- old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
- dev_priv->psr.debug = val;
+ old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+ intel_dp->psr.debug = val;
/*
* Do it right away if it's already enabled, otherwise it will be done
* when enabling the source.
*/
- if (dev_priv->psr.enabled)
- psr_irq_control(dev_priv);
+ if (intel_dp->psr.enabled)
+ psr_irq_control(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
if (old_mode != mode)
ret = intel_psr_fastset_force(dev_priv);
@@ -1636,28 +1661,28 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
return ret;
}
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
- intel_psr_disable_locked(psr->dp);
+ intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
	/* let's make sure the sink is awake */
- drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static void intel_psr_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work);
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.work);
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
goto unlock;
- if (READ_ONCE(dev_priv->psr.irq_aux_error))
- intel_psr_handle_irq(dev_priv);
+ if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ intel_psr_handle_irq(intel_dp);
/*
* We have to make sure PSR is ready for re-enable
@@ -1665,7 +1690,7 @@ static void intel_psr_work(struct work_struct *work)
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!__psr_wait_for_idle_locked(dev_priv))
+ if (!__psr_wait_for_idle_locked(intel_dp))
goto unlock;
/*
@@ -1673,12 +1698,12 @@ static void intel_psr_work(struct work_struct *work)
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+ if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
goto unlock;
- intel_psr_activate(dev_priv->psr.dp);
+ intel_psr_activate(intel_dp);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
@@ -1697,27 +1722,31 @@ unlock:
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- if (!CAN_PSR(dev_priv))
- return;
+ struct intel_encoder *encoder;
if (origin == ORIGIN_FLIP)
return;
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
- dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
- if (frontbuffer_bits)
- intel_psr_exit(dev_priv);
+ if (pipe_frontbuffer_bits)
+ intel_psr_exit(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
}
-
/*
 * Once we rely completely on PSR2 S/W tracking in the future,
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
@@ -1725,15 +1754,15 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
* accordingly in future.
*/
static void
-tgl_dc3co_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits, enum fb_op_origin origin)
+tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
- if (!dev_priv->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_enabled)
goto unlock;
- if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
goto unlock;
/*
@@ -1741,15 +1770,15 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
* when delayed work schedules that means display has been idle.
*/
if (!(frontbuffer_bits &
- INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
goto unlock;
- tgl_psr2_enable_dc3co(dev_priv);
- mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
- dev_priv->psr.dc3co_exit_delay);
+ tgl_psr2_enable_dc3co(intel_dp);
+ mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+ intel_dp->psr.dc3co_exit_delay);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
@@ -1768,46 +1797,69 @@ unlock:
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- if (!CAN_PSR(dev_priv))
- return;
+ struct intel_encoder *encoder;
- if (origin == ORIGIN_FLIP) {
- tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
- return;
- }
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
+ continue;
+ }
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
- /* By definition flush = invalidate + flush */
- if (frontbuffer_bits)
- psr_force_hw_tracking_exit(dev_priv);
+ /* By definition flush = invalidate + flush */
+ if (pipe_frontbuffer_bits)
+ psr_force_hw_tracking_exit(intel_dp);
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- schedule_work(&dev_priv->psr.work);
- mutex_unlock(&dev_priv->psr.lock);
+ if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
}
/**
* intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
+ * @intel_dp: Intel DP
*
- * This function is called only once at driver load to initialize basic
- * PSR stuff.
+ * This function is called after the connector has been initialized
+ * (connector initialization handles the connector's capabilities)
+ * and sets up basic PSR state for each DP encoder.
*/
-void intel_psr_init(struct drm_i915_private *dev_priv)
+void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
if (!HAS_PSR(dev_priv))
return;
- if (!dev_priv->psr.sink_support)
+ /*
+ * HSW spec explicitly says PSR is tied to port A.
+	 * BDW+ platforms have an instance of PSR registers per transcoder,
+	 * but on BDW, GEN9 and GEN11 only the eDP transcoder has been
+	 * validated by the HW team, so for now PSR on those platforms is
+	 * limited to a single instance, hardcoded to PORT_A. GEN12 supports
+	 * an instance of PSR registers per transcoder.
+ */
+ if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
return;
+ }
+
+ intel_dp->psr.source_support = true;
if (IS_HASWELL(dev_priv))
/*
@@ -1818,20 +1870,20 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
if (dev_priv->params.enable_psr == -1)
- if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+ if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
dev_priv->params.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
- dev_priv->psr.link_standby = false;
- else if (INTEL_GEN(dev_priv) < 12)
+ intel_dp->psr.link_standby = false;
+ else if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
- dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+ intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
- INIT_WORK(&dev_priv->psr.work, intel_psr_work);
- INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
- mutex_init(&dev_priv->psr.lock);
+ INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+ mutex_init(&intel_dp->psr.lock);
}
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
@@ -1857,7 +1909,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct drm_dp_aux *aux = &intel_dp->aux;
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
@@ -1884,7 +1936,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
@@ -1908,18 +1960,18 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ if (!CAN_PSR(intel_dp))
return;
mutex_lock(&psr->lock);
- if (!psr->enabled || psr->dp != intel_dp)
+ if (!psr->enabled)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
@@ -1962,15 +2014,14 @@ exit:
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
bool ret;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ if (!CAN_PSR(intel_dp))
return false;
- mutex_lock(&dev_priv->psr.lock);
- ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
+ ret = intel_dp->psr.enabled;
+ mutex_unlock(&intel_dp->psr.lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 0a517978e8af..0491a49ffd50 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -18,7 +18,6 @@ struct intel_atomic_state;
struct intel_plane_state;
struct intel_plane;
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
@@ -28,20 +27,19 @@ void intel_psr_disable(struct intel_dp *intel_dp,
void intel_psr_update(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_init(struct intel_dp *intel_dp);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value);
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
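With the device-level CAN_PSR(dev_priv) macro removed from this header, the check becomes per-DP. Its new home is not part of the hunks shown here; given the source_support and sink_support fields this patch introduces, a plausible definition, inferred from the diff rather than quoted from it, would be:

/* Presumed shape of the per-DP replacement for CAN_PSR(dev_priv);
 * an inference from the fields this patch sets up, not a quoted hunk. */
#define CAN_PSR(intel_dp) \
	((intel_dp)->psr.sink_support && (intel_dp)->psr.source_support)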
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 46beb155d835..98dd787b00e3 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -160,7 +160,7 @@ static struct intel_quirk intel_quirks[] = {
void intel_init_quirks(struct drm_i915_private *i915)
{
- struct pci_dev *d = i915->drm.pdev;
+ struct pci_dev *d = to_pci_dev(i915->drm.dev);
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
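The drm.pdev to to_pci_dev(drm.dev) conversion seen throughout this series works because a struct pci_dev embeds a struct device, so the PCI device can be recovered from the generic device pointer with container_of(). A minimal standalone illustration of the pattern, with simplified stand-ins for the kernel structs:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for struct device / struct pci_dev. */
struct device { const char *name; };
struct pci_dev { int devfn; struct device dev; };

/* In the kernel, to_pci_dev(d) is container_of(d, struct pci_dev, dev). */
#define to_pci_dev(d) \
	((struct pci_dev *)((char *)(d) - offsetof(struct pci_dev, dev)))

int main(void)
{
	struct pci_dev pdev = { .devfn = 42, .dev = { .name = "card0" } };
	struct device *dev = &pdev.dev;		/* what drm.dev points at */

	printf("%d\n", to_pci_dev(dev)->devfn);	/* recovers 42 */
	return 0;
}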
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 4eaa4aa86ecd..f770d6bcd2c9 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1540,11 +1540,11 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
return;
/* Set the SDVO control regs. */
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (INTEL_GEN(dev_priv) < 5)
+ if (DISPLAY_VER(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
@@ -1560,7 +1560,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (DISPLAY_VER(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
@@ -1571,7 +1571,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
}
if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
- INTEL_GEN(dev_priv) < 5)
+ DISPLAY_VER(dev_priv) < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -3281,7 +3281,7 @@ static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 993543334a1e..acbf4e63b245 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -45,284 +45,10 @@
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
-#include "intel_pm.h"
-#include "intel_psr.h"
-#include "intel_dsi.h"
#include "intel_sprite.h"
#include "i9xx_plane.h"
#include "intel_vrr.h"
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs)
-{
- /* paranoia */
- if (!adjusted_mode->crtc_htotal)
- return 1;
-
- return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
- 1000 * adjusted_mode->crtc_htotal);
-}
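
intel_usecs_to_scanlines() moves out of this file unchanged, so its arithmetic is worth a quick standalone check: DRM mode clocks are in kHz, hence usecs * clock / (1000 * htotal) yields whole scanlines. The 1080p60 timings below are illustrative assumptions, not taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long clock_khz = 148500;	/* assumed 1080p60 pixel clock */
	long htotal = 2200;		/* assumed total pixels per line */
	long usecs = 100;		/* the evasion budget used below */

	long scanlines = DIV_ROUND_UP(usecs * clock_khz, 1000 * htotal);

	printf("%ld us ~= %ld scanlines\n", usecs, scanlines); /* 7 */
	return 0;
}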
-
-static int intel_mode_vblank_start(const struct drm_display_mode *mode)
-{
- int vblank_start = mode->crtc_vblank_start;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vblank_start = DIV_ROUND_UP(vblank_start, 2);
-
- return vblank_start;
-}
-
-/**
- * intel_pipe_update_start() - start update of a set of display registers
- * @new_crtc_state: the new crtc state
- *
- * Mark the start of an update to pipe registers that should be updated
- * atomically regarding vblank. If the next vblank will happens within
- * the next 100 us, this function waits until the vblank passes.
- *
- * After a successful call to this function, interrupts will be disabled
- * until a subsequent call to intel_pipe_update_end(). That is done to
- * avoid random delays.
- */
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
- long timeout = msecs_to_jiffies_timeout(1);
- int scanline, min, max, vblank_start;
- wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
- bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
- DEFINE_WAIT(wait);
- u32 psr_status;
-
- if (new_crtc_state->uapi.async_flip)
- return;
-
- if (new_crtc_state->vrr.enable)
- vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
- else
- vblank_start = intel_mode_vblank_start(adjusted_mode);
-
- /* FIXME needs to be calibrated sensibly */
- min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
- VBLANK_EVASION_TIME_US);
- max = vblank_start - 1;
-
- if (min <= 0 || max <= 0)
- goto irq_disable;
-
- if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
- goto irq_disable;
-
- /*
- * Wait for PSR to idle out after enabling the VBL interrupts.
- * VBL interrupts will start the PSR exit and prevent a PSR
- * re-entry as well.
- */
- if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
- drm_err(&dev_priv->drm,
- "PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
-
- local_irq_disable();
-
- crtc->debug.min_vbl = min;
- crtc->debug.max_vbl = max;
- trace_intel_pipe_update_start(crtc);
-
- for (;;) {
- /*
- * prepare_to_wait() has a memory barrier, which guarantees
- * other CPUs can see the task state update by the time we
- * read the scanline.
- */
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-
- scanline = intel_get_crtc_scanline(crtc);
- if (scanline < min || scanline > max)
- break;
-
- if (!timeout) {
- drm_err(&dev_priv->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
- break;
- }
-
- local_irq_enable();
-
- timeout = schedule_timeout(timeout);
-
- local_irq_disable();
- }
-
- finish_wait(wq, &wait);
-
- drm_crtc_vblank_put(&crtc->base);
-
- /*
- * On VLV/CHV DSI the scanline counter would appear to
- * increment approx. 1/3 of a scanline before start of vblank.
- * The registers still get latched at start of vblank however.
- * This means we must not write any registers on the first
- * line of vblank (since not the whole line is actually in
- * vblank). And unfortunately we can't use the interrupt to
- * wait here since it will fire too soon. We could use the
- * frame start interrupt instead since it will fire after the
- * critical scanline, but that would require more changes
- * in the interrupt code. So for now we'll just do the nasty
- * thing and poll for the bad scanline to pass us by.
- *
- * FIXME figure out if BXT+ DSI suffers from this as well
- */
- while (need_vlv_dsi_wa && scanline == vblank_start)
- scanline = intel_get_crtc_scanline(crtc);
-
- crtc->debug.scanline_start = scanline;
- crtc->debug.start_vbl_time = ktime_get();
- crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
-
- trace_intel_pipe_update_vblank_evaded(crtc);
- return;
-
-irq_disable:
- local_irq_disable();
-}
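
The evasion window computed above is [vblank_start - N, vblank_start - 1], where N comes from intel_usecs_to_scanlines(); a commit that would start inside it waits for the vblank to pass. A small sketch of that window test, reusing the illustrative 1080p numbers from the previous example:

#include <stdbool.h>
#include <stdio.h>

static bool in_evasion_window(int scanline, int vblank_start, int evasion)
{
	int min = vblank_start - evasion;	/* first "too late" line */
	int max = vblank_start - 1;		/* last line before vblank */

	return scanline >= min && scanline <= max;
}

int main(void)
{
	int vblank_start = 1084;	/* assumed 1080p vblank start */
	int evasion = 7;		/* ~100 us, see previous example */

	printf("line 1000 waits: %d\n",
	       in_evasion_window(1000, vblank_start, evasion)); /* 0 */
	printf("line 1080 waits: %d\n",
	       in_evasion_window(1080, vblank_start, evasion)); /* 1 */
	return 0;
}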
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
-static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
-{
- u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
- unsigned int h;
-
- h = ilog2(delta >> 9);
- if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
- h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
- crtc->debug.vbl.times[h]++;
-
- crtc->debug.vbl.sum += delta;
- if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
- crtc->debug.vbl.min = delta;
- if (delta > crtc->debug.vbl.max)
- crtc->debug.vbl.max = delta;
-
- if (delta > 1000 * VBLANK_EVASION_TIME_US) {
- drm_dbg_kms(crtc->base.dev,
- "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
- pipe_name(crtc->pipe),
- div_u64(delta, 1000),
- VBLANK_EVASION_TIME_US);
- crtc->debug.vbl.over++;
- }
-}
-#else
-static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
-#endif
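
The bucketing in dbg_vblank_evade() takes delta in nanoseconds, so (delta >> 9) is roughly half-microseconds and ilog2() of that selects a power-of-two latency bucket. A standalone version with a hand-rolled ilog2 (illustrative, not the kernel helper):

#include <stdio.h>

static unsigned int ilog2_ull(unsigned long long v)
{
	unsigned int h = 0;

	while (v >>= 1)
		h++;
	return h;
}

int main(void)
{
	unsigned long long deltas_ns[] = { 1000, 80000, 250000 };

	for (int i = 0; i < 3; i++)
		printf("%llu ns -> bucket %u\n", deltas_ns[i],
		       ilog2_ull(deltas_ns[i] >> 9));
	/* buckets 0, 7 and 8: each bucket spans a power of two */
	return 0;
}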
-
-/**
- * intel_pipe_update_end() - end update of a set of display registers
- * @new_crtc_state: the new crtc state
- *
- * Mark the end of an update started with intel_pipe_update_start(). This
- * re-enables interrupts and verifies the update was actually completed
- * before a vblank.
- */
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
- int scanline_end = intel_get_crtc_scanline(crtc);
- u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
- ktime_t end_vbl_time = ktime_get();
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (new_crtc_state->uapi.async_flip)
- return;
-
- trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
-
- /*
- * In case of MIPI DSI command mode, we need to set a frame update
- * request for every commit.
- */
- if (INTEL_GEN(dev_priv) >= 11 &&
- intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
- icl_dsi_frame_update(new_crtc_state);
-
- /* We're still in the vblank-evade critical section, so this can't race.
- * It would be nicer to just grab the vblank count and arm the
- * event outside of the critical section - the spinlock might spin for a
- * while ... */
- if (new_crtc_state->uapi.event) {
- drm_WARN_ON(&dev_priv->drm,
- drm_crtc_vblank_get(&crtc->base) != 0);
-
- spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base,
- new_crtc_state->uapi.event);
- spin_unlock(&crtc->base.dev->event_lock);
-
- new_crtc_state->uapi.event = NULL;
- }
-
- local_irq_enable();
-
- /* Send VRR Push to terminate Vblank */
- intel_vrr_send_push(new_crtc_state);
-
- if (intel_vgpu_active(dev_priv))
- return;
-
- if (crtc->debug.start_vbl_count &&
- crtc->debug.start_vbl_count != end_vbl_count) {
- drm_err(&dev_priv->drm,
- "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
- pipe_name(pipe), crtc->debug.start_vbl_count,
- end_vbl_count,
- ktime_us_delta(end_vbl_time,
- crtc->debug.start_vbl_time),
- crtc->debug.min_vbl, crtc->debug.max_vbl,
- crtc->debug.scanline_start, scanline_end);
- }
-
- dbg_vblank_evade(crtc, end_vbl_time);
-}
-
-int intel_plane_check_stride(const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- u32 stride, max_stride;
-
- /*
- * We ignore stride for all invisible planes that
- * can be remapped. Otherwise we could end up
- * with a false positive when the remapping didn't
- * kick in due to the plane being invisible.
- */
- if (intel_plane_can_remap(plane_state) &&
- !plane_state->uapi.visible)
- return 0;
-
- /* FIXME other color planes? */
- stride = plane_state->color_plane[0].stride;
- max_stride = plane->max_stride(plane, fb->format->format,
- fb->modifier, rotation);
-
- if (stride > max_stride) {
- DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
- fb->base.id, stride,
- plane->base.base.id, plane->base.name, max_stride);
- return -EINVAL;
- }
-
- return 0;
-}
-
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -380,584 +106,6 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
-static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
-{
- if (IS_ROCKETLAKE(i915))
- return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
- else
- return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
-}
-
-bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
-}
-
-bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_hdr_plane_mask() & BIT(plane_id);
-}
-
-static void
-skl_plane_ratio(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- unsigned int *num, unsigned int *den)
-{
- struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- if (fb->format->cpp[0] == 8) {
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- *num = 10;
- *den = 8;
- } else {
- *num = 9;
- *den = 8;
- }
- } else {
- *num = 1;
- *den = 1;
- }
-}
-
-static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int num, den;
- unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
-
- skl_plane_ratio(crtc_state, plane_state, &num, &den);
-
- /* two pixels per clock on glk+ */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- den *= 2;
-
- return DIV_ROUND_UP(pixel_rate * num, den);
-}
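
To make the ratio above concrete: a 64bpp (cpp == 8) framebuffer on glk+ gets num/den = 10/8, and the two-pixels-per-clock doubling turns that into cdclk >= pixel_rate * 10 / 16. A worked example with an assumed pixel rate:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixel_rate = 148500;	/* kHz, assumed mode */
	unsigned int num = 10, den = 8;		/* cpp == 8 on glk+ */

	den *= 2;				/* two pixels per clock */

	printf("min cdclk: %u kHz\n",
	       DIV_ROUND_UP(pixel_rate * num, den)); /* 92813 */
	return 0;
}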
-
-static int skl_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- /*
- * The validated limit is 4k, but 5k should
- * work apart from the following features:
- * - Ytile (already limited to 4k)
- * - FP16 (already limited to 4k)
- * - render compression (already limited to 4k)
- * - KVMR sprite and cursor (don't care)
- * - horizontal panning (TODO verify this)
- * - pipe and plane scaling (TODO verify this)
- */
- if (cpp == 8)
- return 4096;
- else
- return 5120;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- /* FIXME AUX plane? */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (cpp == 8)
- return 2048;
- else
- return 4096;
- default:
- MISSING_CASE(fb->modifier);
- return 2048;
- }
-}
-
-static int glk_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- if (cpp == 8)
- return 4096;
- else
- return 5120;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- /* FIXME AUX plane? */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (cpp == 8)
- return 2048;
- else
- return 5120;
- default:
- MISSING_CASE(fb->modifier);
- return 2048;
- }
-}
-
-static int icl_plane_min_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- /* Wa_14011264657, Wa_14011050563: gen11+ */
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- return 18;
- case DRM_FORMAT_RGB565:
- return 10;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- return 6;
- case DRM_FORMAT_NV12:
- return 20;
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return 12;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- return 4;
- default:
- return 1;
- }
-}
-
-static int icl_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- return 5120;
-}
-
-static int skl_plane_max_height(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- return 4096;
-}
-
-static int icl_plane_max_height(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
-{
- return 4320;
-}
-
-static unsigned int
-skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
-{
- const struct drm_format_info *info = drm_format_info(pixel_format);
- int cpp = info->cpp[0];
-
- /*
- * "The stride in bytes must not exceed the
- * of the size of 8K pixels and 32K bytes."
- */
- if (drm_rotation_90_or_270(rotation))
- return min(8192, 32768 / cpp);
- else
- return min(8192 * cpp, 32768);
-}
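
The quoted limit works out to the lesser of 8K pixels and 32K bytes, expressed in pixels for 90/270 rotation and in bytes otherwise. A worked example for a 32bpp (cpp == 4) framebuffer:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int cpp = 4;	/* e.g. XRGB8888 */

	printf("rotated max stride:   %d pixels\n",
	       min_int(8192, 32768 / cpp));	/* 8192 */
	printf("unrotated max stride: %d bytes\n",
	       min_int(8192 * cpp, 32768));	/* 32768 */
	return 0;
}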
-
-static void
-skl_program_scaler(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- enum pipe pipe = plane->pipe;
- int scaler_id = plane_state->scaler_id;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[scaler_id];
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
- u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u16 y_hphase, uv_rgb_hphase;
- u16 y_vphase, uv_rgb_vphase;
- int hscale, vscale;
- u32 ps_ctrl;
-
- hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
- &plane_state->uapi.dst,
- 0, INT_MAX);
- vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
- &plane_state->uapi.dst,
- 0, INT_MAX);
-
- /* TODO: handle sub-pixel coordinates */
- if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- !icl_is_hdr_plane(dev_priv, plane->id)) {
- y_hphase = skl_scaler_calc_phase(1, hscale, false);
- y_vphase = skl_scaler_calc_phase(1, vscale, false);
-
- /* MPEG2 chroma siting convention */
- uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
- uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
- } else {
- /* not used */
- y_hphase = 0;
- y_vphase = 0;
-
- uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
- uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
- }
-
- ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
-
- skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
- plane_state->hw.scaling_filter);
-
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
- (crtc_x << 16) | crtc_y);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
- (crtc_w << 16) | crtc_h);
-}
-
-/* Preoffset values for YUV to RGB Conversion */
-#define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x0000
-#define PREOFF_YUV_TO_RGB_LO 0x1800
-
-#define ROFF(x) (((x) & 0xffff) << 16)
-#define GOFF(x) (((x) & 0xffff) << 0)
-#define BOFF(x) (((x) & 0xffff) << 16)
-
-/*
- * Programs the input color space conversion stage for ICL HDR planes.
- * Note that it is assumed that this stage always happens after YUV
- * range correction. Thus, the input to this stage is assumed to be
- * in full-range YCbCr.
- */
-static void
-icl_program_input_csc(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- enum plane_id plane_id = plane->id;
-
- static const u16 input_csc_matrix[][9] = {
- /*
- * BT.601 full range YCbCr -> full range RGB
- * The matrix required is :
- * [1.000, 0.000, 1.371,
- * 1.000, -0.336, -0.698,
- * 1.000, 1.732, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT601] = {
- 0x7AF8, 0x7800, 0x0,
- 0x8B28, 0x7800, 0x9AC0,
- 0x0, 0x7800, 0x7DD8,
- },
- /*
- * BT.709 full range YCbCr -> full range RGB
- * The matrix required is :
- * [1.000, 0.000, 1.574,
- * 1.000, -0.187, -0.468,
- * 1.000, 1.855, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT709] = {
- 0x7C98, 0x7800, 0x0,
- 0x9EF8, 0x7800, 0xAC00,
- 0x0, 0x7800, 0x7ED8,
- },
- /*
- * BT.2020 full range YCbCr -> full range RGB
- * The matrix required is :
- * [1.000, 0.000, 1.474,
- * 1.000, -0.1645, -0.5713,
- * 1.000, 1.8814, 0.0000]
- */
- [DRM_COLOR_YCBCR_BT2020] = {
- 0x7BC8, 0x7800, 0x0,
- 0x8928, 0x7800, 0xAA88,
- 0x0, 0x7800, 0x7F10,
- },
- };
- const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
-
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
- ROFF(csc[0]) | GOFF(csc[1]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
- BOFF(csc[2]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
- ROFF(csc[3]) | GOFF(csc[4]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
- BOFF(csc[5]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
- ROFF(csc[6]) | GOFF(csc[7]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
- BOFF(csc[8]));
-
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
- PREOFF_YUV_TO_RGB_HI);
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
- PREOFF_YUV_TO_RGB_LO);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
-}
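
The coefficient comments above are ordinary full-range YCbCr-to-RGB matrices with input order (Y, Cb, Cr). A floating-point sketch applying the BT.709 values from the comment; the fixed-point register encoding itself is not modelled here:

#include <stdio.h>

int main(void)
{
	/* full-range input, chroma centred on 0 */
	double y = 0.5, cb = 0.0, cr = 0.25;

	double r = y + 1.574 * cr;
	double g = y - 0.187 * cb - 0.468 * cr;
	double b = y + 1.855 * cb;

	printf("R=%.3f G=%.3f B=%.3f\n", r, g, b); /* ~0.894 0.383 0.500 */
	return 0;
}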
-
-static void
-skl_plane_async_flip(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- bool async_flip)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- unsigned long irqflags;
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- u32 surf_addr = plane_state->color_plane[0].offset;
- u32 plane_ctl = plane_state->ctl;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-skl_program_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->color_plane[color_plane].offset;
- u32 stride = skl_plane_stride(plane_state, color_plane);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int aux_plane = intel_main_to_aux_plane(fb, color_plane);
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_y = plane_state->uapi.dst.y1;
- u32 x = plane_state->color_plane[color_plane].x;
- u32 y = plane_state->color_plane[color_plane].y;
- u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- u8 alpha = plane_state->hw.alpha >> 8;
- u32 plane_color_ctl = 0, aux_dist = 0;
- unsigned long irqflags;
- u32 keymsk, keymax;
- u32 plane_ctl = plane_state->ctl;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- plane_color_ctl = plane_state->color_ctl |
- glk_plane_color_ctl_crtc(crtc_state);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
-
- keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
-
- keymsk = key->channel_mask & 0x7ffffff;
- if (alpha < 0xff)
- keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
-
- /* The scaler will handle the output position */
- if (plane_state->scaler_id >= 0) {
- crtc_x = 0;
- crtc_y = 0;
- }
-
- if (aux_plane) {
- aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
-
- if (INTEL_GEN(dev_priv) < 12)
- aux_dist |= skl_plane_stride(plane_state, aux_plane);
- }
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
- intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
- (crtc_y << 16) | crtc_x);
- intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
- (src_h << 16) | src_w);
-
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
-
- if (icl_is_hdr_plane(dev_priv, plane_id))
- intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
- plane_state->cus_ctl);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
- plane_color_ctl);
-
- if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
- icl_program_input_csc(plane, crtc_state, plane_state);
-
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
- intel_uncore_write64_fw(&dev_priv->uncore,
- PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
-
- skl_write_plane_wm(plane, crtc_state);
-
- intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
- key->min_value);
- intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
- intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
-
- intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
- (y << 16) | x);
-
- if (INTEL_GEN(dev_priv) < 11)
- intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
-
- if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
-
- /*
- * The control register self-arms if the plane was previously
- * disabled. Try to make the plane enable atomic by writing
- * the control register just before the surface register.
- */
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
-
- if (plane_state->scaler_id >= 0)
- skl_program_scaler(plane, crtc_state, plane_state);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- int color_plane = 0;
-
- if (plane_state->planar_linked_plane && !plane_state->planar_slave)
- /* Program the UV plane on planar master */
- color_plane = 1;
-
- skl_program_plane(plane, crtc_state, plane_state, color_plane);
-}
-static void
-skl_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (icl_is_hdr_plane(dev_priv, plane_id))
- intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
-
- skl_write_plane_wm(plane, crtc_state);
-
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static bool
-skl_plane_get_hw_state(struct intel_plane *plane,
- enum pipe *pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id = plane->id;
- intel_wakeref_t wakeref;
- bool ret;
-
- power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return false;
-
- ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
-
- *pipe = plane->pipe;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static void
-skl_plane_enable_flip_done(struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
-}
-
-static void
-skl_plane_disable_flip_done(struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
-}
-
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1275,15 +423,15 @@ vlv_update_plane(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
- u32 sprsurf_offset = plane_state->color_plane[0].offset;
+ u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u32 x = plane_state->color_plane[0].x;
- u32 y = plane_state->color_plane[0].y;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
unsigned long irqflags;
u32 sprctl;
@@ -1298,7 +446,7 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
+ plane_state->view.color_plane[0].stride);
intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
@@ -1692,15 +840,15 @@ ivb_update_plane(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 sprsurf_offset = plane_state->color_plane[0].offset;
+ u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u32 x = plane_state->color_plane[0].x;
- u32 y = plane_state->color_plane[0].y;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 sprctl, sprscale = 0;
@@ -1722,7 +870,7 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
- plane_state->color_plane[0].stride);
+ plane_state->view.color_plane[0].stride);
intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (IS_IVYBRIDGE(dev_priv))
@@ -1898,7 +1046,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
dvscntr = DVS_ENABLE;
- if (IS_GEN(dev_priv, 6))
+ if (IS_SANDYBRIDGE(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -2020,15 +1168,15 @@ g4x_update_plane(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 dvssurf_offset = plane_state->color_plane[0].offset;
+ u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- u32 x = plane_state->color_plane[0].x;
- u32 y = plane_state->color_plane[0].y;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 dvscntr, dvsscale = 0;
@@ -2050,7 +1198,7 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
- plane_state->color_plane[0].stride);
+ plane_state->view.color_plane[0].stride);
intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
@@ -2123,19 +1271,18 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
-static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+static bool g4x_fb_scalable(const struct drm_framebuffer *fb)
{
if (!fb)
return false;
switch (fb->format->format) {
case DRM_FORMAT_C8:
- return false;
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
- return INTEL_GEN(to_i915(fb->dev)) >= 11;
+ return false;
default:
return true;
}
@@ -2151,7 +1298,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- unsigned int stride = plane_state->color_plane[0].stride;
+ unsigned int stride = plane_state->view.color_plane[0].stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
@@ -2212,8 +1359,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (intel_fb_scalable(plane_state->hw.fb)) {
- if (INTEL_GEN(dev_priv) < 7) {
+ if (g4x_fb_scalable(plane_state->hw.fb)) {
+ if (DISPLAY_VER(dev_priv) < 7) {
min_scale = 1;
max_scale = 16 << 16;
} else if (IS_IVYBRIDGE(dev_priv)) {
@@ -2242,7 +1389,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (INTEL_GEN(dev_priv) >= 7)
+ if (DISPLAY_VER(dev_priv) >= 7)
plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
else
plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
@@ -2301,243 +1448,9 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
-static bool intel_format_is_p01x(u32 format)
-{
- switch (format) {
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return true;
- default:
- return false;
- }
-}
-
-static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- struct drm_format_name_buf format_name;
-
- if (!fb)
- return 0;
-
- if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
- is_ccs_modifier(fb->modifier)) {
- drm_dbg_kms(&dev_priv->drm,
- "RC support only with 0/180 degree rotation (%x)\n",
- rotation);
- return -EINVAL;
- }
-
- if (rotation & DRM_MODE_REFLECT_X &&
- fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- drm_dbg_kms(&dev_priv->drm,
- "horizontal flip is not supported with linear surface formats\n");
- return -EINVAL;
- }
-
- if (drm_rotation_90_or_270(rotation)) {
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
- drm_dbg_kms(&dev_priv->drm,
- "Y/Yf tiling required for 90/270!\n");
- return -EINVAL;
- }
-
- /*
- * 90/270 is not allowed with RGB64 16:16:16:16 and
- * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
- */
- switch (fb->format->format) {
- case DRM_FORMAT_RGB565:
- if (INTEL_GEN(dev_priv) >= 11)
- break;
- fallthrough;
- case DRM_FORMAT_C8:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- drm_dbg_kms(&dev_priv->drm,
- "Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
- return -EINVAL;
- default:
- break;
- }
- }
-
- /* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->hw.enable &&
- crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
- drm_dbg_kms(&dev_priv->drm,
- "Y/Yf tiling not supported in IF-ID mode\n");
- return -EINVAL;
- }
-
- /* Wa_1606054188:tgl */
- if (IS_TIGERLAKE(dev_priv) &&
- plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
- intel_format_is_p01x(fb->format->format)) {
- drm_dbg_kms(&dev_priv->drm,
- "Source color keying not supported with P01x formats\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->uapi.plane->dev);
- int crtc_x = plane_state->uapi.dst.x1;
- int crtc_w = drm_rect_width(&plane_state->uapi.dst);
- int pipe_src_w = crtc_state->pipe_src_w;
-
- /*
- * Display WA #1175: cnl,glk
- * Planes other than the cursor may cause FIFO underflow and display
- * corruption if starting less than 4 pixels from the right edge of
- * the screen.
- * Besides the above WA, fix the similar problem where planes other
- * than the cursor that end less than 4 pixels from the left edge of
- * the screen may cause FIFO underflow and display corruption.
- */
- if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
- drm_dbg_kms(&dev_priv->drm,
- "requested plane X %s position %d invalid (valid range %d-%d)\n",
- crtc_x + crtc_w < 4 ? "end" : "start",
- crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
- 4, pipe_src_w - 4);
- return -ERANGE;
- }
-
- return 0;
-}
-
-static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- unsigned int rotation = plane_state->hw.rotation;
- int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- /* Display WA #1106 */
- if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- src_w & 3 &&
- (rotation == DRM_MODE_ROTATE_270 ||
- rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
- DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
- const struct drm_framebuffer *fb)
-{
- /*
- * We don't yet know the final source width nor
- * whether we can use the HQ scaler mode. Assume
- * the best case.
- * FIXME need to properly check this later.
- */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
- return 0x30000 - 1;
- else
- return 0x20000 - 1;
-}
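
The scale limits above follow the DRM helpers' 16.16 fixed-point convention, so 0x30000 - 1 means "just under 3x downscaling" (and 0x20000 - 1 just under 2x). A one-liner to confirm:

#include <stdio.h>

int main(void)
{
	int max_scale = 0x30000 - 1;	/* best case from above */

	printf("max downscale: %.6f\n", max_scale / 65536.0); /* 2.999985 */
	return 0;
}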
-
-static int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
- int ret;
-
- ret = skl_plane_check_fb(crtc_state, plane_state);
- if (ret)
- return ret;
-
- /* use scaler when colorkey is not required */
- if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
- min_scale = 1;
- max_scale = skl_plane_max_scale(dev_priv, fb);
- }
-
- ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- min_scale, max_scale, true);
- if (ret)
- return ret;
-
- ret = skl_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
- if (!plane_state->uapi.visible)
- return 0;
-
- ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
- if (ret)
- return ret;
-
- ret = intel_plane_check_src_coordinates(plane_state);
- if (ret)
- return ret;
-
- ret = skl_plane_check_nv12_rotation(plane_state);
- if (ret)
- return ret;
-
- /* HW only has 8 bits of pixel precision, disable the plane if invisible */
- if (!(plane_state->hw.alpha >> 8))
- plane_state->uapi.visible = false;
-
- plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
- plane_state);
-
- if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
- icl_is_hdr_plane(dev_priv, plane->id))
- /* Enable and use MPEG-2 chroma siting */
- plane_state->cus_ctl = PLANE_CUS_ENABLE |
- PLANE_CUS_HPHASE_0 |
- PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
- else
- plane_state->cus_ctl = 0;
-
- return 0;
-}
-
static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
{
- return INTEL_GEN(dev_priv) >= 9;
+ return DISPLAY_VER(dev_priv) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
@@ -2561,7 +1474,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
- if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
@@ -2600,7 +1513,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
- if (INTEL_GEN(dev_priv) >= 9 &&
+ if (DISPLAY_VER(dev_priv) >= 9 &&
to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -2712,186 +1625,6 @@ static const u32 chv_pipe_b_sprite_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u32 skl_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_XYUV8888,
-};
-
-static const u32 skl_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_XYUV8888,
-};
-
-static const u32 glk_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
-};
-
-static const u32 icl_sdr_y_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_sdr_uv_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_ARGB16161616F,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u64 skl_plane_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 skl_plane_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -2984,150 +1717,6 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- fallthrough;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- case DRM_FORMAT_XVYU2101010:
- if (modifier == I915_FORMAT_MOD_Yf_TILED)
- return true;
- fallthrough;
- case DRM_FORMAT_C8:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- fallthrough;
- default:
- return false;
- }
-}
-
-static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
- return false;
-
- return plane_id < PLANE_SPRITE4;
-}
-
-static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct drm_i915_private *dev_priv = to_i915(_plane->dev);
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
- return false;
- fallthrough;
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- fallthrough;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
- return true;
- fallthrough;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_C8:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- fallthrough;
- default:
- return false;
- }
-}
-
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -3155,257 +1744,6 @@ static const struct drm_plane_funcs vlv_sprite_funcs = {
.format_mod_supported = vlv_sprite_format_mod_supported,
};
-static const struct drm_plane_funcs skl_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static const struct drm_plane_funcs gen12_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = gen12_plane_format_mod_supported,
-};
-
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- /* Display WA #0870: skl, bxt */
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
- return false;
-
- return true;
-}
-
-static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id,
- int *num_formats)
-{
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- *num_formats = ARRAY_SIZE(skl_planar_formats);
- return skl_planar_formats;
- } else {
- *num_formats = ARRAY_SIZE(skl_plane_formats);
- return skl_plane_formats;
- }
-}
-
-static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id,
- int *num_formats)
-{
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- *num_formats = ARRAY_SIZE(glk_planar_formats);
- return glk_planar_formats;
- } else {
- *num_formats = ARRAY_SIZE(skl_plane_formats);
- return skl_plane_formats;
- }
-}
-
-static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id,
- int *num_formats)
-{
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
- return icl_hdr_plane_formats;
- } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
- *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
- return icl_sdr_y_plane_formats;
- } else {
- *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
- return icl_sdr_uv_plane_formats;
- }
-}
-
-static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
- return gen12_plane_format_modifiers_mc_ccs;
- else
- return gen12_plane_format_modifiers_rc_ccs;
-}
-
-static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (plane_id == PLANE_CURSOR)
- return false;
-
- if (INTEL_GEN(dev_priv) >= 10)
- return true;
-
- if (IS_GEMINILAKE(dev_priv))
- return pipe != PIPE_C;
-
- return pipe != PIPE_C &&
- (plane_id == PLANE_PRIMARY ||
- plane_id == PLANE_SPRITE0);
-}
-
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- const struct drm_plane_funcs *plane_funcs;
- struct intel_plane *plane;
- enum drm_plane_type plane_type;
- unsigned int supported_rotations;
- unsigned int supported_csc;
- const u64 *modifiers;
- const u32 *formats;
- int num_formats;
- int ret;
-
- plane = intel_plane_alloc();
- if (IS_ERR(plane))
- return plane;
-
- plane->pipe = pipe;
- plane->id = plane_id;
- plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
-
- plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- plane->min_width = icl_plane_min_width;
- plane->max_width = icl_plane_max_width;
- plane->max_height = icl_plane_max_height;
- } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- plane->max_width = glk_plane_max_width;
- plane->max_height = skl_plane_max_height;
- } else {
- plane->max_width = skl_plane_max_width;
- plane->max_height = skl_plane_max_height;
- }
-
- plane->max_stride = skl_plane_max_stride;
- plane->update_plane = skl_update_plane;
- plane->disable_plane = skl_disable_plane;
- plane->get_hw_state = skl_plane_get_hw_state;
- plane->check_plane = skl_plane_check;
- plane->min_cdclk = skl_plane_min_cdclk;
-
- if (plane_id == PLANE_PRIMARY) {
- plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10);
- plane->async_flip = skl_plane_async_flip;
- plane->enable_flip_done = skl_plane_enable_flip_done;
- plane->disable_flip_done = skl_plane_disable_flip_done;
- }
-
- if (INTEL_GEN(dev_priv) >= 11)
- formats = icl_get_plane_formats(dev_priv, pipe,
- plane_id, &num_formats);
- else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- formats = glk_get_plane_formats(dev_priv, pipe,
- plane_id, &num_formats);
- else
- formats = skl_get_plane_formats(dev_priv, pipe,
- plane_id, &num_formats);
-
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (INTEL_GEN(dev_priv) >= 12) {
- modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
- plane_funcs = &gen12_plane_funcs;
- } else {
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
- plane_funcs = &skl_plane_funcs;
- }
-
- if (plane_id == PLANE_PRIMARY)
- plane_type = DRM_PLANE_TYPE_PRIMARY;
- else
- plane_type = DRM_PLANE_TYPE_OVERLAY;
-
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- 0, plane_funcs,
- formats, num_formats, modifiers,
- plane_type,
- "plane %d%c", plane_id + 1,
- pipe_name(pipe));
- if (ret)
- goto fail;
-
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
-
- if (INTEL_GEN(dev_priv) >= 10)
- supported_rotations |= DRM_MODE_REFLECT_X;
-
- drm_plane_create_rotation_property(&plane->base,
- DRM_MODE_ROTATE_0,
- supported_rotations);
-
- supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
-
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
-
- drm_plane_create_color_properties(&plane->base,
- supported_csc,
- BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
- BIT(DRM_COLOR_YCBCR_FULL_RANGE),
- DRM_COLOR_YCBCR_BT709,
- DRM_COLOR_YCBCR_LIMITED_RANGE);
-
- drm_plane_create_alpha_property(&plane->base);
- drm_plane_create_blend_mode_property(&plane->base,
- BIT(DRM_MODE_BLEND_PIXEL_NONE) |
- BIT(DRM_MODE_BLEND_PREMULTI) |
- BIT(DRM_MODE_BLEND_COVERAGE));
-
- drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
-
- if (INTEL_GEN(dev_priv) >= 12)
- drm_plane_enable_fb_damage_clips(&plane->base);
-
- if (INTEL_GEN(dev_priv) >= 10)
- drm_plane_create_scaling_filter_property(&plane->base,
- BIT(DRM_SCALING_FILTER_DEFAULT) |
- BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
-
- drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
-
- return plane;
-
-fail:
- intel_plane_free(plane);
-
- return ERR_PTR(ret);
-}
-
struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int sprite)
@@ -3418,10 +1756,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
int num_formats;
int ret, zpos;
- if (INTEL_GEN(dev_priv) >= 9)
- return skl_universal_plane_create(dev_priv, pipe,
- PLANE_SPRITE0 + sprite);
-
plane = intel_plane_alloc();
if (IS_ERR(plane))
return plane;
@@ -3444,7 +1778,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
- } else if (INTEL_GEN(dev_priv) >= 7) {
+ } else if (DISPLAY_VER(dev_priv) >= 7) {
plane->update_plane = ivb_update_plane;
plane->disable_plane = ivb_disable_plane;
plane->get_hw_state = ivb_plane_get_hw_state;
@@ -3472,7 +1806,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_SANDYBRIDGE(dev_priv)) {
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 76126dd8d584..c085eb87705c 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -35,12 +35,8 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
static inline u8 icl_hdr_plane_mask(void)
{
@@ -48,10 +44,6 @@ static inline u8 icl_hdr_plane_mask(void)
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
-bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id);
-bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
-
int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 2cefc13535a0..71b8edafb1c3 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -28,7 +28,7 @@ tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- if (INTEL_GEN(i915) == 11)
+ if (IS_DISPLAY_VER(i915, 11))
return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
else
return POWER_DOMAIN_TC_COLD_OFF;
@@ -40,7 +40,7 @@ tc_cold_block(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum intel_display_power_domain domain;
- if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+ if (IS_DISPLAY_VER(i915, 11) && !dig_port->tc_legacy_port)
return 0;
domain = tc_cold_get_power_domain(dig_port);
@@ -71,7 +71,7 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool enabled;
- if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
+ if (IS_DISPLAY_VER(i915, 11) && !dig_port->tc_legacy_port)
return;
enabled = intel_display_power_is_enabled(i915,
@@ -455,7 +455,7 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
+ if (DISPLAY_VER(i915) != 11 || !dig_port->tc_legacy_port) {
enum intel_display_power_domain aux_domain;
bool aux_powered;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 7a7b99b015a5..e558f121ec4e 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1165,7 +1165,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
int hdisplay)
{
- return IS_GEN(dev_priv, 3) && hdisplay > 1024;
+ return IS_DISPLAY_VER(dev_priv, 3) && hdisplay > 1024;
}
static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
@@ -1519,7 +1519,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
set_color_conversion(dev_priv, color_conversion);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
else
intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
@@ -1789,7 +1789,7 @@ intel_tv_get_modes(struct drm_connector *connector)
continue;
/* no vertical scaling with wide sources on gen3 */
- if (IS_GEN(dev_priv, 3) && input->w > 1024 &&
+ if (IS_DISPLAY_VER(dev_priv, 3) && input->w > 1024 &&
input->h > intel_tv_mode_vdisplay(tv_mode))
continue;
@@ -1978,7 +1978,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
/* 1080p50/1080p60 not supported on gen3 */
- if (IS_GEN(dev_priv, 3) &&
+ if (IS_DISPLAY_VER(dev_priv, 3) &&
tv_modes[i].oversample == 1)
break;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 187ec573de59..dbe24d7e7375 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -327,6 +327,10 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_4,
TGL_DDC_BUS_PORT_5,
TGL_DDC_BUS_PORT_6,
+ ADLS_DDC_BUS_PORT_TC1 = 0x2,
+ ADLS_DDC_BUS_PORT_TC2,
+ ADLS_DDC_BUS_PORT_TC3,
+ ADLS_DDC_BUS_PORT_TC4
};
#define DP_AUX_A 0x40
@@ -339,10 +343,21 @@ enum vbt_gmbus_ddi {
#define DP_AUX_H 0x80
#define DP_AUX_I 0x90
-#define VBT_DP_MAX_LINK_RATE_HBR3 0
-#define VBT_DP_MAX_LINK_RATE_HBR2 1
-#define VBT_DP_MAX_LINK_RATE_HBR 2
-#define VBT_DP_MAX_LINK_RATE_LBR 3
+/* DP max link rate 216+ */
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR3 0
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR2 1
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR 2
+#define BDB_216_VBT_DP_MAX_LINK_RATE_LBR 3
+
+/* DP max link rate 230+ */
+#define BDB_230_VBT_DP_MAX_LINK_RATE_DEF 0
+#define BDB_230_VBT_DP_MAX_LINK_RATE_LBR 1
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR 2
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR2 3
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR3 4
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10 5
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
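+
+/*
+ * The dp_max_link_rate child device field below has to be decoded
+ * against the BDB version: BDB 216..229 uses the 2-bit BDB_216_*
+ * encoding, BDB 230+ the 3-bit BDB_230_* encoding. A minimal decode
+ * sketch (bdb_version and the rate lookup tables are hypothetical,
+ * for illustration only):
+ *
+ * if (bdb_version >= 230)
+ * rate = bdb_230_rates[child->dp_max_link_rate];
+ * else
+ * rate = bdb_216_rates[child->dp_max_link_rate];
+ */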
/*
* The child device config, aka the display device data structure, provides a
@@ -441,8 +456,8 @@ struct child_device_config {
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
u8 hdmi_iboost_level:4; /* 196 */
- u8 dp_max_link_rate:2; /* 216 CNL+ */
- u8 dp_max_link_rate_reserved:6; /* 216 */
+ u8 dp_max_link_rate:3; /* 216/230 CNL+ */
+ u8 dp_max_link_rate_reserved:5; /* 216/230 */
} __packed;
struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index a86c57d117f2..3a21c65ffa85 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -343,14 +343,10 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
return false;
/* On TGL, DSC is supported on all Pipes */
- if (INTEL_GEN(i915) >= 12)
+ if (DISPLAY_VER(i915) >= 12)
return true;
- if (INTEL_GEN(i915) >= 10 &&
- (pipe != PIPE_A ||
- (cpu_transcoder == TRANSCODER_EDP ||
- cpu_transcoder == TRANSCODER_DSI_0 ||
- cpu_transcoder == TRANSCODER_DSI_1)))
+ if ((DISPLAY_VER(i915) >= 11 || IS_CANNONLAKE(i915)) &&
+ (pipe != PIPE_A ||
+ (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)))
return true;
return false;
@@ -362,7 +358,7 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (INTEL_GEN(i915) >= 12)
+ if (DISPLAY_VER(i915) >= 12)
return true;
if (cpu_transcoder == TRANSCODER_EDP ||
@@ -479,7 +475,7 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (INTEL_GEN(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
+ if (DISPLAY_VER(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc_state))
return POWER_DOMAIN_PIPE(pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index be333699c515..f002b82ba9c0 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -16,7 +16,7 @@ static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return VLV_VGACNTRL;
- else if (INTEL_GEN(i915) >= 5)
+ else if (DISPLAY_VER(i915) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
@@ -25,7 +25,7 @@ static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
u8 sr1;
@@ -76,7 +76,7 @@ void intel_vga_redisable(struct drm_i915_private *i915)
void intel_vga_reset_io_mem(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -96,7 +96,7 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915)
static int
intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
- unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
@@ -136,7 +136,7 @@ intel_vga_set_decode(void *cookie, bool enable_decode)
int intel_vga_register(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int ret;
/*
@@ -156,7 +156,7 @@ int intel_vga_register(struct drm_i915_private *i915)
void intel_vga_unregister(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
vga_client_register(pdev, NULL, NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index fac01bf4ab50..96f9c9c27ab9 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -15,7 +15,6 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
struct intel_encoder;
-struct intel_crtc;
bool intel_vrr_is_capable(struct drm_connector *connector);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
new file mode 100644
index 000000000000..17a98cb627df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_display_types.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | | 1.5 (initial phase)
+ * | | |
+ * v v v
+ * | s | s | s | s |
+ * | d |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | | 0.0
+ * | | |
+ * v v v
+ * | s |
+ * | d | d | d | d |
+ */
+static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ phase += scale / (2 * sub);
+
+ /*
+ * Hardware initial phase limited to [-0.5:1.5].
+ * Since the max hardware scale factor is 3.0, we
+ * should never actually exceed 1.0 here.
+ */
+ WARN_ON(phase < -0x8000 || phase > 0x18000);
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
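+
+/*
+ * Worked examples matching the diagrams above (scale and phase in
+ * .16 fixed point, so 0x8000 == 0.5):
+ *
+ * 4:1 downscale, luma: skl_scaler_calc_phase(1, 0x40000, false)
+ * -> phase = -0x8000 + 0x40000 / 2 = 0x18000 = +1.5
+ * 1:4 upscale, luma: skl_scaler_calc_phase(1, 0x4000, false)
+ * -> phase = -0x8000 + 0x4000 / 2 = -0x6000 = -0.375
+ *
+ * matching the initial phases shown in the two diagrams.
+ */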
+
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
+
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ unsigned int scaler_user, int *scaler_id,
+ int src_w, int src_h, int dst_w, int dst_h,
+ const struct drm_format_info *format,
+ u64 modifier, bool need_scaler)
+{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ if (src_w != dst_w || src_h != dst_h)
+ need_scaler = true;
+
+ /*
+ * Scaling/fitting not supported in IF-ID mode in GEN9+
+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+ * Once NV12 is enabled, handle it here while allocating scaler
+ * for NV12.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable &&
+ need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Pipe/Plane scaling not supported with IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the plane is being disabled, the scaler is no longer required,
+ * or a force detach was requested:
+ * - free the scaler bound to this plane/crtc
+ * - to do so, update crtc->scaler_usage
+ *
+ * The scaler state in crtc_state is marked free here so that the
+ * scaler can be assigned to another user. The actual register update
+ * that frees the scaler is done during plane/panel-fit programming,
+ * which is why crtc/plane_state->scaler_id isn't reset here.
+ */
+ if (force_detach || !need_scaler) {
+ if (*scaler_id >= 0) {
+ scaler_state->scaler_users &= ~(1 << scaler_user);
+ scaler_state->scalers[*scaler_id].in_use = 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
+ scaler_state->scaler_users);
+ *scaler_id = -1;
+ }
+ return 0;
+ }
+
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
+ (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Planar YUV: src dimensions not met\n");
+ return -EINVAL;
+ }
+
+ /* range checks */
+ if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+ dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+ (DISPLAY_VER(dev_priv) >= 11 &&
+ (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+ dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+ (DISPLAY_VER(dev_priv) < 11 &&
+ (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+ dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h,
+ dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << scaler_user);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
+
+ return 0;
+}
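+
+/*
+ * Bookkeeping example: the pipe scaler is staged under the fixed
+ * SKL_CRTC_INDEX bit and each plane under its drm_plane_index() bit,
+ * so scaler_users ends up with one bit per active user. A later
+ * force_detach pass clears the user's bit and the scaler's in_use
+ * flag; the register write that actually releases the hardware
+ * scaler happens in skl_detach_scalers().
+ */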
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int width, height;
+
+ if (crtc_state->pch_pfit.enabled) {
+ width = drm_rect_width(&crtc_state->pch_pfit.dst);
+ height = drm_rect_height(&crtc_state->pch_pfit.dst);
+ } else {
+ width = pipe_mode->crtc_hdisplay;
+ height = pipe_mode->crtc_vdisplay;
+ }
+ return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+ SKL_CRTC_INDEX,
+ &crtc_state->scaler_state.scaler_id,
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ width, height, NULL, 0,
+ crtc_state->pch_pfit.enabled);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ * @crtc_state: crtc state containing the scaler state to update
+ * @plane_state: atomic plane state to update
+ *
+ * Return:
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *intel_plane =
+ to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+ bool force_detach = !fb || !plane_state->uapi.visible;
+ bool need_scaler = false;
+
+ /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ need_scaler = true;
+
+ ret = skl_update_scaler(crtc_state, force_detach,
+ drm_plane_index(&intel_plane->base),
+ &plane_state->scaler_id,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
+
+ if (ret || plane_state->scaler_id < 0)
+ return ret;
+
+ /* check colorkey */
+ if (plane_state->ckey.flags) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
+ return -EINVAL;
+ }
+
+ /* Check src format */
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (DISPLAY_VER(dev_priv) >= 11)
+ break;
+ fallthrough;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cnl_coef_tap(int i)
+{
+ return i % 7;
+}
+
+static u16 cnl_nearest_filter_coef(int t)
+{
+ return t == 3 ? 0x0800 : 0x3000;
+}
+
+/*
+ * Theory behind setting nearest-neighbor integer scaling:
+ *
+ * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
+ * The letter represents the filter tap (D is the center tap) and the number
+ * represents the coefficient set for a phase (0-16).
+ *
+ * +------------+------------------------+------------------------+
+ * |Index value | Data value coefficient 1 | Data value coefficient 2 |
+ * +------------+------------------------+------------------------+
+ * | 00h | B0 | A0 |
+ * +------------+------------------------+------------------------+
+ * | 01h | D0 | C0 |
+ * +------------+------------------------+------------------------+
+ * | 02h | F0 | E0 |
+ * +------------+------------------------+------------------------+
+ * | 03h | A1 | G0 |
+ * +------------+------------------------+------------------------+
+ * | 04h | C1 | B1 |
+ * +------------+------------------------+------------------------+
+ * | ... | ... | ... |
+ * +------------+------------------------+------------------------+
+ * | 38h | B16 | A16 |
+ * +------------+------------------------+------------------------+
+ * | 39h | D16 | C16 |
+ * +------------+------------------------+------------------------+
+ * | 3Ah | F16 | E16 |
+ * +------------+------------------------+------------------------+
+ * | 3Bh | Reserved | G16 |
+ * +------------+------------------------+------------------------+
+ *
+ * To enable nearest-neighbor scaling: program scaler coefficients with
+ * the center tap (Dxx) values set to 1 and all other values set to 0 as per
+ * SCALER_COEFFICIENT_FORMAT
+ *
+ */
+
+static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int id, int set)
+{
+ int i;
+
+ intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
+ PS_COEE_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * 7; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = cnl_coef_tap(i);
+ tmp = cnl_nearest_filter_coef(t);
+
+ t = cnl_coef_tap(i + 1);
+ tmp |= cnl_nearest_filter_coef(t) << 16;
+
+ intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
+ tmp);
+ }
+
+ intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
+}
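+
+/*
+ * Trace of the first few dwords written above, where 0x0800 encodes
+ * 1.0 and 0x3000 encodes 0.0 per the center-tap rule in the comment:
+ *
+ * i = 0: taps 0,1 (A0 low, B0 high) -> 0x3000 | 0x3000 << 16
+ * i = 2: taps 2,3 (C0 low, D0 high) -> 0x3000 | 0x0800 << 16
+ * i = 4: taps 4,5 (E0 low, F0 high) -> 0x3000 | 0x3000 << 16
+ *
+ * and so on for all 17 phases; only the center (D) taps get 1.0.
+ */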
+
+static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+{
+ if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
+ return (PS_FILTER_PROGRAMMED |
+ PS_Y_VERT_FILTER_SELECT(set) |
+ PS_Y_HORZ_FILTER_SELECT(set) |
+ PS_UV_VERT_FILTER_SELECT(set) |
+ PS_UV_HORZ_FILTER_SELECT(set));
+ }
+
+ return PS_FILTER_MEDIUM;
+}
+
+static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
+ int id, int set, enum drm_scaling_filter filter)
+{
+ switch (filter) {
+ case DRM_SCALING_FILTER_DEFAULT:
+ break;
+ case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
+ cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+ break;
+ default:
+ MISSING_CASE(filter);
+ }
+}
+
+void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_rect src = {
+ .x2 = crtc_state->pipe_src_w << 16,
+ .y2 = crtc_state->pipe_src_h << 16,
+ };
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ u16 uv_rgb_hphase, uv_rgb_vphase;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+ int hscale, vscale;
+ unsigned long irqflags;
+ int id;
+ u32 ps_ctrl;
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
+ return;
+
+ hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ id = scaler_state->scaler_id;
+
+ ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ skl_scaler_setup_filter(dev_priv, pipe, id, 0,
+ crtc_state->hw.scaling_filter);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
+
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ x << 16 | y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ width << 16 | height);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
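+
+/*
+ * Example: upscaling a 960x540 source into a 1920x1080 pfit window
+ * gives hscale = vscale = 0x8000 (0.5 in .16 fixed point), so
+ * skl_scaler_calc_phase(1, 0x8000, false) programs an initial phase
+ * of -0x8000 + 0x8000 / 2 = -0x4000 = -0.25 into the UV/RGB fields
+ * of PS_VPHASE/PS_HPHASE, while the Y phase stays 0 for RGB data.
+ */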
+
+void
+skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+ int hscale, vscale;
+ u32 ps_ctrl;
+
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+
+ /* TODO: handle sub-pixel coordinates */
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ !icl_is_hdr_plane(dev_priv, plane->id)) {
+ y_hphase = skl_scaler_calc_phase(1, hscale, false);
+ y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ }
+
+ ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
+
+ skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
+ plane_state->hw.scaling_filter);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ (crtc_x << 16) | crtc_y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ (crtc_w << 16) | crtc_h);
+}
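+
+/*
+ * Chroma siting example for the semiplanar branch above, with a 2:1
+ * downscale in both directions (hscale = vscale = 0x20000):
+ *
+ * horizontal, cosited: skl_scaler_calc_phase(2, 0x20000, true)
+ * -> -0x8000 + 0x8000 / 2 + 0x20000 / 4 = 0xC000 = +0.75
+ * vertical, not cosited: skl_scaler_calc_phase(2, 0x20000, false)
+ * -> -0x8000 + 0x20000 / 4 = 0 = 0.0
+ *
+ * i.e. horizontally cosited, vertically centered chroma as per the
+ * MPEG2 convention mentioned above.
+ */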
+
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * This function detaches (aka. unbinds) unused scalers in hardware
+ */
+void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int i;
+
+ /* loop through and disable scalers that aren't in use */
+ for (i = 0; i < intel_crtc->num_scalers; i++) {
+ if (!scaler_state->scalers[i].in_use)
+ skl_detach_scaler(intel_crtc, i);
+ }
+}
+
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++)
+ skl_detach_scaler(crtc, i);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
new file mode 100644
index 000000000000..0097d5d08e10
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#ifndef INTEL_SCALER_H
+#define INTEL_SCALER_H
+
+#include <linux/types.h>
+
+enum drm_scaling_filter;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+struct intel_plane;
+enum pipe;
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+
+int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+
+void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+
+void skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
new file mode 100644
index 000000000000..7ffd7b570b54
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -0,0 +1,2218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic_plane.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+static const u32 skl_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_XYUV8888,
+};
+
+static const u32 skl_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
+};
+
+static const u32 glk_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+};
+
+static const u32 icl_sdr_y_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_sdr_uv_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u64 skl_plane_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 skl_plane_format_modifiers_ccs[] = {
+ I915_FORMAT_MOD_Yf_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+ switch (format) {
+ case PLANE_CTL_FORMAT_RGB_565:
+ return DRM_FORMAT_RGB565;
+ case PLANE_CTL_FORMAT_NV12:
+ return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_XYUV:
+ return DRM_FORMAT_XYUV8888;
+ case PLANE_CTL_FORMAT_P010:
+ return DRM_FORMAT_P010;
+ case PLANE_CTL_FORMAT_P012:
+ return DRM_FORMAT_P012;
+ case PLANE_CTL_FORMAT_P016:
+ return DRM_FORMAT_P016;
+ case PLANE_CTL_FORMAT_Y210:
+ return DRM_FORMAT_Y210;
+ case PLANE_CTL_FORMAT_Y212:
+ return DRM_FORMAT_Y212;
+ case PLANE_CTL_FORMAT_Y216:
+ return DRM_FORMAT_Y216;
+ case PLANE_CTL_FORMAT_Y410:
+ return DRM_FORMAT_XVYU2101010;
+ case PLANE_CTL_FORMAT_Y412:
+ return DRM_FORMAT_XVYU12_16161616;
+ case PLANE_CTL_FORMAT_Y416:
+ return DRM_FORMAT_XVYU16161616;
+ default:
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR8888;
+ else
+ return DRM_FORMAT_XBGR8888;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB8888;
+ else
+ return DRM_FORMAT_XRGB8888;
+ }
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR2101010;
+ else
+ return DRM_FORMAT_XBGR2101010;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
+ case PLANE_CTL_FORMAT_XRGB_16161616F:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR16161616F;
+ else
+ return DRM_FORMAT_XBGR16161616F;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB16161616F;
+ else
+ return DRM_FORMAT_XRGB16161616F;
+ }
+ }
+}
+
+static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
+{
+ if (HAS_D12_PLANE_MINIMIZATION(i915))
+ return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
+ else
+ return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
+}
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ return DISPLAY_VER(dev_priv) >= 11 &&
+ icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
+}
+
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+{
+ return DISPLAY_VER(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
+}
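+
+/*
+ * Example: on icl+ the planes listed in icl_nv12_y_plane_mask() -
+ * SPRITE4/SPRITE5, or SPRITE2/SPRITE3 on the reduced display-12
+ * plane set (e.g. rkl) - fetch the Y data of planar formats, while
+ * PRIMARY/SPRITE0/SPRITE1 (icl_hdr_plane_mask()) are the HDR-capable
+ * planes.
+ */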
+
+static void
+skl_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 9;
+ *den = 8;
+ }
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
+ skl_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock on glk+ */
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
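+
+/*
+ * Example: a 64bpp fp16 framebuffer (cpp == 8) on glk+ needs
+ * pixel_rate * 10 / (8 * 2) of cdclk, i.e. 62.5% of the pixel rate
+ * thanks to the two-pixels-per-clock pipeline, while pre-glk the
+ * same framebuffer needs pixel_rate * 9 / 8.
+ */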
+
+static int skl_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ /*
+ * The validated limit is 4k, but 5k should
+ * work apart from the following features:
+ * - Ytile (already limited to 4k)
+ * - FP16 (already limited to 4k)
+ * - render compression (already limited to 4k)
+ * - KVMR sprite and cursor (don't care)
+ * - horizontal panning (TODO verify this)
+ * - pipe and plane scaling (TODO verify this)
+ */
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 4096;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int glk_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 5120;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int icl_plane_min_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ /* Wa_14011264657, Wa_14011050563: gen11+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return 18;
+ case DRM_FORMAT_RGB565:
+ return 10;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ return 6;
+ case DRM_FORMAT_NV12:
+ return 20;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return 12;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ return 4;
+ default:
+ return 1;
+ }
+}
+
+static int icl_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 5120;
+}
+
+static int skl_plane_max_height(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 4096;
+}
+
+static int icl_plane_max_height(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 4320;
+}
+
+static unsigned int
+skl_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /*
+ * "The stride in bytes must not exceed the
+ * of the size of 8K pixels and 32K bytes."
+ */
+ if (drm_rotation_90_or_270(rotation))
+ return min(8192, 32768 / cpp);
+ else
+ return min(8192 * cpp, 32768);
+}
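+
+/*
+ * Example: for 64bpp fp16 (cpp == 8) the unrotated limit is
+ * min(8192 * 8, 32768) = 32768 bytes, i.e. the 32K byte cap wins,
+ * while rotated 90/270 it is min(8192, 32768 / 8) = 4096 pixels.
+ */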
+
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x0000
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
+static void
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ static const u16 input_csc_matrix[][9] = {
+ /*
+ * BT.601 full range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.000, 0.000, 1.371,
+ * 1.000, -0.336, -0.698,
+ * 1.000, 1.732, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7AF8, 0x7800, 0x0,
+ 0x8B28, 0x7800, 0x9AC0,
+ 0x0, 0x7800, 0x7DD8,
+ },
+ /*
+ * BT.709 full range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.000, 0.000, 1.574,
+ * 1.000, -0.187, -0.468,
+ * 1.000, 1.855, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7C98, 0x7800, 0x0,
+ 0x9EF8, 0x7800, 0xAC00,
+ 0x0, 0x7800, 0x7ED8,
+ },
+ /*
+ * BT.2020 full range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.000, 0.000, 1.474,
+ * 1.000, -0.1645, -0.5713,
+ * 1.000, 1.8814, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7BC8, 0x7800, 0x0,
+ 0x8928, 0x7800, 0xAA88,
+ 0x0, 0x7800, 0x7F10,
+ },
+ };
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+ * The stride is either expressed as a multiple of 64 bytes chunks for
+ * linear buffers or in number of tiles for tiled buffers.
+ */
+ if (is_surface_linear(fb, color_plane))
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
+static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride = plane_state->view.color_plane[color_plane].stride;
+
+ if (color_plane >= fb->format->num_planes)
+ return 0;
+
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
+}
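+
+/*
+ * Example: a linear XRGB8888 fb with a 5120 byte stride is programmed
+ * as 5120 / 64 = 80 (64-byte units), while the same stride on an
+ * X-tiled fb is programmed as 5120 / 512 = 10 (X tiles are 512 bytes
+ * wide).
+ */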
+
+static void
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static u32 skl_plane_ctl_format(u32 pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_C8:
+ return PLANE_CTL_FORMAT_INDEXED;
+ case DRM_FORMAT_RGB565:
+ return PLANE_CTL_FORMAT_RGB_565;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F;
+ case DRM_FORMAT_XYUV8888:
+ return PLANE_CTL_FORMAT_XYUV;
+ case DRM_FORMAT_YUYV:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ case DRM_FORMAT_YVYU:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ case DRM_FORMAT_UYVY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ case DRM_FORMAT_VYUY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ case DRM_FORMAT_NV12:
+ return PLANE_CTL_FORMAT_NV12;
+ case DRM_FORMAT_P010:
+ return PLANE_CTL_FORMAT_P010;
+ case DRM_FORMAT_P012:
+ return PLANE_CTL_FORMAT_P012;
+ case DRM_FORMAT_P016:
+ return PLANE_CTL_FORMAT_P016;
+ case DRM_FORMAT_Y210:
+ return PLANE_CTL_FORMAT_Y210;
+ case DRM_FORMAT_Y212:
+ return PLANE_CTL_FORMAT_Y212;
+ case DRM_FORMAT_Y216:
+ return PLANE_CTL_FORMAT_Y216;
+ case DRM_FORMAT_XVYU2101010:
+ return PLANE_CTL_FORMAT_Y410;
+ case DRM_FORMAT_XVYU12_16161616:
+ return PLANE_CTL_FORMAT_Y412;
+ case DRM_FORMAT_XVYU16161616:
+ return PLANE_CTL_FORMAT_Y416;
+ default:
+ MISSING_CASE(pixel_format);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+ if (!plane_state->hw.fb->format->has_alpha)
+ return PLANE_CTL_ALPHA_DISABLE;
+
+ switch (plane_state->hw.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_CTL_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
+ return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
+ default:
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
+ return PLANE_CTL_ALPHA_DISABLE;
+ }
+}
+
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+ if (!plane_state->hw.fb->format->has_alpha)
+ return PLANE_COLOR_ALPHA_DISABLE;
+
+ switch (plane_state->hw.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
+ return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
+ default:
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
+ return PLANE_COLOR_ALPHA_DISABLE;
+ }
+}
+
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
+{
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ return PLANE_CTL_TILED_X;
+ case I915_FORMAT_MOD_Y_TILED:
+ return PLANE_CTL_TILED_Y;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return PLANE_CTL_TILED_Y |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Yf_TILED:
+ return PLANE_CTL_TILED_YF;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ default:
+ MISSING_CASE(fb_modifier);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
+{
+ switch (rotate) {
+ case DRM_MODE_ROTATE_0:
+ break;
+ /*
+ * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
+ * while i915 HW rotation is clockwise, which is why the values are
+ * swapped here.
+ */
+ case DRM_MODE_ROTATE_90:
+ return PLANE_CTL_ROTATE_270;
+ case DRM_MODE_ROTATE_180:
+ return PLANE_CTL_ROTATE_180;
+ case DRM_MODE_ROTATE_270:
+ return PLANE_CTL_ROTATE_90;
+ default:
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 plane_ctl = 0;
+
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ return plane_ctl;
+
+ if (crtc_state->gamma_enable)
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ return plane_ctl;
+}
+
+static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 plane_ctl;
+
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |= skl_plane_ctl_alpha(plane_state);
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+ plane_ctl |= cnl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ return plane_ctl;
+}
+
+static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 plane_color_ctl = 0;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return plane_color_ctl;
+
+ if (crtc_state->gamma_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+
+ return plane_color_ctl;
+}
+
+static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ u32 plane_color_ctl = 0;
+
+ plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
+
+ if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
+ switch (plane_state->hw.color_encoding) {
+ case DRM_COLOR_YCBCR_BT709:
+ plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
+ break;
+ default:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
+ }
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ return plane_color_ctl;
+}
+
+static void
+skl_program_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 surf_addr = plane_state->view.color_plane[color_plane].offset;
+ u32 stride = skl_plane_stride(plane_state, color_plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = skl_main_to_aux_plane(fb, color_plane);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 x = plane_state->view.color_plane[color_plane].x;
+ u32 y = plane_state->view.color_plane[color_plane].y;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u8 alpha = plane_state->hw.alpha >> 8;
+ u32 plane_color_ctl = 0, aux_dist = 0;
+ unsigned long irqflags;
+ u32 keymsk, keymax;
+ u32 plane_ctl = plane_state->ctl;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+ keymsk = key->channel_mask & 0x7ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
+ }
+
+ if (aux_plane) {
+ aux_dist = plane_state->view.color_plane[aux_plane].offset - surf_addr;
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ aux_dist |= skl_plane_stride(plane_state, aux_plane);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ (src_h << 16) | src_w);
+
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
+
+ if (DISPLAY_VER(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
+
+ if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ intel_uncore_write64_fw(&dev_priv->uncore,
+ PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ (y << 16) | x);
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->view.color_plane[1].y << 16) |
+ plane_state->view.color_plane[1].x);
+
+ if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
+
+ if (plane_state->scaler_id >= 0)
+ skl_program_plane_scaler(plane, crtc_state, plane_state);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
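
The PLANE_KEYMAX/PLANE_KEYMSK packing above squeezes the 16-bit uapi plane alpha down to the 8 bits the hardware keeps, and enables alpha keying only when the plane is not fully opaque. A minimal standalone sketch of that packing, with the register bit positions open-coded as assumptions (the real values live in the i915 register headers):

#include <stdint.h>

#define KEYMAX_ALPHA(a)     ((uint32_t)(a) << 24)	/* assumed bit layout */
#define KEYMSK_ALPHA_ENABLE (1u << 31)			/* assumed bit layout */

static void pack_colorkey(uint32_t max_value, uint32_t channel_mask,
			  uint16_t alpha16,
			  uint32_t *keymax, uint32_t *keymsk)
{
	uint8_t alpha = alpha16 >> 8;	/* 16-bit uapi alpha -> 8-bit HW */

	*keymax = (max_value & 0xffffff) | KEYMAX_ALPHA(alpha);
	*keymsk = channel_mask & 0x7ffffff;
	if (alpha < 0xff)
		*keymsk |= KEYMSK_ALPHA_ENABLE;
}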
+
+static void
+skl_plane_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ unsigned long irqflags;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ u32 surf_addr = plane_state->view.color_plane[0].offset;
+ u32 plane_ctl = plane_state->ctl;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on planar master */
+ color_plane = 1;
+
+ skl_program_plane(plane, crtc_state, plane_state, color_plane);
+}
+
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ struct drm_format_name_buf format_name;
+
+ if (!fb)
+ return 0;
+
+ if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
+ is_ccs_modifier(fb->modifier)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
+ return -EINVAL;
+ }
+
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
+ if (drm_rotation_90_or_270(rotation)) {
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling required for 90/270!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * 90/270 is not allowed with RGB64 16:16:16:16 and
+ * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11 onwards.
+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ if (DISPLAY_VER(dev_priv) >= 11)
+ break;
+ fallthrough;
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ /* Y-tiling is not supported in IF-ID Interlace mode */
+ if (crtc_state->hw.enable &&
+ crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /* Wa_1606054188:tgl,adl-s */
+ if ((IS_ALDERLAKE_S(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int pipe_src_w = crtc_state->pipe_src_w;
+
+ /*
+ * Display WA #1175: cnl,glk
+ * Planes other than the cursor may cause FIFO underflow and display
+ * corruption if starting less than 4 pixels from the right edge of
+ * the screen.
+ * Besides the workaround above, fix the similar problem where planes
+ * other than the cursor that end less than 4 pixels from the left edge
+ * of the screen may also cause FIFO underflow and display corruption.
+ */
+ if (IS_DISPLAY_VER(dev_priv, 10) &&
+ (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
+ return -ERANGE;
+ }
+
+ return 0;
+}
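
A worked instance of the bounds enforced above, for a hypothetical 1920-pixel-wide pipe (illustrative only): a non-cursor plane must end at x >= 4 and start at x <= pipe_src_w - 4.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the check above: valid iff the plane neither ends nor starts
 * within 4 pixels of the screen edges. */
static bool wa_1175_ok(int crtc_x, int crtc_w, int pipe_src_w)
{
	return crtc_x + crtc_w >= 4 && crtc_x <= pipe_src_w - 4;
}

int main(void)
{
	printf("%d\n", wa_1175_ok(-100, 102, 1920));	/* 0: ends at x=2 */
	printf("%d\n", wa_1175_ok(1917, 400, 1920));	/* 0: starts at x=1917 */
	printf("%d\n", wa_1175_ok(0, 1920, 1920));	/* 1: full-pipe plane */
	return 0;
}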
+
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ /* Display WA #1106 */
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ src_w & 3 &&
+ (rotation == DRM_MODE_ROTATE_270 ||
+ rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (DISPLAY_VER(dev_priv) >= 10 ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
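
The two limits above are src/dst scaling ratios, assuming the 16.16 fixed-point convention the clipping helpers use for min_scale/max_scale; a quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* scaling ratio is src/dst in 16.16 fixed point */
	printf("gen10+/non-planar max: %f\n",
	       (0x30000 - 1) / 65536.0);	/* ~2.99998, just under 3x */
	printf("pre-gen10 planar max:  %f\n",
	       (0x20000 - 1) / 65536.0);	/* ~1.99998, just under 2x */
	return 0;
}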
+
+static int intel_plane_min_width(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->min_width)
+ return plane->min_width(fb, color_plane, rotation);
+ else
+ return 1;
+}
+
+static int intel_plane_max_width(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->max_width)
+ return plane->max_width(fb, color_plane, rotation);
+ else
+ return INT_MAX;
+}
+
+static int intel_plane_max_height(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->max_height)
+ return plane->max_height(fb, color_plane, rotation);
+ else
+ return INT_MAX;
+}
+
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset,
+ int ccs_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_x = plane_state->view.color_plane[ccs_plane].x;
+ int aux_y = plane_state->view.color_plane[ccs_plane].y;
+ u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, ccs_plane);
+ int hsub;
+ int vsub;
+
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
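+ /*
+ * Walk the AUX offset back one alignment step at a time until its
+ * x/y lands on the main surface x/y, or we run out of offsets to try.
+ */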
+ while (aux_offset >= main_offset && aux_y <= main_y) {
+ int x, y;
+
+ if (aux_x == main_x && aux_y == main_y)
+ break;
+
+ if (aux_offset == 0)
+ break;
+
+ x = aux_x / hsub;
+ y = aux_y / vsub;
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane,
+ aux_offset,
+ aux_offset -
+ alignment);
+ aux_x = x * hsub + aux_x % hsub;
+ aux_y = y * vsub + aux_y % vsub;
+ }
+
+ if (aux_x != main_x || aux_y != main_y)
+ return false;
+
+ plane_state->view.color_plane[ccs_plane].offset = aux_offset;
+ plane_state->view.color_plane[ccs_plane].x = aux_x;
+ plane_state->view.color_plane[ccs_plane].y = aux_y;
+
+ return true;
+}
+
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const int aux_plane = skl_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->view.color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
+ return -EINVAL;
+
+ /*
+ * AUX surface offset is specified as the distance from the
+ * main surface offset, and it must be non-negative. Make
+ * sure that is what we will get.
+ */
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
+
+ /*
+ * When using an X-tiled surface, the plane blows up
+ * if the x offset + width exceed the stride.
+ *
+ * TODO: linear and Y-tiled seem fine, Yf is untested.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ int cpp = fb->format->cpp[0];
+
+ while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) {
+ if (*offset == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
+ return -EINVAL;
+ }
+
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
+ }
+ }
+
+ return 0;
+}
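
Illustrative arithmetic for the X-tiled constraint enforced by the loop above, with hypothetical values: for a 4096-byte stride and 4 bytes per pixel, an 800-pixel-wide plane can tolerate an x offset of at most 224.

#include <stdio.h>

int main(void)
{
	int stride = 4096;	/* hypothetical X-tile stride in bytes */
	int cpp = 4;		/* XRGB8888 */
	int w = 800;		/* plane width in pixels */

	/* (x + w) * cpp must not exceed the stride, so: */
	printf("max x offset = %d pixels\n", stride / cpp - w);	/* 224 */
	return 0;
}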
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = skl_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
+ /*
+ * The CCS AUX surface doesn't have its own x/y offsets, so we must
+ * make sure they match the main surface x/y offsets.
+ */
+ if (is_ccs_modifier(fb->modifier)) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, aux_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->view.color_plane[aux_plane].x ||
+ y != plane_state->view.color_plane[aux_plane].y) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
+ plane_state->view.color_plane[0].offset = offset;
+ plane_state->view.color_plane[0].x = x;
+ plane_state->view.color_plane[0].y = y;
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ x << 16, y << 16);
+
+ return 0;
+}
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int uv_plane = 1;
+ int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
+ int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
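+ /*
+ * The src rect is in 16.16 fixed point; the extra bit of shift also
+ * halves the coordinates for 4:2:0 chroma subsampling.
+ */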
+ int x = plane_state->uapi.src.x1 >> 17;
+ int y = plane_state->uapi.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 17;
+ u32 offset;
+
+ /* FIXME not quite sure how/if these apply to the chroma plane */
+ if (w > max_width || h > max_height) {
+ drm_dbg_kms(&i915->drm,
+ "CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
+ return -EINVAL;
+ }
+
+ intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state, uv_plane);
+
+ if (is_ccs_modifier(fb->modifier)) {
+ int ccs_plane = main_to_ccs_plane(fb, uv_plane);
+ u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, uv_plane);
+
+ if (offset > aux_offset)
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset,
+ aux_offset & ~(alignment - 1));
+
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, ccs_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->view.color_plane[ccs_plane].x ||
+ y != plane_state->view.color_plane[ccs_plane].y) {
+ drm_dbg_kms(&i915->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
+ plane_state->view.color_plane[uv_plane].offset = offset;
+ plane_state->view.color_plane[uv_plane].x = x;
+ plane_state->view.color_plane[uv_plane].y = y;
+
+ return 0;
+}
+
+static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x = plane_state->uapi.src.x1 >> 16;
+ int src_y = plane_state->uapi.src.y1 >> 16;
+ u32 offset;
+ int ccs_plane;
+
+ for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+ int x, y;
+
+ if (!is_ccs_plane(fb, ccs_plane) ||
+ is_gen12_ccs_cc_plane(fb, ccs_plane))
+ continue;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+ skl_ccs_to_main_plane(fb, ccs_plane));
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
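+ /* Total CCS subsampling relative to the fb: CCS-to-main times main-to-fb. */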
+ hsub *= main_hsub;
+ vsub *= main_vsub;
+ x = src_x / hsub;
+ y = src_y / vsub;
+
+ intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
+
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane);
+
+ plane_state->view.color_plane[ccs_plane].offset = offset;
+ plane_state->view.color_plane[ccs_plane].x = (x * hsub + src_x % hsub) / main_hsub;
+ plane_state->view.color_plane[ccs_plane].y = (y * vsub + src_y % vsub) / main_vsub;
+ }
+
+ return 0;
+}
+
+static int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ /*
+ * Handle the AUX surface first since the main surface setup depends on
+ * it.
+ */
+ if (is_ccs_modifier(fb->modifier)) {
+ ret = skl_check_ccs_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format,
+ fb->modifier)) {
+ ret = skl_check_nv12_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = skl_check_main_surface(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool skl_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return DISPLAY_VER(to_i915(fb->dev)) >= 11;
+ default:
+ return true;
+ }
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int ret;
+
+ ret = skl_plane_check_fb(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* use scaler when colorkey is not required */
+ if (!plane_state->ckey.flags && skl_fb_scalable(fb)) {
+ min_scale = 1;
+ max_scale = skl_plane_max_scale(dev_priv, fb);
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
+ if (ret)
+ return ret;
+
+ ret = skl_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = skl_plane_check_nv12_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ /* HW only has 8 bits of alpha precision, disable the plane if invisible */
+ if (!(plane_state->hw.alpha >> 8))
+ plane_state->uapi.visible = false;
+
+ plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
+ plane_state);
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ icl_is_hdr_plane(dev_priv, plane->id))
+ /* Enable and use MPEG-2 chroma siting */
+ plane_state->cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+ else
+ plane_state->cus_ctl = 0;
+
+ return 0;
+}
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ /* Display WA #0870: skl, bxt */
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (IS_DISPLAY_VER(dev_priv, 9) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
+ return true;
+}
+
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(skl_planar_formats);
+ return skl_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(glk_planar_formats);
+ return glk_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ return icl_hdr_plane_formats;
+ } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+ return icl_sdr_y_plane_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+ return icl_sdr_uv_plane_formats;
+ }
+}
+
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (plane_id == PLANE_CURSOR)
+ return false;
+
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+ return true;
+
+ if (IS_GEMINILAKE(dev_priv))
+ return pipe != PIPE_C;
+
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_PRIMARY ||
+ plane_id == PLANE_SPRITE0);
+}
+
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (!plane->has_ccs)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
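+ /*
+ * Ordered from most to least capable formats; each case falls
+ * through to the stricter modifier checks below.
+ */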
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
+ return true;
+ fallthrough;
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
+ return false;
+
+ return plane_id < PLANE_SPRITE4;
+}
+
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct drm_i915_private *dev_priv = to_i915(_plane->dev);
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ switch (modifier) {
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
+ return false;
+ fallthrough;
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
+ return gen12_plane_format_modifiers_mc_ccs;
+ else
+ return gen12_plane_format_modifiers_rc_ccs;
+}
+
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ const struct drm_plane_funcs *plane_funcs;
+ struct intel_plane *plane;
+ enum drm_plane_type plane_type;
+ unsigned int supported_rotations;
+ unsigned int supported_csc;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret;
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ plane->id = plane_id;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+ plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane->min_width = icl_plane_min_width;
+ plane->max_width = icl_plane_max_width;
+ plane->max_height = icl_plane_max_height;
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
+ plane->max_width = glk_plane_max_width;
+ plane->max_height = skl_plane_max_height;
+ } else {
+ plane->max_width = skl_plane_max_width;
+ plane->max_height = skl_plane_max_height;
+ }
+
+ plane->max_stride = skl_plane_max_stride;
+ plane->update_plane = skl_update_plane;
+ plane->disable_plane = skl_disable_plane;
+ plane->get_hw_state = skl_plane_get_hw_state;
+ plane->check_plane = skl_plane_check;
+ plane->min_cdclk = skl_plane_min_cdclk;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_DISPLAY_RANGE(dev_priv,
+ 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ formats = icl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else if (DISPLAY_VER(dev_priv) >= 10)
+ formats = glk_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else
+ formats = skl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
+ plane_funcs = &skl_plane_funcs;
+ }
+
+ if (plane_id == PLANE_PRIMARY)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats, modifiers,
+ plane_type,
+ "plane %d%c", plane_id + 1,
+ pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+ supported_rotations |= DRM_MODE_REFLECT_X;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
+
+ drm_plane_create_color_properties(&plane->base,
+ supported_csc,
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&plane->base);
+
+ if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+ drm_plane_create_scaling_filter_property(&plane->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
+void
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe;
+ u32 val, base, offset, stride_mult, tiling, alpha;
+ int fourcc, pixel_format;
+ unsigned int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return;
+
+ drm_WARN_ON(dev, pipe != crtc->pipe);
+
+ if (crtc_state->bigjoiner) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported bigjoiner configuration for initial FB\n");
+ return;
+ }
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ fb->dev = dev;
+
+ val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
+ else
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+
+ if (DISPLAY_VER(dev_priv) >= 10) {
+ alpha = intel_de_read(dev_priv,
+ PLANE_COLOR_CTL(pipe, plane_id));
+ alpha &= PLANE_COLOR_ALPHA_MASK;
+ } else {
+ alpha = val & PLANE_CTL_ALPHA_MASK;
+ }
+
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX, alpha);
+ fb->format = drm_format_info(fourcc);
+
+ tiling = val & PLANE_CTL_TILED_MASK;
+ switch (tiling) {
+ case PLANE_CTL_TILED_LINEAR:
+ fb->modifier = DRM_FORMAT_MOD_LINEAR;
+ break;
+ case PLANE_CTL_TILED_X:
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ plane_config->tiling = I915_TILING_Y;
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = DISPLAY_VER(dev_priv) >= 12 ?
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
+ I915_FORMAT_MOD_Y_TILED_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ MISSING_CASE(tiling);
+ goto error;
+ }
+
+ /*
+ * DRM_MODE_ROTATE_* rotations are counter-clockwise to stay compatible
+ * with Xrandr, while i915 HW rotation is clockwise; that is why the
+ * cases below are swapped.
+ */
+ switch (val & PLANE_CTL_ROTATE_MASK) {
+ case PLANE_CTL_ROTATE_0:
+ plane_config->rotation = DRM_MODE_ROTATE_0;
+ break;
+ case PLANE_CTL_ROTATE_90:
+ plane_config->rotation = DRM_MODE_ROTATE_270;
+ break;
+ case PLANE_CTL_ROTATE_180:
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ break;
+ case PLANE_CTL_ROTATE_270:
+ plane_config->rotation = DRM_MODE_ROTATE_90;
+ break;
+ }
+
+ if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) && val & PLANE_CTL_FLIP_HORIZONTAL)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+ /* 90/270 degree rotation would require extra work */
+ if (drm_rotation_90_or_270(plane_config->rotation))
+ goto error;
+
+ base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+ plane_config->base = base;
+
+ offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
+
+ val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
+ fb->height = ((val >> 16) & 0xffff) + 1;
+ fb->width = ((val >> 0) & 0xffff) + 1;
+
+ val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
+ fb->pitches[0] = (val & 0x3ff) * stride_mult;
+
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+ plane_config->size = fb->pitches[0] * aligned_height;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
+
+ plane_config->fb = intel_fb;
+ return;
+
+error:
+ kfree(intel_fb);
+}
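
The readback above inverts the rotation direction: the hardware rotates clockwise while DRM_MODE_ROTATE_* is counter-clockwise, so 90 and 270 trade places. A standalone sketch of the quarter-turn mapping (illustrative only):

#include <stdio.h>

int main(void)
{
	/* CW hardware quarter-turns map to CCW DRM quarter-turns:
	 * 0 <-> 0, 1 <-> 3, 2 <-> 2, 3 <-> 1 */
	for (int hw = 0; hw < 4; hw++)
		printf("HW %3d deg CW -> DRM_MODE_ROTATE_%d\n",
		       hw * 90, ((4 - hw) % 4) * 90);
	return 0;
}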
+
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
new file mode 100644
index 000000000000..351040b64dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _SKL_UNIVERSAL_PLANE_H_
+#define _SKL_UNIVERSAL_PLANE_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_initial_plane_config;
+struct intel_plane_state;
+
+enum pipe;
+enum plane_id;
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+void skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config);
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id);
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f94025ec603a..74a27508759d 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -38,6 +38,7 @@
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
#include "intel_sideband.h"
+#include "skl_scaler.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -992,14 +993,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static void intel_dsi_shutdown(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index bc0223716906..daf9284ef1f5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -27,15 +27,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
static int clflush_work(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- struct drm_i915_gem_object *obj = clflush->obj;
- int err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
-
- __do_clflush(obj);
- i915_gem_object_unpin_pages(obj);
+ __do_clflush(clflush->obj);
return 0;
}
@@ -44,6 +37,7 @@ static void clflush_release(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
+ i915_gem_object_unpin_pages(clflush->obj);
i915_gem_object_put(clflush->obj);
}
@@ -63,6 +57,11 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
if (!clflush)
return NULL;
+ if (__i915_gem_object_get_pages(obj) < 0) {
+ kfree(clflush);
+ return NULL;
+ }
+
dma_fence_work_init(&clflush->base, &clflush_ops);
clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4d2f40cf237b..fd8ee52e17a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -232,6 +232,8 @@ static void intel_context_set_gem(struct intel_context *ce,
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
intel_engine_has_timeslices(ce->engine))
__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+
+ intel_context_set_watchdog_us(ce, ctx->watchdog.timeout_us);
}
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
@@ -386,38 +388,6 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
return intel_engine_pulse(engine) == 0;
}
-static bool
-__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
-{
- struct intel_engine_cs *engine, *locked;
- bool ret = false;
-
- /*
- * Serialise with __i915_request_submit() so that it sees
- * is-banned?, or we know the request is already inflight.
- *
- * Note that rq->engine is unstable, and so we double
- * check that we have acquired the lock on the final engine.
- */
- locked = READ_ONCE(rq->engine);
- spin_lock_irq(&locked->active.lock);
- while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
- spin_unlock(&locked->active.lock);
- locked = engine;
- spin_lock(&locked->active.lock);
- }
-
- if (i915_request_is_active(rq)) {
- if (!__i915_request_is_complete(rq))
- *active = locked;
- ret = true;
- }
-
- spin_unlock_irq(&locked->active.lock);
-
- return ret;
-}
-
static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
struct intel_engine_cs *engine = NULL;
@@ -445,7 +415,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
/* Check with the backend if the request is inflight */
found = true;
if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
- found = __active_engine(rq, &engine);
+ found = i915_request_active_engine(rq, &engine);
i915_request_put(rq);
if (found)
@@ -679,7 +649,7 @@ __create_context(struct drm_i915_private *i915)
kref_init(&ctx->ref);
ctx->i915 = i915;
- ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+ ctx->sched.priority = I915_PRIORITY_NORMAL;
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->link);
@@ -822,6 +792,41 @@ static void __assign_timeline(struct i915_gem_context *ctx,
context_apply_all(ctx, __apply_timeline, timeline);
}
+static int __apply_watchdog(struct intel_context *ce, void *timeout_us)
+{
+ return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us);
+}
+
+static int
+__set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us)
+{
+ int ret;
+
+ ret = context_apply_all(ctx, __apply_watchdog,
+ (void *)(uintptr_t)timeout_us);
+ if (!ret)
+ ctx->watchdog.timeout_us = timeout_us;
+
+ return ret;
+}
+
+static void __set_default_fence_expiry(struct i915_gem_context *ctx)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ int ret;
+
+ if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) ||
+ !i915->params.request_timeout_ms)
+ return;
+
+ /* Default expiry for user fences. */
+ ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000);
+ if (ret)
+ drm_notice(&i915->drm,
+ "Failed to configure default fence expiry! (%d)",
+ ret);
+}
+
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
@@ -866,6 +871,8 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
intel_timeline_put(timeline);
}
+ __set_default_fence_expiry(ctx);
+
trace_i915_context_create(ctx);
return ctx;
@@ -1959,7 +1966,7 @@ static int set_priority(struct i915_gem_context *ctx,
!capable(CAP_SYS_NICE))
return -EPERM;
- ctx->sched.priority = I915_USER_PRIORITY(priority);
+ ctx->sched.priority = priority;
context_apply_all(ctx, __apply_priority, ctx);
return 0;
@@ -2463,7 +2470,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_PRIORITY:
args->size = 0;
- args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
+ args->value = ctx->sched.priority;
break;
case I915_CONTEXT_PARAM_SSEU:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 1449f54924e0..340473aa70de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -154,6 +154,10 @@ struct i915_gem_context {
*/
atomic_t active_count;
+ struct {
+ u64 timeout_us;
+ } watchdog;
+
/**
* @hang_timestamp: The last time(s) this context caused a GPU hang
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 04e9c04545ad..ccede73c6465 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -25,7 +25,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
struct scatterlist *src, *dst;
int ret, i;
- ret = i915_gem_object_pin_pages(obj);
+ ret = i915_gem_object_pin_pages_unlocked(obj);
if (ret)
goto err;
@@ -82,7 +82,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
void *vaddr;
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -123,42 +123,48 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ struct i915_gem_ww_ctx ww;
int err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
-
- err = i915_gem_object_lock_interruptible(obj, NULL);
- if (err)
- goto out;
-
- err = i915_gem_object_set_to_cpu_domain(obj, write);
- i915_gem_object_unlock(obj);
-
-out:
- i915_gem_object_unpin_pages(obj);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_pin_pages(obj);
+ if (!err) {
+ err = i915_gem_object_set_to_cpu_domain(obj, write);
+ i915_gem_object_unpin_pages(obj);
+ }
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct i915_gem_ww_ctx ww;
int err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
-
- err = i915_gem_object_lock_interruptible(obj, NULL);
- if (err)
- goto out;
-
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
-
-out:
- i915_gem_object_unpin_pages(obj);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_pin_pages(obj);
+ if (!err) {
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unpin_pages(obj);
+ }
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
return err;
}
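
Both CPU-access paths above now follow the same ww-mutex acquire/backoff idiom. A minimal sketch of that shape, with do_locked_work() standing in as a hypothetical name for whatever must run under the object lock:

struct i915_gem_ww_ctx ww;
int err;

i915_gem_ww_ctx_init(&ww, true);	/* true = interruptible waits */
retry:
err = i915_gem_object_lock(obj, &ww);
if (!err)
	err = do_locked_work(obj);	/* hypothetical locked-section body */
if (err == -EDEADLK) {
	/* lost the lock ordering race: unwind and retry from scratch */
	err = i915_gem_ww_ctx_backoff(&ww);
	if (!err)
		goto retry;
}
i915_gem_ww_ctx_fini(&ww);
return err;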
@@ -244,6 +250,9 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
}
+ if (i915_gem_object_size_2big(dma_buf->size))
+ return ERR_PTR(-E2BIG);
+
/* need to attach */
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
@@ -258,7 +267,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 36f54cedaaeb..073822100da7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -335,7 +335,14 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* not allowed to be changed by userspace.
*/
if (i915_gem_object_is_proxy(obj)) {
- ret = -ENXIO;
+ /*
+ * Silently allow cached for userptr; the vulkan driver
+ * sets all objects to cached
+ */
+ if (!i915_gem_object_is_userptr(obj) ||
+ args->caching != I915_CACHING_CACHED)
+ ret = -ENXIO;
+
goto out;
}
@@ -359,12 +366,12 @@ out:
*/
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
int ret;
@@ -372,11 +379,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
return ERR_PTR(-EINVAL);
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(obj, &ww);
- if (ret)
- goto err;
/*
* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
@@ -391,7 +393,7 @@ retry:
HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
- goto err;
+ return ERR_PTR(ret);
/*
* As the user may map the buffer once pinned in the display plane
@@ -404,33 +406,20 @@ retry:
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
(!view || view->type == I915_GGTT_VIEW_NORMAL))
- vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0, alignment,
+ vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
flags | PIN_MAPPABLE |
PIN_NONBLOCK);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK))
- vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0,
+ vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0,
alignment, flags);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err;
- }
+ if (IS_ERR(vma))
+ return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
-err:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
-
- if (ret)
- return ERR_PTR(ret);
-
return vma;
}
@@ -526,6 +515,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
+ if (i915_gem_object_is_userptr(obj)) {
+ /*
+ * Try to grab userptr pages; iris uses set_domain to check
+ * userptr validity.
+ */
+ err = i915_gem_object_userptr_validate(obj);
+ if (!err)
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ goto out;
+ }
+
/*
* Proxy objects do not control access to the backing storage, ergo
* they cannot be used as a means to manipulate the cache domain
@@ -537,6 +541,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ err = i915_gem_object_lock_interruptible(obj, NULL);
+ if (err)
+ goto out;
+
/*
* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
@@ -548,7 +556,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/
err = i915_gem_object_pin_pages(obj);
if (err)
- goto out;
+ goto out_unlock;
/*
* Already in the desired write domain? Nothing for us to do!
@@ -563,10 +571,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (READ_ONCE(obj->write_domain) == read_domains)
goto out_unpin;
- err = i915_gem_object_lock_interruptible(obj, NULL);
- if (err)
- goto out_unpin;
-
if (read_domains & I915_GEM_DOMAIN_WC)
err = i915_gem_object_set_to_wc_domain(obj, write_domain);
else if (read_domains & I915_GEM_DOMAIN_GTT)
@@ -574,13 +578,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
else
err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+
+out_unlock:
i915_gem_object_unlock(obj);
- if (write_domain)
+ if (!err && write_domain)
i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
-out_unpin:
- i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d70ca36f74f6..5964e67c7d36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -28,6 +28,7 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
+#include "i915_memcpy.h"
struct eb_vma {
struct i915_vma *vma;
@@ -49,16 +50,19 @@ enum {
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
-#define __EXEC_OBJECT_HAS_PIN BIT(31)
-#define __EXEC_OBJECT_HAS_FENCE BIT(30)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
+/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
+#define __EXEC_OBJECT_HAS_PIN BIT(30)
+#define __EXEC_OBJECT_HAS_FENCE BIT(29)
+#define __EXEC_OBJECT_USERPTR_INIT BIT(28)
+#define __EXEC_OBJECT_NEEDS_MAP BIT(27)
+#define __EXEC_OBJECT_NEEDS_BIAS BIT(26)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 26) /* all of the above + __EXEC_OBJECT_NO_RESERVE */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_ENGINE_PINNED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
+#define __EXEC_USERPTR_USED BIT(29)
+#define __EXEC_INTERNAL_FLAGS (~0u << 29)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
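
Moving the internal-flag base from bit 28 down to bit 26 is what lets ~0u << 26 keep covering every driver-private bit, including the externally defined BIT(31). A standalone check of that mask arithmetic (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t internal = ~0u << 26;		/* bits 26..31 */

	assert(internal & (1u << 31));		/* __EXEC_OBJECT_NO_RESERVE */
	assert(internal & (1u << 26));		/* __EXEC_OBJECT_NEEDS_BIAS */
	assert(!(internal & (1u << 25)));	/* user flag space untouched */
	return 0;
}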
@@ -419,13 +423,14 @@ static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
return pin_flags;
}
-static inline bool
+static inline int
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
struct eb_vma *ev)
{
struct i915_vma *vma = ev->vma;
u64 pin_flags;
+ int err;
if (vma->node.size)
pin_flags = vma->node.start;
@@ -437,24 +442,29 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags |= PIN_GLOBAL;
/* Attempt to reuse the current location if available */
- /* TODO: Add -EDEADLK handling here */
- if (unlikely(i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags))) {
+ err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
+ if (err == -EDEADLK)
+ return err;
+
+ if (unlikely(err)) {
if (entry->flags & EXEC_OBJECT_PINNED)
- return false;
+ return err;
/* Failing that pick any _free_ space if suitable */
- if (unlikely(i915_vma_pin_ww(vma, &eb->ww,
+ err = i915_vma_pin_ww(vma, &eb->ww,
entry->pad_to_size,
entry->alignment,
eb_pin_flags(entry, ev->flags) |
- PIN_USER | PIN_NOEVICT)))
- return false;
+ PIN_USER | PIN_NOEVICT);
+ if (unlikely(err))
+ return err;
}
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
- if (unlikely(i915_vma_pin_fence(vma))) {
+ err = i915_vma_pin_fence(vma);
+ if (unlikely(err)) {
i915_vma_unpin(vma);
- return false;
+ return err;
}
if (vma->fence)
@@ -462,7 +472,10 @@ eb_pin_vma(struct i915_execbuffer *eb,
}
ev->flags |= __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, ev->flags);
+ if (eb_vma_misplaced(entry, vma, ev->flags))
+ return -EBADSLT;
+
+ return 0;
}
static inline void
@@ -483,6 +496,13 @@ eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
struct i915_vma *vma)
{
+ /*
+ * Relocations are disallowed for all platforms after TGL-LP. This
+ * also covers all platforms with local memory.
+ */
+ if (entry->relocation_count &&
+ INTEL_GEN(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
+ return -EINVAL;
+
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
@@ -853,6 +873,26 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
eb_add_vma(eb, i, batch, vma);
+
+ if (i915_gem_object_is_userptr(vma->obj)) {
+ err = i915_gem_object_userptr_submit_init(vma->obj);
+ if (err) {
+ if (i + 1 < eb->buffer_count) {
+ /*
+ * Execbuffer code expects last vma entry to be NULL,
+ * since we already initialized this entry,
+ * set the next value to NULL or we mess up
+ * cleanup handling.
+ */
+ eb->vma[i + 1].vma = NULL;
+ }
+
+ return err;
+ }
+
+ eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
+ eb->args->flags |= __EXEC_USERPTR_USED;
+ }
}
if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
@@ -898,7 +938,11 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
if (err)
return err;
- if (eb_pin_vma(eb, entry, ev)) {
+ err = eb_pin_vma(eb, entry, ev);
+ if (err == -EDEADLK)
+ return err;
+
+ if (!err) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
@@ -914,6 +958,12 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
}
+ if (!(ev->flags & EXEC_OBJECT_WRITE)) {
+ err = dma_resv_reserve_shared(vma->resv, 1);
+ if (err)
+ return err;
+ }
+
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
}
@@ -944,7 +994,7 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}
-static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
+static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release_userptr)
{
const unsigned int count = eb->buffer_count;
unsigned int i;
@@ -958,6 +1008,11 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
eb_unreserve_vma(ev);
+ if (release_userptr && ev->flags & __EXEC_OBJECT_USERPTR_INIT) {
+ ev->flags &= ~__EXEC_OBJECT_USERPTR_INIT;
+ i915_gem_object_userptr_submit_fini(vma->obj);
+ }
+
if (final)
i915_vma_put(vma);
}
@@ -1294,6 +1349,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
err = PTR_ERR(cmd);
goto err_pool;
}
+ intel_gt_buffer_pool_mark_used(pool);
memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
@@ -1895,6 +1951,31 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
return 0;
}
+static int eb_reinit_userptr(struct i915_execbuffer *eb)
+{
+ const unsigned int count = eb->buffer_count;
+ unsigned int i;
+ int ret;
+
+ if (likely(!(eb->args->flags & __EXEC_USERPTR_USED)))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ struct eb_vma *ev = &eb->vma[i];
+
+ if (!i915_gem_object_is_userptr(ev->vma->obj))
+ continue;
+
+ ret = i915_gem_object_userptr_submit_init(ev->vma->obj);
+ if (ret)
+ return ret;
+
+ ev->flags |= __EXEC_OBJECT_USERPTR_INIT;
+ }
+
+ return 0;
+}
+
static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
struct i915_request *rq)
{
@@ -1909,7 +1990,7 @@ repeat:
}
/* We may process another execbuffer during the unlock... */
- eb_release_vmas(eb, false);
+ eb_release_vmas(eb, false, true);
i915_gem_ww_ctx_fini(&eb->ww);
if (rq) {
@@ -1951,7 +2032,7 @@ repeat:
}
if (!err)
- flush_workqueue(eb->i915->mm.userptr_wq);
+ err = eb_reinit_userptr(eb);
err_relock:
i915_gem_ww_ctx_init(&eb->ww, true);
@@ -2013,7 +2094,7 @@ repeat_validate:
err:
if (err == -EDEADLK) {
- eb_release_vmas(eb, false);
+ eb_release_vmas(eb, false, false);
err = i915_gem_ww_ctx_backoff(&eb->ww);
if (!err)
goto repeat_validate;
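
The -EDEADLK handling here is the standard ww-mutex backoff dance this series converts the whole path to: drop every lock held in the acquire context, sleep on the contended one, then retry. A self-contained sketch, where do_work_locked() is a hypothetical stand-in and the i915_gem_ww_ctx helpers are the ones used throughout this patch:

static int work_under_ww(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);        /* interruptible */
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = do_work_locked(obj);      /* hypothetical; may return -EDEADLK */
        if (err == -EDEADLK) {
                /* Unlocks everything in ww, then waits on the contended lock. */
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}
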
@@ -2110,7 +2191,7 @@ retry:
err:
if (err == -EDEADLK) {
- eb_release_vmas(eb, false);
+ eb_release_vmas(eb, false, false);
err = i915_gem_ww_ctx_backoff(&eb->ww);
if (!err)
goto retry;
@@ -2181,8 +2262,33 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
}
if (err == 0)
- err = i915_vma_move_to_active(vma, eb->request, flags);
+ err = i915_vma_move_to_active(vma, eb->request,
+ flags | __EXEC_OBJECT_NO_RESERVE);
+ }
+
+#ifdef CONFIG_MMU_NOTIFIER
+ if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
+ spin_lock(&eb->i915->mm.notifier_lock);
+
+ /*
+ * count is always at least 1, otherwise __EXEC_USERPTR_USED
+ * could not have been set
+ */
+ for (i = 0; i < count; i++) {
+ struct eb_vma *ev = &eb->vma[i];
+ struct drm_i915_gem_object *obj = ev->vma->obj;
+
+ if (!i915_gem_object_is_userptr(obj))
+ continue;
+
+ err = i915_gem_object_userptr_submit_done(obj);
+ if (err)
+ break;
+ }
+
+ spin_unlock(&eb->i915->mm.notifier_lock);
}
+#endif
if (unlikely(err))
goto err_skip;
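
i915_gem_object_userptr_submit_done() runs with i915->mm.notifier_lock held so that no invalidation can slip in between the check and the request being queued. A sketch of the sequence check it is assumed to perform, modelled on the core mmu_interval_notifier API this series switches userptr to:

static int userptr_submit_done_sketch(struct drm_i915_gem_object *obj)
{
        /*
         * notifier_seq was sampled at submit_init() time; if an
         * invalidation has bumped it since, the pinned pages are stale
         * and the submission must be restarted.
         */
        if (mmu_interval_read_retry(&obj->userptr.notifier,
                                    obj->userptr.notifier_seq))
                return -EAGAIN;

        return 0;
}
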
@@ -2274,24 +2380,45 @@ struct eb_parse_work {
struct i915_vma *trampoline;
unsigned long batch_offset;
unsigned long batch_length;
+ unsigned long *jump_whitelist;
+ const void *batch_map;
+ void *shadow_map;
};
static int __eb_parse(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+ int ret;
+ bool cookie;
- return intel_engine_cmd_parser(pw->engine,
- pw->batch,
- pw->batch_offset,
- pw->batch_length,
- pw->shadow,
- pw->trampoline);
+ cookie = dma_fence_begin_signalling();
+ ret = intel_engine_cmd_parser(pw->engine,
+ pw->batch,
+ pw->batch_offset,
+ pw->batch_length,
+ pw->shadow,
+ pw->jump_whitelist,
+ pw->shadow_map,
+ pw->batch_map);
+ dma_fence_end_signalling(cookie);
+
+ return ret;
}
static void __eb_parse_release(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+ if (!IS_ERR_OR_NULL(pw->jump_whitelist))
+ kfree(pw->jump_whitelist);
+
+ if (pw->batch_map)
+ i915_gem_object_unpin_map(pw->batch->obj);
+ else
+ i915_gem_object_unpin_pages(pw->batch->obj);
+
+ i915_gem_object_unpin_map(pw->shadow->obj);
+
if (pw->trampoline)
i915_active_release(&pw->trampoline->active);
i915_active_release(&pw->shadow->active);
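
dma_fence_begin_signalling()/dma_fence_end_signalling() are lockdep annotations: they declare that everything in between sits on the fence-signalling critical path and must therefore not wait on memory reclaim or on fences that might depend on this very work completing. In sketch form, with do_critical_work() purely hypothetical:

static int run_on_signalling_path(void)
{
        bool cookie;
        int ret;

        cookie = dma_fence_begin_signalling();
        /* Lockdep now flags any blocking dependency acquired in here. */
        ret = do_critical_work();
        dma_fence_end_signalling(cookie);

        return ret;
}
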
@@ -2341,6 +2468,8 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct i915_vma *trampoline)
{
struct eb_parse_work *pw;
+ struct drm_i915_gem_object *batch = eb->batch->vma->obj;
+ bool needs_clflush;
int err;
GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
@@ -2364,6 +2493,34 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
goto err_shadow;
}
+ pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
+ if (IS_ERR(pw->shadow_map)) {
+ err = PTR_ERR(pw->shadow_map);
+ goto err_trampoline;
+ }
+
+ needs_clflush =
+ !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+
+ pw->batch_map = ERR_PTR(-ENODEV);
+ if (needs_clflush && i915_has_memcpy_from_wc())
+ pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
+
+ if (IS_ERR(pw->batch_map)) {
+ err = i915_gem_object_pin_pages(batch);
+ if (err)
+ goto err_unmap_shadow;
+ pw->batch_map = NULL;
+ }
+
+ pw->jump_whitelist =
+ intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
+ trampoline);
+ if (IS_ERR(pw->jump_whitelist)) {
+ err = PTR_ERR(pw->jump_whitelist);
+ goto err_unmap_batch;
+ }
+
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
@@ -2382,6 +2539,10 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (err)
goto err_commit;
+ err = dma_resv_reserve_shared(shadow->resv, 1);
+ if (err)
+ goto err_commit;
+
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
@@ -2403,6 +2564,16 @@ err_commit:
dma_fence_work_commit_imm(&pw->base);
return err;
+err_unmap_batch:
+ if (pw->batch_map)
+ i915_gem_object_unpin_map(batch);
+ else
+ i915_gem_object_unpin_pages(batch);
+err_unmap_shadow:
+ i915_gem_object_unpin_map(shadow->obj);
+err_trampoline:
+ if (trampoline)
+ i915_active_release(&trampoline->active);
err_shadow:
i915_active_release(&shadow->active);
err_batch:
@@ -2474,6 +2645,7 @@ static int eb_parse(struct i915_execbuffer *eb)
err = PTR_ERR(shadow);
goto err;
}
+ intel_gt_buffer_pool_mark_used(pool);
i915_gem_object_set_readonly(shadow->obj);
shadow->private = pool;
@@ -3263,7 +3435,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
err = eb_lookup_vmas(&eb);
if (err) {
- eb_release_vmas(&eb, true);
+ eb_release_vmas(&eb, true, true);
goto err_engine;
}
@@ -3335,6 +3507,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb, batch);
+
err_request:
i915_request_get(eb.request);
err = eb_request_add(&eb, err);
@@ -3355,7 +3528,7 @@ err_request:
i915_request_put(eb.request);
err_vma:
- eb_release_vmas(&eb, true);
+ eb_release_vmas(&eb, true, true);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
WARN_ON(err == -EDEADLK);
@@ -3401,106 +3574,6 @@ static bool check_buffer_count(size_t count)
return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
-int
-i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_execbuffer2 exec2;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- const size_t count = args->buffer_count;
- unsigned int i;
- int err;
-
- if (!check_buffer_count(count)) {
- drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
- return -EINVAL;
- }
-
- exec2.buffers_ptr = args->buffers_ptr;
- exec2.buffer_count = args->buffer_count;
- exec2.batch_start_offset = args->batch_start_offset;
- exec2.batch_len = args->batch_len;
- exec2.DR1 = args->DR1;
- exec2.DR4 = args->DR4;
- exec2.num_cliprects = args->num_cliprects;
- exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = I915_EXEC_RENDER;
- i915_execbuffer2_set_context_id(exec2, 0);
-
- err = i915_gem_check_execbuffer(&exec2);
- if (err)
- return err;
-
- /* Copy in the exec list from userland */
- exec_list = kvmalloc_array(count, sizeof(*exec_list),
- __GFP_NOWARN | GFP_KERNEL);
-
- /* Allocate extra slots for use by the command parser */
- exec2_list = kvmalloc_array(count + 2, eb_element_size(),
- __GFP_NOWARN | GFP_KERNEL);
- if (exec_list == NULL || exec2_list == NULL) {
- drm_dbg(&i915->drm,
- "Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- kvfree(exec_list);
- kvfree(exec2_list);
- return -ENOMEM;
- }
- err = copy_from_user(exec_list,
- u64_to_user_ptr(args->buffers_ptr),
- sizeof(*exec_list) * count);
- if (err) {
- drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
- args->buffer_count, err);
- kvfree(exec_list);
- kvfree(exec2_list);
- return -EFAULT;
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- exec2_list[i].handle = exec_list[i].handle;
- exec2_list[i].relocation_count = exec_list[i].relocation_count;
- exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- exec2_list[i].alignment = exec_list[i].alignment;
- exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_GEN(to_i915(dev)) < 4)
- exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- else
- exec2_list[i].flags = 0;
- }
-
- err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
- if (exec2.flags & __EXEC_HAS_RELOC) {
- struct drm_i915_gem_exec_object __user *user_exec_list =
- u64_to_user_ptr(args->buffers_ptr);
-
- /* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++) {
- if (!(exec2_list[i].offset & UPDATE))
- continue;
-
- exec2_list[i].offset =
- gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
- exec2_list[i].offset &= PIN_OFFSET_MASK;
- if (__copy_to_user(&user_exec_list[i].offset,
- &exec2_list[i].offset,
- sizeof(user_exec_list[i].offset)))
- break;
- }
- }
-
- kvfree(exec_list);
- kvfree(exec2_list);
- return err;
-}
-
int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
deleted file mode 100644
index 8ab842c80f99..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-
-struct stub_fence {
- struct dma_fence dma;
- struct i915_sw_fence chain;
-};
-
-static int __i915_sw_fence_call
-stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
- struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
-
- switch (state) {
- case FENCE_COMPLETE:
- dma_fence_signal(&stub->dma);
- break;
-
- case FENCE_FREE:
- dma_fence_put(&stub->dma);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static const char *stub_driver_name(struct dma_fence *fence)
-{
- return DRIVER_NAME;
-}
-
-static const char *stub_timeline_name(struct dma_fence *fence)
-{
- return "object";
-}
-
-static void stub_release(struct dma_fence *fence)
-{
- struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
- i915_sw_fence_fini(&stub->chain);
-
- BUILD_BUG_ON(offsetof(typeof(*stub), dma));
- dma_fence_free(&stub->dma);
-}
-
-static const struct dma_fence_ops stub_fence_ops = {
- .get_driver_name = stub_driver_name,
- .get_timeline_name = stub_timeline_name,
- .release = stub_release,
-};
-
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
-{
- struct stub_fence *stub;
-
- assert_object_held(obj);
-
- stub = kmalloc(sizeof(*stub), GFP_KERNEL);
- if (!stub)
- return NULL;
-
- i915_sw_fence_init(&stub->chain, stub_notify);
- dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
- 0, 0);
-
- if (i915_sw_fence_await_reservation(&stub->chain,
- obj->base.resv, NULL, true,
- i915_fence_timeout(to_i915(obj->base.dev)),
- I915_FENCE_GFP) < 0)
- goto err;
-
- dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
-
- return &stub->dma;
-
-err:
- stub_release(&stub->dma);
- return NULL;
-}
-
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence)
-{
- struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
- i915_sw_fence_commit(&stub->chain);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ad22f42541bd..ce6b664b10aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
+ if (is_swiotlb_active()) {
unsigned int max_segment;
max_segment = swiotlb_max_segment();
@@ -138,8 +138,7 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.name = "i915_gem_object_internal",
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_internal,
.put_pages = i915_gem_object_put_pages_internal,
};
@@ -178,7 +177,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class,
+ I915_BO_ALLOC_STRUCT_PAGE);
/*
* Mark the object as volatile, such that the pages are marked as
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 87d8b27f426d..7fd22f3efbef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -14,8 +14,6 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 194f35342710..ce1c83c13d05 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -40,13 +40,13 @@ int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);
obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- i915_gem_object_init_memory_region(obj, mem, flags);
+ i915_gem_object_init_memory_region(obj, mem);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index ec28a6cde49b..23f6b00e08e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -246,12 +246,15 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
area->vm_flags & VM_WRITE))
return VM_FAULT_SIGBUS;
+ if (i915_gem_object_lock_interruptible(obj, NULL))
+ return VM_FAULT_NOPAGE;
+
err = i915_gem_object_pin_pages(obj);
if (err)
goto out;
iomap = -1;
- if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
+ if (!i915_gem_object_has_struct_page(obj)) {
iomap = obj->mm.region->iomap.base;
iomap -= obj->mm.region->region.start;
}
@@ -269,6 +272,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
i915_gem_object_unpin_pages(obj);
out:
+ i915_gem_object_unlock(obj);
return i915_error_to_vmf_fault(err);
}
@@ -363,11 +367,10 @@ retry:
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
+ (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start));
if (ret)
goto err_fence;
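
remap_io_mapping() was an i915-private helper; io_mapping_map_user() is its cross-driver replacement in <linux/io-mapping.h>. For reference, its signature as assumed by this conversion:

int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long pfn,
                        unsigned long size);
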
@@ -417,7 +420,9 @@ vm_access(struct vm_area_struct *area, unsigned long addr,
{
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
+ struct i915_gem_ww_ctx ww;
void *vaddr;
+ int err = 0;
if (i915_gem_object_is_readonly(obj) && write)
return -EACCES;
@@ -426,10 +431,18 @@ vm_access(struct vm_area_struct *area, unsigned long addr,
if (addr >= obj->base.size)
return -EINVAL;
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ goto out;
+
/* As this is primarily for debugging, let's focus on simplicity */
vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out;
+ }
if (write) {
memcpy(vaddr + addr, buf, len);
@@ -439,6 +452,16 @@ vm_access(struct vm_area_struct *area, unsigned long addr,
}
i915_gem_object_unpin_map(obj);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err)
+ return err;
return len;
}
@@ -653,9 +676,8 @@ __assign_mmap_offset(struct drm_file *file,
}
if (mmap_type != I915_MMAP_TYPE_GTT &&
- !i915_gem_object_type_has(obj,
- I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_HAS_IOMEM)) {
+ !i915_gem_object_has_struct_page(obj) &&
+ !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
err = -ENODEV;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 70f798405f7f..ea74cbca95be 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -60,10 +60,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
- struct lock_class_key *key)
+ struct lock_class_key *key, unsigned flags)
{
- __mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
-
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -78,16 +76,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
init_rcu_head(&obj->rcu);
obj->ops = ops;
+ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+ obj->flags = flags;
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_dma_page.lock);
-
- if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
- i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
- &obj->mm.lock);
}
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index d0ae834d787a..2ebd79537aea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,6 +16,32 @@
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"
+/*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ *
+ * Aside from our own locals (for which we have no excuse!):
+ * - sg_table embeds unsigned int for num_pages
+ * - get_user_pages*() mixes ints with longs
+ */
+#define GEM_CHECK_SIZE_OVERFLOW(sz) \
+ GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
+
+static inline bool i915_gem_object_size_2big(u64 size)
+{
+ struct drm_i915_gem_object *obj;
+
+ if (GEM_CHECK_SIZE_OVERFLOW(size))
+ return true;
+
+ if (overflows_type(size, obj->base.size))
+ return true;
+
+ return false;
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915);
struct drm_i915_gem_object *i915_gem_object_alloc(void);
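
A worked example of why both halves of the check matter: with 4 KiB pages, INT_MAX pages is only reached at 8 TiB, so GEM_CHECK_SIZE_OVERFLOW() fires for genuinely absurd sizes on any build; on a 32-bit kernel, however, obj->base.size is a 32-bit size_t, and overflows_type() already rejects anything of 4 GiB or more. A hypothetical creation path:

u64 size = 5ULL << 30;  /* 5 GiB */

if (i915_gem_object_size_2big(size))
        return ERR_PTR(-E2BIG); /* taken on 32-bit: 5 GiB >> 32 is non-zero */
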
@@ -23,7 +49,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
- struct lock_class_key *key);
+ struct lock_class_key *key,
+ unsigned alloc_flags);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
resource_size_t size);
@@ -32,11 +59,21 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
const void *data, resource_size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
+
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush);
+int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args);
+int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args);
+
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
+void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -107,6 +144,20 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
+/*
+ * If there can be more than one potential simultaneous locker,
+ * assert that the object lock is held.
+ */
+static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
+{
+ /*
+ * Note mm list lookup is protected by
+ * kref_get_unless_zero().
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP) &&
+ kref_read(&obj->base.refcount) > 0)
+ assert_object_held(obj);
+}
+
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
bool intr)
@@ -152,11 +203,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
dma_resv_unlock(obj->base.resv);
}
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence);
-
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
@@ -215,7 +261,7 @@ i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
+ return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
}
static inline bool
@@ -243,12 +289,6 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
-}
-
-static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->frontbuffer);
@@ -299,22 +339,22 @@ struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
struct i915_gem_object_page_iter *iter,
unsigned int n,
- unsigned int *offset);
+ unsigned int *offset, bool allow_alloc);
static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n,
- unsigned int *offset)
+ unsigned int *offset, bool allow_alloc)
{
- return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
+ return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
}
static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
unsigned int n,
- unsigned int *offset)
+ unsigned int *offset, bool allow_alloc)
{
- return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
+ return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
}
struct page *
@@ -341,27 +381,10 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- /*
- * Only used by struct_mutex, when called "recursively" from
- * direct-reclaim-esque. Safe because there is only every one
- * struct_mutex in the entire system.
- */
- I915_MM_SHRINKER = 1,
- /*
- * Used for obj->mm.lock when allocating pages. Safe because the object
- * isn't yet on any LRU, and therefore the shrinker can't deadlock on
- * it. As soon as the object has pages, obj->mm.lock nests within
- * fs_reclaim.
- */
- I915_MM_GET_PAGES = 1,
-};
-
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ assert_object_held(obj);
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;
@@ -369,6 +392,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
return __i915_gem_object_get_pages(obj);
}
+int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
+
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
@@ -427,6 +452,9 @@ void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type);
+void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+ enum i915_map_type type);
+
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size);
@@ -495,6 +523,7 @@ int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
@@ -558,4 +587,25 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
+#ifdef CONFIG_MMU_NOTIFIER
+static inline bool
+i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
+{
+ return obj->userptr.notifier.mm;
+}
+
+int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
+int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
+void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
+int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
+#else
+static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }
+
+static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
+static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
+static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
+static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
+
+#endif
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index d6dac21fce0b..df8e8c18c6c9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -55,6 +55,9 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
+ /* we pinned the pool, mark it as such */
+ intel_gt_buffer_pool_mark_used(pool);
+
cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -277,6 +280,9 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
if (unlikely(err))
goto out_put;
+ /* we pinned the pool, mark it as such */
+ intel_gt_buffer_pool_mark_used(pool);
+
cmd = i915_gem_object_pin_map(pool->obj, pool->type);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 0438e00d4ca7..8e485cb3343c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -7,6 +7,8 @@
#ifndef __I915_GEM_OBJECT_TYPES_H__
#define __I915_GEM_OBJECT_TYPES_H__
+#include <linux/mmu_notifier.h>
+
#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>
@@ -30,12 +32,10 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
#define I915_GEM_OBJECT_NO_MMAP BIT(4)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -171,9 +171,12 @@ struct drm_i915_gem_object {
unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
-#define I915_BO_READONLY BIT(2)
-#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */
+#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
+ I915_BO_ALLOC_VOLATILE | \
+ I915_BO_ALLOC_STRUCT_PAGE)
+#define I915_BO_READONLY BIT(3)
+#define I915_TILING_QUIRK_BIT 4 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -213,7 +216,6 @@ struct drm_i915_gem_object {
* Protects the pages and their use. Do not use directly, but
* instead go through the pin/unpin interfaces.
*/
- struct mutex lock;
atomic_t pages_pin_count;
atomic_t shrink_pin;
@@ -288,13 +290,16 @@ struct drm_i915_gem_object {
unsigned long *bit_17;
union {
+#ifdef CONFIG_MMU_NOTIFIER
struct i915_gem_userptr {
uintptr_t ptr;
+ unsigned long notifier_seq;
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
+ struct mmu_interval_notifier notifier;
+ struct page **pvec;
+ int page_ref;
} userptr;
+#endif
struct drm_mm_node *stolen;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 43028f3539a6..aed8a37ccdc9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -19,7 +19,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
bool shrinkable;
int i;
- lockdep_assert_held(&obj->mm.lock);
+ assert_object_held_shared(obj);
if (i915_gem_object_is_volatile(obj))
obj->mm.madv = I915_MADV_DONTNEED;
@@ -70,6 +70,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct list_head *list;
unsigned long flags;
+ assert_object_held(obj);
spin_lock_irqsave(&i915->mm.obj_lock, flags);
i915->mm.shrink_count++;
@@ -91,6 +92,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
+ assert_object_held_shared(obj);
+
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
drm_dbg(&i915->drm,
"Attempting to obtain a purgeable object\n");
@@ -114,23 +117,41 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
- if (err)
- return err;
+ assert_object_held(obj);
+
+ assert_object_held_shared(obj);
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
- goto unlock;
+ return err;
smp_mb__before_atomic();
}
atomic_inc(&obj->mm.pages_pin_count);
-unlock:
- mutex_unlock(&obj->mm.lock);
+ return 0;
+}
+
+int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_pin_pages(obj);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
return err;
}
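
With obj->mm.lock gone, i915_gem_object_pin_pages() requires the object's dma-resv to be held; callers outside any ww transaction use the new _unlocked wrapper, which runs the backoff loop above internally. An assumed usage pattern:

static int use_backing_store(struct drm_i915_gem_object *obj)
{
        int err;

        /* Takes and drops the dma-resv lock internally, with ww backoff. */
        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err)
                return err;

        /* ... access the backing store; the pin is held by a refcount ... */

        i915_gem_object_unpin_pages(obj);       /* atomic, no lock needed */
        return 0;
}
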
@@ -145,7 +166,7 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->mm.lock);
+ assert_object_held_shared(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->ops->writeback)
@@ -176,6 +197,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
+ assert_object_held_shared(obj);
+
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
@@ -199,17 +222,12 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
- int err;
if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY;
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock(&obj->mm.lock);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
- err = -EBUSY;
- goto unlock;
- }
+ assert_object_held_shared(obj);
i915_gem_object_release_mmap_offset(obj);
@@ -226,17 +244,10 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
* get_pages backends we should be better able to handle the
* cancellation of the async task in a more uniform manner.
*/
- if (!pages && !i915_gem_object_needs_async_cancel(obj))
- pages = ERR_PTR(-EINVAL);
-
- if (!IS_ERR(pages))
+ if (!IS_ERR_OR_NULL(pages))
obj->ops->put_pages(obj, pages);
- err = 0;
-unlock:
- mutex_unlock(&obj->mm.lock);
-
- return err;
+ return 0;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
@@ -333,18 +344,15 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
- unsigned int flags;
bool pinned;
void *ptr;
int err;
- flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
- if (!i915_gem_object_type_has(obj, flags))
+ if (!i915_gem_object_has_struct_page(obj) &&
+ !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
return ERR_PTR(-ENXIO);
- err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
- if (err)
- return ERR_PTR(err);
+ assert_object_held(obj);
pinned = !(type & I915_MAP_OVERRIDE);
type &= ~I915_MAP_OVERRIDE;
@@ -354,10 +362,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
- if (err) {
- ptr = ERR_PTR(err);
- goto out_unlock;
- }
+ if (err)
+ return ERR_PTR(err);
smp_mb__before_atomic();
}
@@ -392,13 +398,23 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
obj->mm.mapping = page_pack_bits(ptr, type);
}
-out_unlock:
- mutex_unlock(&obj->mm.lock);
return ptr;
err_unpin:
atomic_dec(&obj->mm.pages_pin_count);
- goto out_unlock;
+ return ptr;
+}
+
+void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ void *ret;
+
+ i915_gem_object_lock(obj, NULL);
+ ret = i915_gem_object_pin_map(obj, type);
+ i915_gem_object_unlock(obj);
+
+ return ret;
}
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
@@ -448,7 +464,8 @@ struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
struct i915_gem_object_page_iter *iter,
unsigned int n,
- unsigned int *offset)
+ unsigned int *offset,
+ bool allow_alloc)
{
const bool dma = iter == &obj->mm.get_dma_page;
struct scatterlist *sg;
@@ -470,6 +487,9 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
if (n < READ_ONCE(iter->sg_idx))
goto lookup;
+ if (!allow_alloc)
+ goto manual_lookup;
+
mutex_lock(&iter->lock);
/* We prefer to reuse the last sg so that repeated lookup of this
@@ -519,7 +539,16 @@ scan:
if (unlikely(n < idx)) /* insertion completed by another thread */
goto lookup;
- /* In case we failed to insert the entry into the radixtree, we need
+ goto manual_walk;
+
+manual_lookup:
+ idx = 0;
+ sg = obj->mm.pages->sgl;
+ count = __sg_page_count(sg);
+
+manual_walk:
+ /*
+ * In case we failed to insert the entry into the radixtree, we need
* to look beyond the current sg.
*/
while (idx + count <= n) {
@@ -566,7 +595,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
- sg = i915_gem_object_get_sg(obj, n, &offset);
+ sg = i915_gem_object_get_sg(obj, n, &offset, true);
return nth_page(sg_page(sg), offset);
}
@@ -592,7 +621,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
struct scatterlist *sg;
unsigned int offset;
- sg = i915_gem_object_get_sg_dma(obj, n, &offset);
+ sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);
if (len)
*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
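
The new allow_alloc flag lets callers that must not allocate (for instance while holding a spinlock) skip the radix-tree cache insertion in exchange for a linear walk of the sg list. A hypothetical lookup on such a path:

static dma_addr_t lookup_dma_no_alloc(struct drm_i915_gem_object *obj,
                                      unsigned int n)
{
        struct scatterlist *sg;
        unsigned int offset;

        /* allow_alloc=false: never touches the radix-tree allocator. */
        sg = i915_gem_object_get_sg_dma(obj, n, &offset, false);

        /* offset is in pages within this sg entry, as in the hunk above. */
        return sg_dma_address(sg) + ((dma_addr_t)offset << PAGE_SHIFT);
}
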
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 3c0b157e2a35..81dc2bf59bc3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -35,7 +35,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
- vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
+ vaddr = dma_alloc_coherent(obj->base.dev->dev,
roundup_pow_of_two(obj->base.size),
&dma, GFP_KERNEL);
if (!vaddr)
@@ -76,6 +76,8 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+ /* We're no longer struct page backed */
+ obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
@@ -83,13 +85,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
err_st:
kfree(st);
err_pci:
- dma_free_coherent(&obj->base.dev->pdev->dev,
+ dma_free_coherent(obj->base.dev->dev,
roundup_pow_of_two(obj->base.size),
vaddr, dma);
return -ENOMEM;
}
-static void
+void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
@@ -129,14 +131,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
- dma_free_coherent(&obj->base.dev->pdev->dev,
+ dma_free_coherent(obj->base.dev->dev,
roundup_pow_of_two(obj->base.size),
vaddr, dma);
}
-static int
-phys_pwrite(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pwrite *args)
+int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
@@ -165,9 +166,8 @@ phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-static int
-phys_pread(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pread *args)
+int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
@@ -186,62 +186,14 @@ phys_pread(struct drm_i915_gem_object *obj,
return 0;
}
-static void phys_release(struct drm_i915_gem_object *obj)
-{
- fput(obj->base.filp);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
- .name = "i915_gem_object_phys",
- .get_pages = i915_gem_object_get_pages_phys,
- .put_pages = i915_gem_object_put_pages_phys,
-
- .pread = phys_pread,
- .pwrite = phys_pwrite,
-
- .release = phys_release,
-};
-
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
+static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
int err;
- if (align > obj->base.size)
- return -EINVAL;
-
- if (obj->ops == &i915_gem_phys_ops)
- return 0;
-
- if (!i915_gem_object_is_shmem(obj))
- return -EINVAL;
-
- err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
- if (err)
- return err;
-
- mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
-
- if (obj->mm.madv != I915_MADV_WILLNEED) {
- err = -EFAULT;
- goto err_unlock;
- }
-
- if (i915_gem_object_has_tiling_quirk(obj)) {
- err = -EFAULT;
- goto err_unlock;
- }
-
- if (obj->mm.mapping) {
- err = -EBUSY;
- goto err_unlock;
- }
-
pages = __i915_gem_object_unset_pages(obj);
- obj->ops = &i915_gem_phys_ops;
-
- err = ____i915_gem_object_get_pages(obj);
+ err = i915_gem_object_get_pages_phys(obj);
if (err)
goto err_xfer;
@@ -249,25 +201,57 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
__i915_gem_object_pin_pages(obj);
if (!IS_ERR_OR_NULL(pages))
- i915_gem_shmem_ops.put_pages(obj, pages);
+ i915_gem_object_put_pages_shmem(obj, pages);
i915_gem_object_release_memory_region(obj);
-
- mutex_unlock(&obj->mm.lock);
return 0;
err_xfer:
- obj->ops = &i915_gem_shmem_ops;
if (!IS_ERR_OR_NULL(pages)) {
unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
}
-err_unlock:
- mutex_unlock(&obj->mm.lock);
return err;
}
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
+{
+ int err;
+
+ assert_object_held(obj);
+
+ if (align > obj->base.size)
+ return -EINVAL;
+
+ if (!i915_gem_object_is_shmem(obj))
+ return -EINVAL;
+
+ if (!i915_gem_object_has_struct_page(obj))
+ return 0;
+
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (err)
+ return err;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
+ if (i915_gem_object_has_tiling_quirk(obj))
+ return -EFAULT;
+
+ if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
+ return -EBUSY;
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ drm_dbg(obj->base.dev,
+ "Attempting to obtain a purgeable object\n");
+ return -EFAULT;
+ }
+
+ return i915_gem_object_shmem_to_phys(obj);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif
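
Since i915_gem_object_attach_phys() now asserts the object lock instead of taking obj->mm.lock itself, its callers are assumed to wrap it roughly like this:

static int attach_phys_locked(struct drm_i915_gem_object *obj, int align)
{
        int err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                return err;

        err = i915_gem_object_attach_phys(obj, align);
        i915_gem_object_unlock(obj);

        return err;
}
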
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 000e1cd8e920..8b9d7d14c4bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -116,7 +116,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- i915_gem_shrink(i915, -1UL, NULL, ~0);
+ i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
wbinvd_on_all_cpus();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 3e3dad22a683..6a84fb6dde24 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -106,13 +106,11 @@ err_free_sg:
}
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mem,
- unsigned long flags)
+ struct intel_memory_region *mem)
{
INIT_LIST_HEAD(&obj->mm.blocks);
obj->mm.region = intel_memory_region_get(mem);
- obj->flags |= flags;
if (obj->base.size <= mem->min_page_size)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
@@ -161,17 +159,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
- /*
- * XXX: There is a prevalence of the assumption that we fit the
- * object's page count inside a 32bit _signed_ variable. Let's document
- * this and catch if we ever need to fix it. In the meantime, if you do
- * spot such a local variable, please consider fixing!
- */
-
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
+ if (i915_gem_object_size_2big(size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index f2ff6f8bff74..ebddc86d78f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,8 +17,7 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mem,
- unsigned long flags);
+ struct intel_memory_region *mem);
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index cf83c208688c..a9bfa66c8da1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -99,7 +99,7 @@ rebuild_st:
goto err_sg;
}
- i915_gem_shrink(i915, 2 * page_count, NULL, *s++);
+ i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);
/*
* We've tried hard to allocate the memory by reaping
@@ -172,7 +172,7 @@ rebuild_st:
max_segment = PAGE_SIZE;
goto rebuild_st;
} else {
- dev_warn(&i915->drm.pdev->dev,
+ dev_warn(i915->drm.dev,
"Failed to DMA remap %lu pages\n",
page_count);
goto err_pages;
@@ -296,8 +296,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
__start_cpu_write(obj);
}
-static void
-shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
+void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct pagevec pvec;
@@ -331,6 +330,15 @@ shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
kfree(pages);
}
+static void
+shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
+{
+ if (likely(i915_gem_object_has_struct_page(obj)))
+ i915_gem_object_put_pages_shmem(obj, pages);
+ else
+ i915_gem_object_put_pages_phys(obj, pages);
+}
+
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
@@ -343,6 +351,9 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
/* Caller already validated user args */
GEM_BUG_ON(!access_ok(user_data, arg->size));
+ if (!i915_gem_object_has_struct_page(obj))
+ return i915_gem_object_pwrite_phys(obj, arg);
+
/*
* Before we instantiate/pin the backing store for our use, we
* can prepopulate the shmemfs filp efficiently using a write into
@@ -421,17 +432,27 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
+static int
+shmem_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *arg)
+{
+ if (!i915_gem_object_has_struct_page(obj))
+ return i915_gem_object_pread_phys(obj, arg);
+
+ return -ENODEV;
+}
+
static void shmem_release(struct drm_i915_gem_object *obj)
{
- i915_gem_object_release_memory_region(obj);
+ if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
+ i915_gem_object_release_memory_region(obj);
fput(obj->base.filp);
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.name = "i915_gem_object_shmem",
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = shmem_get_pages,
.put_pages = shmem_put_pages,
@@ -439,6 +460,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.writeback = shmem_writeback,
.pwrite = shmem_pwrite,
+ .pread = shmem_pread,
.release = shmem_release,
};
@@ -491,7 +513,8 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
+ I915_BO_ALLOC_STRUCT_PAGE);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -515,7 +538,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
i915_gem_object_set_cache_coherency(obj, cache_level);
- i915_gem_object_init_memory_region(obj, mem, 0);
+ i915_gem_object_init_memory_region(obj, mem);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index c2dba1cd9532..4f9c8d3021ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -49,9 +49,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = I915_GEM_OBJECT_UNBIND_TEST;
if (i915_gem_object_unbind(obj, flags) == 0)
- __i915_gem_object_put_pages(obj);
+ return true;
- return !i915_gem_object_has_pages(obj);
+ return false;
}
static void try_to_writeback(struct drm_i915_gem_object *obj,
@@ -70,6 +70,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
/**
* i915_gem_shrink - Shrink buffer object caches
+ * @ww: i915 gem ww acquire ctx, or NULL
* @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
@@ -94,7 +95,8 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
* The number of pages of backing storage actually released.
*/
unsigned long
-i915_gem_shrink(struct drm_i915_private *i915,
+i915_gem_shrink(struct i915_gem_ww_ctx *ww,
+ struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned int shrink)
@@ -113,6 +115,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
+ int err;
trace_i915_gem_shrink(i915, target, shrink);
@@ -200,25 +203,40 @@ i915_gem_shrink(struct drm_i915_private *i915,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ err = 0;
if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
- mutex_lock(&obj->mm.lock);
- if (!i915_gem_object_has_pages(obj)) {
+ if (!ww) {
+ if (!i915_gem_object_trylock(obj))
+ goto skip;
+ } else {
+ err = i915_gem_object_lock(obj, ww);
+ if (err)
+ goto skip;
+ }
+
+ if (!__i915_gem_object_put_pages(obj)) {
try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
}
- mutex_unlock(&obj->mm.lock);
+ if (!ww)
+ i915_gem_object_unlock(obj);
}
dma_resv_prune(obj->base.resv);
scanned += obj->base.size >> PAGE_SHIFT;
+skip:
i915_gem_object_put(obj);
spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (err)
+ break;
}
list_splice_tail(&still_in_list, phase->list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ if (err)
+ return err;
}
if (shrink & I915_SHRINK_BOUND)
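
Without a ww context the shrinker may be running from reclaim entered under some other object's lock, so it must only trylock; sleeping here could deadlock against the very allocation that triggered reclaim. The trylock used above is assumed to be a thin dma-resv wrapper:

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
        return dma_resv_trylock(obj->base.resv);
}
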
@@ -249,7 +267,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
unsigned long freed = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- freed = i915_gem_shrink(i915, -1UL, NULL,
+ freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
}
@@ -295,7 +313,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_scanned = 0;
- freed = i915_gem_shrink(i915,
+ freed = i915_gem_shrink(NULL, i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
@@ -304,7 +322,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- freed += i915_gem_shrink(i915,
+ freed += i915_gem_shrink(NULL, i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
@@ -329,7 +347,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -367,7 +385,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_VMAPS);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
index b397d7785789..8512470f6fd6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
@@ -9,10 +9,12 @@
#include <linux/bits.h>
struct drm_i915_private;
+struct i915_gem_ww_ctx;
struct mutex;
/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
+unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww,
+ struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index a1e197a6e999..b0597de206de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -630,20 +630,22 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
int err;
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, 0);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ if (WARN_ON(!i915_gem_object_trylock(obj)))
+ return -EBUSY;
- i915_gem_object_init_memory_region(obj, mem, 0);
+ err = i915_gem_object_pin_pages(obj);
+ if (!err)
+ i915_gem_object_init_memory_region(obj, mem);
+ i915_gem_object_unlock(obj);
- return 0;
+ return err;
}
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
@@ -686,7 +688,7 @@ struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN_SMEM],
size, I915_BO_ALLOC_CONTIGUOUS);
}
@@ -726,7 +728,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
resource_size_t size)
{
- struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
+ struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN_SMEM];
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index d589d3d81085..9e8945013090 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -265,7 +265,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
*/
- mutex_lock(&obj->mm.lock);
if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
@@ -280,7 +279,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
i915_gem_object_set_tiling_quirk(obj);
}
}
- mutex_unlock(&obj->mm.lock);
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index f2eaed6aca3d..a657b99ec760 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -2,10 +2,39 @@
* SPDX-License-Identifier: MIT
*
* Copyright © 2012-2014 Intel Corporation
+ *
+ * Based on amdgpu_mn, which bears the following notice:
+ *
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
*/
#include <linux/mmu_context.h>
-#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
@@ -15,408 +44,121 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-struct i915_mm_struct {
- struct mm_struct *mm;
- struct drm_i915_private *i915;
- struct i915_mmu_notifier *mn;
- struct hlist_node node;
- struct kref kref;
- struct rcu_work work;
-};
-
-#if defined(CONFIG_MMU_NOTIFIER)
-#include <linux/interval_tree.h>
+#ifdef CONFIG_MMU_NOTIFIER
-struct i915_mmu_notifier {
- spinlock_t lock;
- struct hlist_node node;
- struct mmu_notifier mn;
- struct rb_root_cached objects;
- struct i915_mm_struct *mm;
-};
-
-struct i915_mmu_object {
- struct i915_mmu_notifier *mn;
- struct drm_i915_gem_object *obj;
- struct interval_tree_node it;
-};
-
-static void add_object(struct i915_mmu_object *mo)
+/**
+ * i915_gem_userptr_invalidate - callback to notify about mm change
+ *
+ * @mni: the range (mm) is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
+ *
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
+ */
+static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
- interval_tree_insert(&mo->it, &mo->mn->objects);
-}
+ struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ long r;
-static void del_object(struct i915_mmu_object *mo)
-{
- if (RB_EMPTY_NODE(&mo->it.rb))
- return;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- interval_tree_remove(&mo->it, &mo->mn->objects);
- RB_CLEAR_NODE(&mo->it.rb);
-}
+ spin_lock(&i915->mm.notifier_lock);
-static void
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
-{
- struct i915_mmu_object *mo = obj->userptr.mmu_object;
+ mmu_interval_set_seq(mni, cur_seq);
+
+ spin_unlock(&i915->mm.notifier_lock);
/*
- * During mm_invalidate_range we need to cancel any userptr that
- * overlaps the range being invalidated. Doing so requires the
- * struct_mutex, and that risks recursion. In order to cause
- * recursion, the user must alias the userptr address space with
- * a GTT mmapping (possible with a MAP_FIXED) - then when we have
- * to invalidate that mmaping, mm_invalidate_range is called with
- * the userptr address *and* the struct_mutex held. To prevent that
- * we set a flag under the i915_mmu_notifier spinlock to indicate
- * whether this object is valid.
+ * We don't wait when the process is exiting. This is valid
+ * because the object will be cleaned up anyway.
+ *
+ * This is also temporarily required as a hack: we cannot
+ * currently force non-consistent batch buffers to preempt and
+ * reschedule, so waiting on them here would hang a process on exit.
*/
- if (!mo)
- return;
+ if (current->flags & PF_EXITING)
+ return true;
- spin_lock(&mo->mn->lock);
- if (value)
- add_object(mo);
- else
- del_object(mo);
- spin_unlock(&mo->mn->lock);
-}
-
-static int
-userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
- const struct mmu_notifier_range *range)
-{
- struct i915_mmu_notifier *mn =
- container_of(_mn, struct i915_mmu_notifier, mn);
- struct interval_tree_node *it;
- unsigned long end;
- int ret = 0;
-
- if (RB_EMPTY_ROOT(&mn->objects.rb_root))
- return 0;
-
- /* interval ranges are inclusive, but invalidate range is exclusive */
- end = range->end - 1;
-
- spin_lock(&mn->lock);
- it = interval_tree_iter_first(&mn->objects, range->start, end);
- while (it) {
- struct drm_i915_gem_object *obj;
-
- if (!mmu_notifier_range_blockable(range)) {
- ret = -EAGAIN;
- break;
- }
-
- /*
- * The mmu_object is released late when destroying the
- * GEM object so it is entirely possible to gain a
- * reference on an object in the process of being freed
- * since our serialisation is via the spinlock and not
- * the struct_mutex - and consequently use it after it
- * is freed and then double free it. To prevent that
- * use-after-free we only acquire a reference on the
- * object if it is not in the process of being destroyed.
- */
- obj = container_of(it, struct i915_mmu_object, it)->obj;
- if (!kref_get_unless_zero(&obj->base.refcount)) {
- it = interval_tree_iter_next(it, range->start, end);
- continue;
- }
- spin_unlock(&mn->lock);
-
- ret = i915_gem_object_unbind(obj,
- I915_GEM_OBJECT_UNBIND_ACTIVE |
- I915_GEM_OBJECT_UNBIND_BARRIER);
- if (ret == 0)
- ret = __i915_gem_object_put_pages(obj);
- i915_gem_object_put(obj);
- if (ret)
- return ret;
-
- spin_lock(&mn->lock);
-
- /*
- * As we do not (yet) protect the mmu from concurrent insertion
- * over this range, there is no guarantee that this search will
- * terminate given a pathologic workload.
- */
- it = interval_tree_iter_first(&mn->objects, range->start, end);
- }
- spin_unlock(&mn->lock);
-
- return ret;
+ /* we will unbind on the next submission; we still hold userptr pins */
+ r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
+ return true;
}
-static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
- .invalidate_range_start = userptr_mn_invalidate_range_start,
+static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
+ .invalidate = i915_gem_userptr_invalidate,
};
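The invalidate callback above only bumps the interval's sequence count (under i915->mm.notifier_lock) and waits for outstanding GPU work; consumers notice the bump through the standard mmu_interval_notifier retry pattern. A minimal sketch of that pattern, mirroring i915_gem_object_userptr_submit_init() later in this hunk (locals and error handling elided):

	unsigned long seq;

retry:
	seq = mmu_interval_read_begin(&obj->userptr.notifier);

	/* pin the user pages outside of any spinlock */
	pin_user_pages_fast(obj->userptr.ptr, num_pages, gup_flags, pvec);

	spin_lock(&i915->mm.notifier_lock);
	if (mmu_interval_read_retry(&obj->userptr.notifier, seq)) {
		/* an invalidation ran concurrently: drop the pages, retry */
		spin_unlock(&i915->mm.notifier_lock);
		unpin_user_pages(pvec, num_pages);
		goto retry;
	}
	/* publish the pages while still holding the lock */
	obj->userptr.pvec = pvec;
	obj->userptr.notifier_seq = seq;
	spin_unlock(&i915->mm.notifier_lock);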
-static struct i915_mmu_notifier *
-i915_mmu_notifier_create(struct i915_mm_struct *mm)
-{
- struct i915_mmu_notifier *mn;
-
- mn = kmalloc(sizeof(*mn), GFP_KERNEL);
- if (mn == NULL)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&mn->lock);
- mn->mn.ops = &i915_gem_userptr_notifier;
- mn->objects = RB_ROOT_CACHED;
- mn->mm = mm;
-
- return mn;
-}
-
-static void
-i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
-{
- struct i915_mmu_object *mo;
-
- mo = fetch_and_zero(&obj->userptr.mmu_object);
- if (!mo)
- return;
-
- spin_lock(&mo->mn->lock);
- del_object(mo);
- spin_unlock(&mo->mn->lock);
- kfree(mo);
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_find(struct i915_mm_struct *mm)
-{
- struct i915_mmu_notifier *mn, *old;
- int err;
-
- mn = READ_ONCE(mm->mn);
- if (likely(mn))
- return mn;
-
- mn = i915_mmu_notifier_create(mm);
- if (IS_ERR(mn))
- return mn;
-
- err = mmu_notifier_register(&mn->mn, mm->mm);
- if (err) {
- kfree(mn);
- return ERR_PTR(err);
- }
-
- old = cmpxchg(&mm->mn, NULL, mn);
- if (old) {
- mmu_notifier_unregister(&mn->mn, mm->mm);
- kfree(mn);
- mn = old;
- }
-
- return mn;
-}
-
static int
-i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
- unsigned flags)
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
- struct i915_mmu_notifier *mn;
- struct i915_mmu_object *mo;
-
- if (flags & I915_USERPTR_UNSYNCHRONIZED)
- return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
-
- if (GEM_WARN_ON(!obj->userptr.mm))
- return -EINVAL;
-
- mn = i915_mmu_notifier_find(obj->userptr.mm);
- if (IS_ERR(mn))
- return PTR_ERR(mn);
-
- mo = kzalloc(sizeof(*mo), GFP_KERNEL);
- if (!mo)
- return -ENOMEM;
-
- mo->mn = mn;
- mo->obj = obj;
- mo->it.start = obj->userptr.ptr;
- mo->it.last = obj->userptr.ptr + obj->base.size - 1;
- RB_CLEAR_NODE(&mo->it.rb);
-
- obj->userptr.mmu_object = mo;
- return 0;
-}
-
-static void
-i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
- struct mm_struct *mm)
-{
- if (mn == NULL)
- return;
-
- mmu_notifier_unregister(&mn->mn, mm);
- kfree(mn);
-}
-
-#else
-
-static void
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
-{
-}
-
-static void
-i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
-{
-}
-
-static int
-i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
- unsigned flags)
-{
- if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
- return -ENODEV;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return 0;
-}
-
-static void
-i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
- struct mm_struct *mm)
-{
-}
-
-#endif
-
-static struct i915_mm_struct *
-__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
-{
- struct i915_mm_struct *it, *mm = NULL;
-
- rcu_read_lock();
- hash_for_each_possible_rcu(i915->mm_structs,
- it, node,
- (unsigned long)real)
- if (it->mm == real && kref_get_unless_zero(&it->kref)) {
- mm = it;
- break;
- }
- rcu_read_unlock();
-
- return mm;
+ return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
+ obj->userptr.ptr, obj->base.size,
+ &i915_gem_userptr_notifier_ops);
}
-static int
-i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_mm_struct *mm, *new;
- int ret = 0;
-
- /* During release of the GEM object we hold the struct_mutex. This
- * precludes us from calling mmput() at that time as that may be
- * the last reference and so call exit_mmap(). exit_mmap() will
- * attempt to reap the vma, and if we were holding a GTT mmap
- * would then call drm_gem_vm_close() and attempt to reacquire
- * the struct mutex. So in order to avoid that recursion, we have
- * to defer releasing the mm reference until after we drop the
- * struct_mutex, i.e. we need to schedule a worker to do the clean
- * up.
- */
- mm = __i915_mm_struct_find(i915, current->mm);
- if (mm)
- goto out;
-
- new = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (!new)
- return -ENOMEM;
+ struct page **pvec = NULL;
- kref_init(&new->kref);
- new->i915 = to_i915(obj->base.dev);
- new->mm = current->mm;
- new->mn = NULL;
-
- spin_lock(&i915->mm_lock);
- mm = __i915_mm_struct_find(i915, current->mm);
- if (!mm) {
- hash_add_rcu(i915->mm_structs,
- &new->node,
- (unsigned long)new->mm);
- mmgrab(current->mm);
- mm = new;
+ spin_lock(&i915->mm.notifier_lock);
+ if (!--obj->userptr.page_ref) {
+ pvec = obj->userptr.pvec;
+ obj->userptr.pvec = NULL;
}
- spin_unlock(&i915->mm_lock);
- if (mm != new)
- kfree(new);
-
-out:
- obj->userptr.mm = mm;
- return ret;
-}
-
-static void
-__i915_mm_struct_free__worker(struct work_struct *work)
-{
- struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work);
-
- i915_mmu_notifier_free(mm->mn, mm->mm);
- mmdrop(mm->mm);
- kfree(mm);
-}
-
-static void
-__i915_mm_struct_free(struct kref *kref)
-{
- struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
-
- spin_lock(&mm->i915->mm_lock);
- hash_del_rcu(&mm->node);
- spin_unlock(&mm->i915->mm_lock);
+ GEM_BUG_ON(obj->userptr.page_ref < 0);
+ spin_unlock(&i915->mm.notifier_lock);
- INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker);
- queue_rcu_work(system_wq, &mm->work);
-}
-
-static void
-i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
-{
- if (obj->userptr.mm == NULL)
- return;
+ if (pvec) {
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
- kref_put(&obj->userptr.mm->kref, __i915_mm_struct_free);
- obj->userptr.mm = NULL;
+ unpin_user_pages(pvec, num_pages);
+ kvfree(pvec);
+ }
}
-struct get_pages_work {
- struct work_struct work;
- struct drm_i915_gem_object *obj;
- struct task_struct *task;
-};
-
-static struct sg_table *
-__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, unsigned long num_pages)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
unsigned int sg_page_sizes;
struct scatterlist *sg;
+ struct page **pvec;
int ret;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
+
+ spin_lock(&i915->mm.notifier_lock);
+ if (GEM_WARN_ON(!obj->userptr.page_ref)) {
+ spin_unlock(&i915->mm.notifier_lock);
+ ret = -EFAULT;
+ goto err_free;
+ }
+
+ obj->userptr.page_ref++;
+ pvec = obj->userptr.pvec;
+ spin_unlock(&i915->mm.notifier_lock);
alloc_table:
sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
num_pages << PAGE_SHIFT, max_segment,
NULL, 0, GFP_KERNEL);
if (IS_ERR(sg)) {
- kfree(st);
- return ERR_CAST(sg);
+ ret = PTR_ERR(sg);
+ goto err;
}
ret = i915_gem_gtt_prepare_pages(obj, st);
@@ -428,203 +170,20 @@ alloc_table:
goto alloc_table;
}
- kfree(st);
- return ERR_PTR(ret);
+ goto err;
}
sg_page_sizes = i915_sg_page_sizes(st->sgl);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
- return st;
-}
-
-static void
-__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
-{
- struct get_pages_work *work = container_of(_work, typeof(*work), work);
- struct drm_i915_gem_object *obj = work->obj;
- const unsigned long npages = obj->base.size >> PAGE_SHIFT;
- unsigned long pinned;
- struct page **pvec;
- int ret;
-
- ret = -ENOMEM;
- pinned = 0;
-
- pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (pvec != NULL) {
- struct mm_struct *mm = obj->userptr.mm->mm;
- unsigned int flags = 0;
- int locked = 0;
-
- if (!i915_gem_object_is_readonly(obj))
- flags |= FOLL_WRITE;
-
- ret = -EFAULT;
- if (mmget_not_zero(mm)) {
- while (pinned < npages) {
- if (!locked) {
- mmap_read_lock(mm);
- locked = 1;
- }
- ret = pin_user_pages_remote
- (mm,
- obj->userptr.ptr + pinned * PAGE_SIZE,
- npages - pinned,
- flags,
- pvec + pinned, NULL, &locked);
- if (ret < 0)
- break;
-
- pinned += ret;
- }
- if (locked)
- mmap_read_unlock(mm);
- mmput(mm);
- }
- }
-
- mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
- if (obj->userptr.work == &work->work) {
- struct sg_table *pages = ERR_PTR(ret);
-
- if (pinned == npages) {
- pages = __i915_gem_userptr_alloc_pages(obj, pvec,
- npages);
- if (!IS_ERR(pages)) {
- pinned = 0;
- pages = NULL;
- }
- }
-
- obj->userptr.work = ERR_CAST(pages);
- if (IS_ERR(pages))
- __i915_gem_userptr_set_active(obj, false);
- }
- mutex_unlock(&obj->mm.lock);
-
- unpin_user_pages(pvec, pinned);
- kvfree(pvec);
-
- i915_gem_object_put(obj);
- put_task_struct(work->task);
- kfree(work);
-}
-
-static struct sg_table *
-__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
-{
- struct get_pages_work *work;
-
- /* Spawn a worker so that we can acquire the
- * user pages without holding our mutex. Access
- * to the user pages requires mmap_lock, and we have
- * a strict lock ordering of mmap_lock, struct_mutex -
- * we already hold struct_mutex here and so cannot
- * call gup without encountering a lock inversion.
- *
- * Userspace will keep on repeating the operation
- * (thanks to EAGAIN) until either we hit the fast
- * path or the worker completes. If the worker is
- * cancelled or superseded, the task is still run
- * but the results ignored. (This leads to
- * complications that we may have a stray object
- * refcount that we need to be wary of when
- * checking for existing objects during creation.)
- * If the worker encounters an error, it reports
- * that error back to this function through
- * obj->userptr.work = ERR_PTR.
- */
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL)
- return ERR_PTR(-ENOMEM);
-
- obj->userptr.work = &work->work;
-
- work->obj = i915_gem_object_get(obj);
-
- work->task = current;
- get_task_struct(work->task);
-
- INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
- queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
-
- return ERR_PTR(-EAGAIN);
-}
-
-static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
-{
- const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
- struct mm_struct *mm = obj->userptr.mm->mm;
- struct page **pvec;
- struct sg_table *pages;
- bool active;
- int pinned;
- unsigned int gup_flags = 0;
-
- /* If userspace should engineer that these pages are replaced in
- * the vma between us binding this page into the GTT and completion
- * of rendering... Their loss. If they change the mapping of their
- * pages they need to create a new bo to point to the new vma.
- *
- * However, that still leaves open the possibility of the vma
- * being copied upon fork. Which falls under the same userspace
- * synchronisation issue as a regular bo, except that this time
- * the process may not be expecting that a particular piece of
- * memory is tied to the GPU.
- *
- * Fortunately, we can hook into the mmu_notifier in order to
- * discard the page references prior to anything nasty happening
- * to the vma (discard or cloning) which should prevent the more
- * egregious cases from causing harm.
- */
-
- if (obj->userptr.work) {
- /* active flag should still be held for the pending work */
- if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
- else
- return -EAGAIN;
- }
-
- pvec = NULL;
- pinned = 0;
-
- if (mm == current->mm) {
- pvec = kvmalloc_array(num_pages, sizeof(struct page *),
- GFP_KERNEL |
- __GFP_NORETRY |
- __GFP_NOWARN);
- if (pvec) {
- /* defer to worker if malloc fails */
- if (!i915_gem_object_is_readonly(obj))
- gup_flags |= FOLL_WRITE;
- pinned = pin_user_pages_fast_only(obj->userptr.ptr,
- num_pages, gup_flags,
- pvec);
- }
- }
-
- active = false;
- if (pinned < 0) {
- pages = ERR_PTR(pinned);
- pinned = 0;
- } else if (pinned < num_pages) {
- pages = __i915_gem_userptr_get_pages_schedule(obj);
- active = pages == ERR_PTR(-EAGAIN);
- } else {
- pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
- active = !IS_ERR(pages);
- }
- if (active)
- __i915_gem_userptr_set_active(obj, true);
-
- if (IS_ERR(pages))
- unpin_user_pages(pvec, pinned);
- kvfree(pvec);
+ return 0;
- return PTR_ERR_OR_ZERO(pages);
+err:
+ i915_gem_object_userptr_drop_ref(obj);
+err_free:
+ kfree(st);
+ return ret;
}
static void
@@ -634,9 +193,6 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- /* Cancel any inflight work and force them to restart their gup */
- obj->userptr.work = NULL;
- __i915_gem_userptr_set_active(obj, false);
if (!pages)
return;
@@ -676,42 +232,224 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
}
mark_page_accessed(page);
- unpin_user_page(page);
}
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
+
+ i915_gem_object_userptr_drop_ref(obj);
+}
+
+static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages)
+{
+ struct sg_table *pages;
+ int err;
+
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (err)
+ return err;
+
+ if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
+ return -EBUSY;
+
+ assert_object_held(obj);
+
+ pages = __i915_gem_object_unset_pages(obj);
+ if (!IS_ERR_OR_NULL(pages))
+ i915_gem_userptr_put_pages(obj, pages);
+
+ if (get_pages)
+ err = ____i915_gem_object_get_pages(obj);
+
+ return err;
+}
+
+int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
+ struct page **pvec;
+ unsigned int gup_flags = 0;
+ unsigned long notifier_seq;
+ int pinned, ret;
+
+ if (obj->userptr.notifier.mm != current->mm)
+ return -EFAULT;
+
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
+ if (ret)
+ return ret;
+
+ /* optimistically try to preserve current pages while unlocked */
+ if (i915_gem_object_has_pages(obj) &&
+ !mmu_interval_check_retry(&obj->userptr.notifier,
+ obj->userptr.notifier_seq)) {
+ spin_lock(&i915->mm.notifier_lock);
+ if (obj->userptr.pvec &&
+ !mmu_interval_read_retry(&obj->userptr.notifier,
+ obj->userptr.notifier_seq)) {
+ obj->userptr.page_ref++;
+
+ /* We can keep using the current binding; this is the fastpath */
+ ret = 1;
+ }
+ spin_unlock(&i915->mm.notifier_lock);
+ }
+
+ if (!ret) {
+ /* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
+ ret = i915_gem_object_userptr_unbind(obj, false);
+ }
+ i915_gem_object_unlock(obj);
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0)
+ return 0;
+
+ notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
+
+ pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pvec)
+ return -ENOMEM;
+
+ if (!i915_gem_object_is_readonly(obj))
+ gup_flags |= FOLL_WRITE;
+
+ pinned = ret = 0;
+ while (pinned < num_pages) {
+ ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
+ num_pages - pinned, gup_flags,
+ &pvec[pinned]);
+ if (ret < 0)
+ goto out;
+
+ pinned += ret;
+ }
+ ret = 0;
+
+ spin_lock(&i915->mm.notifier_lock);
+
+ if (mmu_interval_read_retry(&obj->userptr.notifier,
+ !obj->userptr.page_ref ? notifier_seq :
+ obj->userptr.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (!obj->userptr.page_ref++) {
+ obj->userptr.pvec = pvec;
+ obj->userptr.notifier_seq = notifier_seq;
+
+ pvec = NULL;
+ }
+
+out_unlock:
+ spin_unlock(&i915->mm.notifier_lock);
+
+out:
+ if (pvec) {
+ unpin_user_pages(pvec, pinned);
+ kvfree(pvec);
+ }
+
+ return ret;
+}
+
+int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
+{
+ if (mmu_interval_read_retry(&obj->userptr.notifier,
+ obj->userptr.notifier_seq)) {
+ /* We collided with the mmu notifier; we need to retry */
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_userptr_drop_ref(obj);
+}
+
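Together the three helpers form the pin/validate/release protocol that the execbuffer path (updated elsewhere in this series) is expected to follow; schematically:

	err = i915_gem_object_userptr_submit_init(obj);	/* pin fresh pages */
	if (err)
		return err;

	/* ... lock the object, bind the vma, construct the request ... */

	err = i915_gem_object_userptr_submit_done(obj);	/* pages still valid? */
	if (err == -EAGAIN)
		goto retry;	/* restart from submit_init() */

	/* ... once the request is queued ... */
	i915_gem_object_userptr_submit_fini(obj);	/* drop the page_ref */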
+int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ err = i915_gem_object_userptr_submit_init(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_lock_interruptible(obj, NULL);
+ if (!err) {
+ /*
+ * Since we only check validity and do not actually use the
+ * pages, it doesn't matter if we collide with the mmu notifier,
+ * so -EAGAIN handling is not required.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (!err)
+ i915_gem_object_unpin_pages(obj);
+
+ i915_gem_object_unlock(obj);
+ }
+
+ i915_gem_object_userptr_submit_fini(obj);
+ return err;
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
- i915_gem_userptr_release__mmu_notifier(obj);
- i915_gem_userptr_release__mm_struct(obj);
+ GEM_WARN_ON(obj->userptr.page_ref);
+
+ mmu_interval_notifier_remove(&obj->userptr.notifier);
+ obj->userptr.notifier.mm = NULL;
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
- if (obj->userptr.mmu_object)
- return 0;
+ drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");
+
+ return -EINVAL;
+}
+
+static int
+i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");
+
+ return -EINVAL;
+}
- return i915_gem_userptr_init__mmu_notifier(obj, 0);
+static int
+i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
+{
+ drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");
+
+ return -EINVAL;
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.name = "i915_gem_object_userptr",
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE |
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_MMAP |
- I915_GEM_OBJECT_ASYNC_CANCEL,
+ I915_GEM_OBJECT_IS_PROXY,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
+ .pwrite = i915_gem_userptr_pwrite,
+ .pread = i915_gem_userptr_pread,
.release = i915_gem_userptr_release,
};
+#endif
+
/*
* Creates a new mm object that wraps some normal memory from the process
* context - user memory.
@@ -752,12 +490,12 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
- static struct lock_class_key lock_class;
+ static struct lock_class_key __maybe_unused lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
- u32 handle;
+ struct drm_i915_gem_object __maybe_unused *obj;
+ int __maybe_unused ret;
+ u32 __maybe_unused handle;
if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
/* We cannot support coherent userptr objects on hw without
@@ -770,21 +508,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
- /*
- * XXX: There is a prevalence of the assumption that we fit the
- * object's page count inside a 32bit _signed_ variable. Let's document
- * this and catch if we ever need to fix it. In the meantime, if you do
- * spot such a local variable, please consider fixing!
- *
- * Aside from our own locals (for which we have no excuse!):
- * - sg_table embeds unsigned int for num_pages
- * - get_user_pages*() mixed ints with longs
- */
-
- if (args->user_size >> PAGE_SHIFT > INT_MAX)
- return -E2BIG;
-
- if (overflows_type(args->user_size, obj->base.size))
+ if (i915_gem_object_size_2big(args->user_size))
return -E2BIG;
if (!args->user_size)
@@ -796,6 +520,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
return -EFAULT;
+ if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
+ return -ENODEV;
+
if (args->flags & I915_USERPTR_READ_ONLY) {
/*
* On almost all of the older hw, we cannot tell the GPU that
@@ -805,17 +532,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
+#ifdef CONFIG_MMU_NOTIFIER
obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
+ I915_BO_ALLOC_STRUCT_PAGE);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
+ obj->userptr.notifier_seq = ULONG_MAX;
if (args->flags & I915_USERPTR_READ_ONLY)
i915_gem_object_set_readonly(obj);
@@ -823,9 +553,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* at binding. This means that we need to hook into the mmu_notifier
* in order to detect if the mmu is destroyed.
*/
- ret = i915_gem_userptr_init__mm_struct(obj);
- if (ret == 0)
- ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+ ret = i915_gem_userptr_init__mmu_notifier(obj);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -836,24 +564,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
args->handle = handle;
return 0;
+#else
+ return -ENODEV;
+#endif
}
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
- spin_lock_init(&dev_priv->mm_lock);
- hash_init(dev_priv->mm_structs);
-
- dev_priv->mm.userptr_wq =
- alloc_workqueue("i915-userptr-acquire",
- WQ_HIGHPRI | WQ_UNBOUND,
- 0);
- if (!dev_priv->mm.userptr_wq)
- return -ENOMEM;
+#ifdef CONFIG_MMU_NOTIFIER
+ spin_lock_init(&dev_priv->mm.notifier_lock);
+#endif
return 0;
}
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
- destroy_workqueue(dev_priv->mm.userptr_wq);
}
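For orientation, the obj->userptr fields this rewrite leans on are declared elsewhere in the series; roughly (a sketch, not the exact declaration):

	struct i915_gem_userptr {
		uintptr_t ptr;				/* user address */
		unsigned long notifier_seq;		/* seq when pvec was pinned */
		struct mmu_interval_notifier notifier;	/* per-object interval */
		struct page **pvec;			/* the pinned user pages */
		int page_ref;				/* guarded by mm.notifier_lock */
	};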
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 2fb501a78a85..0c8ecfdf5405 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -89,7 +89,6 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops huge_ops = {
.name = "huge-gem",
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
};
@@ -115,7 +114,8 @@ huge_gem_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops, &lock_class);
+ i915_gem_object_init(obj, &huge_ops, &lock_class,
+ I915_BO_ALLOC_STRUCT_PAGE);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index aacf4856ccb4..dadd485bc52f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -140,8 +140,7 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops huge_page_ops = {
.name = "huge-gem",
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = get_huge_pages,
.put_pages = put_huge_pages,
};
@@ -168,7 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops, &lock_class);
+ i915_gem_object_init(obj, &huge_page_ops, &lock_class,
+ I915_BO_ALLOC_STRUCT_PAGE);
i915_gem_object_set_volatile(obj);
@@ -319,9 +319,9 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
if (single)
- i915_gem_object_init(obj, &fake_ops_single, &lock_class);
+ i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
else
- i915_gem_object_init(obj, &fake_ops, &lock_class);
+ i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
i915_gem_object_set_volatile(obj);
@@ -589,7 +589,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
goto out_put;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto out_put;
@@ -653,15 +653,19 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
break;
}
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
}
return 0;
out_unpin:
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unlock(obj);
out_put:
i915_gem_object_put(obj);
@@ -675,8 +679,10 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) {
list_del(&obj->st_link);
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
}
}
@@ -713,7 +719,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
break;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
break;
@@ -889,7 +895,7 @@ static int igt_mock_ppgtt_64K(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto out_object_put;
@@ -943,8 +949,10 @@ static int igt_mock_ppgtt_64K(void *arg)
}
i915_vma_unpin(vma);
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
}
}
@@ -954,7 +962,9 @@ static int igt_mock_ppgtt_64K(void *arg)
out_vma_unpin:
i915_vma_unpin(vma);
out_object_unpin:
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unlock(obj);
out_object_put:
i915_gem_object_put(obj);
@@ -1024,7 +1034,7 @@ static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (err)
return err;
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -1304,7 +1314,7 @@ try_again:
return err;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
if (err == -ENXIO || err == -E2BIG) {
i915_gem_object_put(obj);
@@ -1327,8 +1337,10 @@ try_again:
__func__, size, i);
}
out_unpin:
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
out_put:
i915_gem_object_put(obj);
@@ -1402,7 +1414,7 @@ static int igt_ppgtt_sanity_check(void *arg)
return err;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
goto out;
@@ -1416,8 +1428,10 @@ static int igt_ppgtt_sanity_check(void *arg)
err = igt_write_huge(ctx, obj);
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
if (err) {
@@ -1462,7 +1476,7 @@ static int igt_tmpfs_fallback(void *arg)
goto out_restore;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_put;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 6a674a7994df..d36873885cc1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -45,7 +45,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
goto err_flush;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_put;
@@ -157,7 +157,7 @@ static int prepare_blit(const struct tiled_blits *t,
u32 src_pitch, dst_pitch;
u32 cmd, *cs;
- cs = i915_gem_object_pin_map(batch, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -377,7 +377,7 @@ static int verify_buffer(const struct tiled_blits *t,
y = i915_prandom_u32_max_state(t->height, prng);
p = y * t->width + x;
- vaddr = i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(buf->vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -564,7 +564,7 @@ static int tiled_blits_prepare(struct tiled_blits *t,
int err;
int i;
- map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(t->scratch.vma->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 1117d2a44518..e937b6629019 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -160,7 +160,7 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
if (err)
return err;
- map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -183,7 +183,7 @@ static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
if (err)
return err;
- map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -200,17 +200,15 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
u32 *cs;
int err;
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
if (err)
goto out_unlock;
- vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_unlock;
- }
-
rq = intel_engine_create_kernel_request(ctx->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index d3f87dc4eda3..5fef592390cb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1094,7 +1094,7 @@ __read_slice_count(struct intel_context *ce,
if (ret < 0)
return ret;
- buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ buf = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
return ret;
@@ -1511,7 +1511,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (IS_ERR(obj))
return PTR_ERR(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out;
@@ -1622,7 +1622,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto out_vm;
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out;
@@ -1658,7 +1658,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto out_vm;
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out;
@@ -1715,7 +1715,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto out_vm;
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto out_vm;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index b6d43880b0c1..dd74bc09ec88 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -194,7 +194,7 @@ static int igt_dmabuf_import_ownership(void *arg)
dma_buf_put(dmabuf);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index e1d50a5a1477..4df505e4c53a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -116,7 +116,7 @@ static int igt_gpu_reloc(void *arg)
if (IS_ERR(scratch))
return PTR_ERR(scratch);
- map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(scratch, I915_MAP_WC);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_scratch;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index d429c7643ff2..5cf6df49c333 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -322,7 +322,7 @@ static int igt_partial_tiling(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
@@ -459,7 +459,7 @@ static int igt_smoke_tiling(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
@@ -798,7 +798,7 @@ static int wc_set(struct drm_i915_gem_object *obj)
{
void *vaddr;
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -814,7 +814,7 @@ static int wc_check(struct drm_i915_gem_object *obj)
void *vaddr;
int err = 0;
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -835,9 +835,8 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
return false;
if (type != I915_MMAP_TYPE_GTT &&
- !i915_gem_object_type_has(obj,
- I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_HAS_IOMEM))
+ !i915_gem_object_has_struct_page(obj) &&
+ !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
return false;
return true;
@@ -977,10 +976,8 @@ static const char *repr_mmap_type(enum i915_mmap_type type)
static bool can_access(const struct drm_i915_gem_object *obj)
{
- unsigned int flags =
- I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
-
- return i915_gem_object_type_has(obj, flags);
+ return i915_gem_object_has_struct_page(obj) ||
+ i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}
static int __igt_mmap_access(struct drm_i915_private *i915,
@@ -1319,7 +1316,9 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
}
if (type != I915_MMAP_TYPE_GTT) {
+ i915_gem_object_lock(obj, NULL);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
if (i915_gem_object_has_pages(obj)) {
pr_err("Failed to put-pages object!\n");
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index bf853c40ec65..740ee8086a27 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -47,7 +47,7 @@ static int igt_gem_huge(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 23b6e11bbc3e..8c335d1a8406 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -220,7 +220,7 @@ static int igt_fill_blt_thread(void *arg)
return PTR_ERR(ctx);
prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
+ ctx->sched.priority = prio;
}
ce = i915_gem_context_get_engine(ctx, 0);
@@ -262,7 +262,7 @@ static int igt_fill_blt_thread(void *arg)
goto err_flush;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_put;
@@ -338,7 +338,7 @@ static int igt_copy_blt_thread(void *arg)
return PTR_ERR(ctx);
prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
+ ctx->sched.priority = prio;
}
ce = i915_gem_context_get_engine(ctx, 0);
@@ -380,7 +380,7 @@ static int igt_copy_blt_thread(void *arg)
goto err_flush;
}
- vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(src, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_put_src;
@@ -400,7 +400,7 @@ static int igt_copy_blt_thread(void *arg)
goto err_put_src;
}
- vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(dst, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_put_dst;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 8cee68c6a6dc..3a6ce87f8b52 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -25,13 +25,21 @@ static int mock_phys_object(void *arg)
goto out;
}
+ if (!i915_gem_object_has_struct_page(obj)) {
+ err = -EINVAL;
+ pr_err("shmem has no struct page\n");
+ goto out_obj;
+ }
+
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+ i915_gem_object_unlock(obj);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;
}
- if (obj->ops != &i915_gem_phys_ops) {
+ if (i915_gem_object_has_struct_page(obj)) {
pr_err("i915_gem_object_attach_phys did not create a phys object\n");
err = -EINVAL;
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index d6783061bc72..0b092c62bb34 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -55,7 +55,7 @@ igt_emit_store_dw(struct i915_vma *vma,
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
index 3a21cf63b3f0..591eb60785db 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: MIT
-
/*
* Copyright © 2019 Intel Corporation
*/
@@ -37,6 +36,7 @@ void intel_gt_debugfs_register_files(struct dentry *root,
{
while (count--) {
umode_t mode = files->fops->write ? 0644 : 0444;
+
if (!files->eval || files->eval(data))
debugfs_create_file(files->name,
mode, root, data,
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index b491a64919c8..9646200d2792 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -143,7 +143,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
int flush, int post)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
index ce38d1bcaba3..b388ceeeb1c9 100644
--- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c
@@ -161,7 +161,7 @@ u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ *cs++ = i915_request_active_seqno(rq) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
@@ -359,7 +359,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
- *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
+ *cs++ = i915_request_active_seqno(rq);
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -374,7 +374,7 @@ u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -394,7 +394,7 @@ u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
index 3357228f3304..6a61a5c3a85a 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -59,9 +59,9 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
for (iter = gen6_pde_index(start); \
length > 0 && iter < I915_PDES && \
(pt = i915_pt_entry(pd, iter), true); \
- ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ ({ u32 temp = ALIGN(start + 1, 1 << GEN6_PDE_SHIFT); \
temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
+ start += temp; length -= temp; }), ++iter)
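Inside the GNU statement expression used by this iterator, the comma operator and two separate statements are equivalent; the semicolon form simply reads as ordinary statements:

	({ start += temp, length -= temp; })	/* comma expression */
	({ start += temp; length -= temp; })	/* two statements */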
#define gen6_for_all_pdes(pt, pd, iter) \
for (iter = 0; \
diff --git a/drivers/gpu/drm/i915/gt/gen6_renderstate.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
index 11c8e7b3dd7c..555e83f3a93a 100644
--- a/drivers/gpu/drm/i915/gt/gen6_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderstate.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
index 655180646152..c36e84d30d24 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 07ba524da90b..732c2ed1d933 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_B0))
dc_flush_wa = true;
}
@@ -338,15 +338,14 @@ static u32 preempt_address(struct intel_engine_cs *engine)
static u32 hwsp_offset(const struct i915_request *rq)
{
- const struct intel_timeline_cacheline *cl;
+ const struct intel_timeline *tl;
- /* Before the request is executed, the timeline/cachline is fixed */
+ /* Before the request is executed, the timeline is fixed */
+ tl = rcu_dereference_protected(rq->timeline,
+ !i915_request_signaled(rq));
- cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
- if (cl)
- return cl->ggtt_offset;
-
- return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+ /* See the comment in i915_request_active_seqno(). */
+ return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}
int gen8_emit_init_breadcrumb(struct i915_request *rq)
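With the cacheline indirection removed, each request remembers the address of its own seqno slot (rq->hwsp_seqno) inside the timeline's status page, and the GGTT address is rebuilt from the two halves; schematically:

	/* page of the HWSP in the GGTT, plus the slot within that page */
	u32 ggtt = page_mask_bits(tl->hwsp_offset) +
		   offset_in_page(rq->hwsp_seqno);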
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 755522ced60d..176c19633412 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
+#include "gem/i915_gem_lmem.h"
+
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -35,6 +37,9 @@ static u64 gen8_pte_encode(dma_addr_t addr,
if (unlikely(flags & PTE_READ_ONLY))
pte &= ~_PAGE_RW;
+ if (flags & PTE_LM)
+ pte |= GEN12_PPGTT_PTE_LM;
+
switch (level) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED;
@@ -145,6 +150,7 @@ static unsigned int gen8_pt_count(u64 start, u64 end)
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
+
return (vm->total + (1ull << shift) - 1) >> shift;
}
@@ -557,6 +563,7 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
static int gen8_init_scratch(struct i915_address_space *vm)
{
+ u32 pte_flags;
int ret;
int i;
@@ -580,9 +587,13 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (ret)
return ret;
+ pte_flags = vm->has_read_only;
+ if (i915_gem_object_is_lmem(vm->scratch[0]))
+ pte_flags |= PTE_LM;
+
vm->scratch[0]->encode =
gen8_pte_encode(px_dma(vm->scratch[0]),
- I915_CACHE_LLC, vm->has_read_only);
+ I915_CACHE_LLC, pte_flags);
for (i = 1; i <= vm->top; i++) {
struct drm_i915_gem_object *obj;
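PTE_LM marks entries that point at device-local memory and, per gen8_pte_encode() above, turns into the GEN12_PPGTT_PTE_LM bit. A sketch of the resulting encoding for an uncached local-memory page:

	u64 pte = gen8_pte_encode(addr, I915_CACHE_NONE, PTE_LM);
	/* == addr | _PAGE_PRESENT | _PAGE_RW |
	 *    GEN12_PPGTT_PTE_LM | PPAT_UNCACHED */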
diff --git a/drivers/gpu/drm/i915/gt/gen8_renderstate.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
index 95288a34c15d..ef9d7b0dd2da 100644
--- a/drivers/gpu/drm/i915/gt/gen8_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
diff --git a/drivers/gpu/drm/i915/gt/gen9_renderstate.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
index 7d3ac02f0177..428b724ded3e 100644
--- a/drivers/gpu/drm/i915/gt/gen9_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
@@ -1,25 +1,7 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
* Generated by: intel-gpu-tools-1.19-177-g68e2eab2
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 34a645d6babd..38cc42783dfb 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2015-2021 Intel Corporation
*/
#include <linux/kthread.h>
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 349e7fa1488d..17cf2640b082 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d24ab6fa0ee5..f83a73a2b39f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
index f053d8633fe2..3ecacc675f41 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_param.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -6,9 +6,18 @@
#ifndef INTEL_CONTEXT_PARAM_H
#define INTEL_CONTEXT_PARAM_H
-struct intel_context;
+#include <linux/types.h>
+
+#include "intel_context.h"
int intel_context_set_ring_size(struct intel_context *ce, long sz);
long intel_context_get_ring_size(struct intel_context *ce);
+static inline int
+intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
+{
+ ce->watchdog.timeout_us = timeout_us;
+ return 0;
+}
+
#endif /* INTEL_CONTEXT_PARAM_H */
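The helper above only records a per-context deadline in ce->watchdog.timeout_us; the request machinery presumably samples it when requests are created later in the series. A minimal usage sketch — example_arm_watchdog() and the 100ms value are illustrative, not part of this patch:

static void example_arm_watchdog(struct intel_context *ce)
{
	/* Cancel any request on this context still unfinished after 100ms */
	intel_context_set_watchdog_us(ce, 100 * USEC_PER_MSEC);
}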
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e10d78601bbd..ed8c447a7346 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -97,6 +96,10 @@ struct intel_context {
#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
#define CONTEXT_NOPREEMPT 8
+ struct {
+ u64 timeout_us;
+ } watchdog;
+
u32 *lrc_reg_state;
union {
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 9cf555d6842b..efe935f80c1a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include <drm/drm_print.h>
@@ -619,6 +600,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
struct i915_vma *vma)
{
unsigned int flags;
@@ -639,12 +621,13 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
else
flags = PIN_HIGH;
- return i915_ggtt_pin(vma, NULL, 0, flags);
+ return i915_ggtt_pin(vma, ww, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
void *vaddr;
int ret;
@@ -670,30 +653,39 @@ static int init_status_page(struct intel_engine_cs *engine)
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_put;
}
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
+ ret = pin_ggtt_status_page(engine, &ww, vma);
+ if (ret)
+ goto err;
+
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto err;
+ goto err_unpin;
}
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
engine->status_page.vma = vma;
- if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
- ret = pin_ggtt_status_page(engine, vma);
- if (ret)
- goto err_unpin;
- }
-
- return 0;
-
err_unpin:
- i915_gem_object_unpin_map(obj);
+ if (ret)
+ i915_vma_unpin(vma);
err:
- i915_gem_object_put(obj);
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+err_put:
+ if (ret)
+ i915_gem_object_put(obj);
return ret;
}
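The rework above follows the usual i915 ww-lock idiom: take all object locks under one acquire context, back off on -EDEADLK, and retry. A distilled sketch, with example_ww_pin() and its arguments purely illustrative:

static int example_ww_pin(struct drm_i915_gem_object *obj, struct i915_vma *vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_ggtt_pin(vma, &ww, 0, PIN_HIGH);
	if (err == -EDEADLK) {
		/* Drop every held lock, wait on the contended one, retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}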
@@ -763,6 +755,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+ frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
@@ -1239,14 +1232,14 @@ void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
- if (!t->func)
+ if (!t->callback)
return;
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
if (__tasklet_is_enabled(t))
- t->func(t->data);
+ t->callback(t);
tasklet_unlock(t);
}
local_bh_enable();
@@ -1273,14 +1266,8 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Waiting to drain ELSP? */
- if (execlists_active(&engine->execlists)) {
- synchronize_hardirq(engine->i915->drm.pdev->irq);
-
- intel_engine_flush_submission(engine);
-
- if (execlists_active(&engine->execlists))
- return false;
- }
+ synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
+ intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */
if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
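The t->func/t->data to t->callback changes are part of the tree-wide tasklet API conversion: the callback now receives the tasklet itself and recovers its container. A minimal sketch of the pattern, with the example_* names being illustrative:

#include <linux/interrupt.h>

struct example_engine {
	struct tasklet_struct tasklet;
};

static void example_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the tasklet member */
	struct example_engine *e = from_tasklet(e, t, tasklet);

	/* ... process e ... */
	(void)e;
}

static void example_setup(struct example_engine *e)
{
	tasklet_setup(&e->tasklet, example_tasklet);
}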
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index d7be2b9339f9..b99ac41695f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -32,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
- mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay);
+ mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay + 1);
return true;
}
@@ -81,9 +80,7 @@ static void show_heartbeat(const struct i915_request *rq,
static void heartbeat(struct work_struct *wrk)
{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
struct intel_engine_cs *engine =
container_of(wrk, typeof(*engine), heartbeat.work.work);
struct intel_context *ce = engine->kernel_context;
@@ -106,6 +103,13 @@ static void heartbeat(struct work_struct *wrk)
goto out;
if (engine->heartbeat.systole) {
+ long delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+
+ /* Safeguard against too-fast worker invocations */
+ if (!time_after(jiffies,
+ rq->emitted_jiffies + msecs_to_jiffies(delay)))
+ goto out;
+
if (!i915_sw_fence_signaled(&rq->submit)) {
/*
* Not yet submitted, system is stalled.
@@ -125,9 +129,9 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
- attr.priority = I915_PRIORITY_MASK;
+ attr.priority = 0;
if (rq->sched.attr.priority >= attr.priority)
- attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+ attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_BARRIER;
@@ -143,6 +147,8 @@ static void heartbeat(struct work_struct *wrk)
"stopped heartbeat on %s",
engine->name);
}
+
+ rq->emitted_jiffies = jiffies;
goto out;
}
@@ -279,15 +285,14 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
mutex_unlock(&ce->timeline->mutex);
}
+ intel_engine_flush_submission(engine);
intel_engine_pm_put(engine);
return err;
}
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
int err;
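With the user-priority shift removed, the heartbeat now climbs a plain priority ladder: each missed beat re-submits the pulse one rung higher, ending at I915_PRIORITY_BARRIER before the engine is declared hung. A hedged restatement of that ladder, where rq_prio is the previous pulse's priority and example_escalate() is illustrative:

static int example_escalate(int rq_prio)
{
	int prio = 0;				/* first nudge: default priority */

	if (rq_prio >= prio)
		prio = I915_PRIORITY_HEARTBEAT;	/* second missed beat */
	if (rq_prio >= prio)
		prio = I915_PRIORITY_BARRIER;	/* last resort before reset */

	return prio;
}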
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
index a7b8c0f9e005..a488ea3e84a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e67d09259dd0..7c9af86fdb1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -27,12 +26,16 @@ static void dbg_poison_ce(struct intel_context *ce)
int type = i915_coherent_map_type(ce->engine->i915);
void *map;
+ if (!i915_gem_object_trylock(obj))
+ return;
+
map = i915_gem_object_pin_map(obj, type);
if (!IS_ERR(map)) {
memset(map, CONTEXT_REDZONE, obj->base.size);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
+ i915_gem_object_unlock(obj);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 418df0a13145..70ea46d6cfb0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index d2346b425547..883bafc44902 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 34e6096f196e..3cca7ea2d6ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -49,7 +48,8 @@ static const u8 uabi_classes[] = {
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
};
-static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
+static int engine_cmp(void *priv, const struct list_head *A,
+ const struct list_head *B)
{
const struct intel_engine_cs *a =
container_of((struct rb_node *)A, typeof(*a), uabi_node);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
index f845ea1cbfaa..3dc7e8ab9fbc 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index ac1be7a632d3..de124870af44 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -274,22 +274,13 @@ static int effective_prio(const struct i915_request *rq)
static int queue_prio(const struct intel_engine_execlists *execlists)
{
- struct i915_priolist *p;
struct rb_node *rb;
rb = rb_first_cached(&execlists->queue);
if (!rb)
return INT_MIN;
- /*
- * As the priolist[] are inverted, with the highest priority in [0],
- * we have to flip the index value to become priority.
- */
- p = to_priolist(rb);
- if (!I915_USER_PRIORITY_SHIFT)
- return p->priority;
-
- return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+ return to_priolist(rb)->priority;
}
static int virtual_prio(const struct intel_engine_execlists *el)
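queue_prio() no longer has to undo the old inverted priolist indexing; the head of the rbtree carries the effective priority directly. Restated as a self-contained sketch (to_priolist() is the file-local helper, example_queue_prio() is not part of the patch):

static int example_queue_prio(struct rb_root_cached *queue)
{
	struct rb_node *rb = rb_first_cached(queue);

	/* An empty queue sorts below every real priority */
	return rb ? to_priolist(rb)->priority : INT_MIN;
}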
@@ -470,6 +461,11 @@ static void reset_active(struct i915_request *rq,
ce->lrc.lrca = lrc_update_regs(ce, engine, head);
}
+static bool bad_request(const struct i915_request *rq)
+{
+ return rq->fence.error && i915_request_started(rq);
+}
+
static struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -482,7 +478,7 @@ __execlists_schedule_in(struct i915_request *rq)
!intel_engine_has_heartbeat(engine)))
intel_context_set_banned(ce);
- if (unlikely(intel_context_is_banned(ce)))
+ if (unlikely(intel_context_is_banned(ce) || bad_request(rq)))
reset_active(rq, engine);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -752,9 +748,8 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- struct i915_request * const *port, *rq;
+ struct i915_request * const *port, *rq, *prev = NULL;
struct intel_context *ce = NULL;
- bool sentinel = false;
u32 ccid = -1;
trace_ports(execlists, msg, execlists->pending);
@@ -804,15 +799,20 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
* Sentinels are supposed to be the last request so they flush
* the current execution off the HW. Check that they are the only
* request in the pending submission.
+ *
+ * NB: Due to the async nature of preempt-to-busy and request
+	 * cancellation, we need to handle the case where a request
+ * becomes a sentinel in parallel to CSB processing.
*/
- if (sentinel) {
+ if (prev && i915_request_has_sentinel(prev) &&
+ !READ_ONCE(prev->fence.error)) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
- sentinel = i915_request_has_sentinel(rq);
+ prev = rq;
/*
* We want virtual requests to only be in the first slot so
@@ -948,7 +948,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (__i915_request_is_complete(next))
return true;
- if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ if (unlikely((i915_request_flags(prev) | i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
@@ -1072,7 +1072,6 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
__i915_request_has_started(w) &&
!__i915_request_is_complete(rq));
- GEM_BUG_ON(i915_request_is_active(w));
if (!i915_request_is_ready(w))
continue;
@@ -1080,6 +1079,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
continue;
GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ GEM_BUG_ON(i915_request_is_active(w));
list_move_tail(&w->sched.link, &list);
}
@@ -1208,7 +1208,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
return 0;
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(intel_context_is_banned(rq->context)))
+ if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return 1;
return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1452,9 +1452,8 @@ unlock:
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
+ priolist_for_each_request_consume(rq, rn, p) {
bool merge = true;
/*
@@ -2344,9 +2343,10 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-static void execlists_submission_tasklet(unsigned long data)
+static void execlists_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs * const engine =
+ from_tasklet(engine, t, execlists.tasklet);
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
@@ -2457,11 +2457,31 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static int
+__execlists_context_pre_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
+{
+ int err;
+
+ err = lrc_pre_pin(ce, engine, ww, vaddr);
+ if (err)
+ return err;
+
+ if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
+ lrc_init_state(ce, engine, *vaddr);
+
+ __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
+ }
+
+ return 0;
+}
+
static int execlists_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
- return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+ return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
}
static int execlists_context_pin(struct intel_context *ce, void *vaddr)
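__execlists_context_pre_pin() folds the one-time register-state setup into the pin path behind a test-and-set guard. The guard in isolation, as a hedged sketch with example_* naming:

static void example_init_state_once(struct intel_context *ce,
				    struct intel_engine_cs *engine,
				    void *vaddr)
{
	/* __test_and_set_bit() returns the old value: the body runs once */
	if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
		lrc_init_state(ce, engine, vaddr);
}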
@@ -2729,31 +2749,11 @@ static void enable_execlists(struct intel_engine_cs *engine)
enable_error_interrupt(engine);
}
-static bool unexpected_starting_state(struct intel_engine_cs *engine)
-{
- bool unexpected = false;
-
- if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
- drm_dbg(&engine->i915->drm,
- "STOP_RING still set in RING_MI_MODE\n");
- unexpected = true;
- }
-
- return unexpected;
-}
-
static int execlists_resume(struct intel_engine_cs *engine)
{
intel_mocs_init_engine(engine);
-
intel_breadcrumbs_reset(engine->breadcrumbs);
- if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- intel_engine_dump(engine, &p, NULL);
- }
-
enable_execlists(engine);
return 0;
@@ -2924,9 +2924,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
rcu_read_unlock();
}
-static void nop_submission_tasklet(unsigned long data)
+static void nop_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs * const engine =
+ from_tasklet(engine, t, execlists.tasklet);
/* The driver is wedged; don't process any more events. */
WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
@@ -2962,17 +2963,18 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- i915_request_mark_eio(rq);
+ i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
- i915_request_mark_eio(rq);
- __i915_request_submit(rq);
+ priolist_for_each_request_consume(rq, rn, p) {
+ if (i915_request_mark_eio(rq)) {
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -2981,7 +2983,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
/* On-hold requests will be flushed to timeline upon their release */
list_for_each_entry(rq, &engine->active.hold, sched.link)
- i915_request_mark_eio(rq);
+ i915_request_put(i915_request_mark_eio(rq));
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
@@ -2994,10 +2996,11 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
spin_lock(&ve->base.active.lock);
rq = fetch_and_zero(&ve->request);
if (rq) {
- i915_request_mark_eio(rq);
-
- rq->engine = engine;
- __i915_request_submit(rq);
+ if (i915_request_mark_eio(rq)) {
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
i915_request_put(rq);
ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -3011,7 +3014,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
+ execlists->tasklet.callback = nop_submission_tasklet;
spin_unlock_irqrestore(&engine->active.lock, flags);
rcu_read_unlock();
@@ -3072,7 +3075,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = execlists_submission_tasklet;
+ engine->execlists.tasklet.callback = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
engine->reset.rewind = execlists_reset_rewind;
@@ -3119,7 +3122,7 @@ static void execlists_release(struct intel_engine_cs *engine)
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
- /* Default vfuncs which can be overriden by each engine. */
+ /* Default vfuncs which can be overridden by each engine. */
engine->resume = execlists_resume;
@@ -3195,8 +3198,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
- tasklet_init(&engine->execlists.tasklet,
- execlists_submission_tasklet, (unsigned long)engine);
+ tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
@@ -3244,7 +3246,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
static struct list_head *virtual_queue(struct virtual_engine *ve)
{
- return &ve->base.execlists.default_priolist.requests[0];
+ return &ve->base.execlists.default_priolist.requests;
}
static void rcu_virtual_context_destroy(struct work_struct *wrk)
@@ -3360,13 +3362,13 @@ static int virtual_context_alloc(struct intel_context *ce)
}
static int virtual_context_pre_pin(struct intel_context *ce,
- struct i915_gem_ww_ctx *ww,
- void **vaddr)
+ struct i915_gem_ww_ctx *ww,
+ void **vaddr)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- /* Note: we must use a real engine class for setting up reg state */
- return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr);
+ /* Note: we must use a real engine class for setting up reg state */
+ return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
}
static int virtual_context_pin(struct intel_context *ce, void *vaddr)
@@ -3438,9 +3440,10 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
return mask;
}
-static void virtual_submission_tasklet(unsigned long data)
+static void virtual_submission_tasklet(struct tasklet_struct *t)
{
- struct virtual_engine * const ve = (struct virtual_engine *)data;
+ struct virtual_engine * const ve =
+ from_tasklet(ve, t, base.execlists.tasklet);
const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -3650,9 +3653,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
INIT_LIST_HEAD(virtual_queue(ve));
ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_init(&ve->base.execlists.tasklet,
- virtual_submission_tasklet,
- (unsigned long)ve);
+ tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
intel_context_init(&ve->context, &ve->base);
@@ -3680,7 +3681,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
* layering if we handle cloning of the requests and
* submitting a copy into each backend.
*/
- if (sibling->execlists.tasklet.func !=
+ if (sibling->execlists.tasklet.callback !=
execlists_submission_tasklet) {
err = -ENODEV;
goto err_put;
@@ -3840,9 +3841,8 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
- priolist_for_each_request(rq, p, i) {
+ priolist_for_each_request(rq, p) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index a8fd7adefd82..fd61dae820e9 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_EXECLISTS_SUBMISSION_H__
#define __INTEL_EXECLISTS_SUBMISSION_H__
+#include <linux/llist.h>
#include <linux/types.h>
struct drm_printer;
@@ -13,6 +14,7 @@ struct drm_printer;
struct i915_request;
struct intel_context;
struct intel_engine_cs;
+struct intel_gt;
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 700588bc9d57..670c1271e7d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -10,6 +10,8 @@
#include <drm/i915_drm.h>
+#include "gem/i915_gem_lmem.h"
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -92,7 +94,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
}
/*
- * Certain Gen5 chipsets require require idling the GPU before
+ * Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static bool needs_idle_maps(struct drm_i915_private *i915)
@@ -189,7 +191,12 @@ static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
{
- return addr | _PAGE_PRESENT;
+ gen8_pte_t pte = addr | _PAGE_PRESENT;
+
+ if (flags & PTE_LM)
+ pte |= GEN12_GGTT_PTE_LM;
+
+ return pte;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
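gen8_ggtt_pte_encode() now honours PTE_LM: pages backed by device-local memory get GEN12_GGTT_PTE_LM on top of the present bit. An illustrative re-derivation (example_encode() is not part of the patch):

static u64 example_encode(dma_addr_t addr, bool lmem)
{
	u64 pte = addr | _PAGE_PRESENT;

	if (lmem)
		pte |= GEN12_GGTT_PTE_LM;	/* BIT(1) in the GGTT PTE */

	return pte;
}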
@@ -201,13 +208,13 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
enum i915_cache_level level,
- u32 unused)
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
ggtt->invalidate(ggtt);
}
@@ -217,7 +224,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *gte;
gen8_pte_t __iomem *end;
@@ -459,6 +466,8 @@ static void ggtt_bind_vma(struct i915_address_space *vm,
pte_flags = 0;
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -647,7 +656,9 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
if (err)
goto err_ppgtt;
+ i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
@@ -734,6 +745,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
+ dma_resv_fini(&ggtt->vm.resv);
arch_phys_wc_del(ggtt->mtrr);
@@ -792,8 +804,9 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
+ u32 pte_flags;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
@@ -823,9 +836,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
ggtt->vm.scratch[0]->encode =
ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, 0);
+ I915_CACHE_NONE, pte_flags);
return 0;
}
@@ -862,7 +879,7 @@ static struct resource pci_resource(struct pci_dev *pdev, int bar)
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
@@ -1006,7 +1023,7 @@ static u64 iris_pte_encode(dma_addr_t addr,
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
@@ -1069,7 +1086,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
phys_addr_t gmadr_base;
int ret;
- ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
@@ -1114,7 +1131,8 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
- ggtt->vm.dma = &i915->drm.pdev->dev;
+ ggtt->vm.dma = i915->drm.dev;
+ dma_resv_init(&ggtt->vm.resv);
if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -1122,8 +1140,10 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
ret = gen6_gmch_probe(ggtt);
else
ret = gen8_gmch_probe(ggtt);
- if (ret)
+ if (ret) {
+ dma_resv_fini(&ggtt->vm.resv);
return ret;
+ }
if ((ggtt->vm.total - 1) >> 32) {
drm_err(&i915->drm,
@@ -1247,14 +1267,16 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
- unsigned int stride,
+ unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
unsigned int src_idx;
for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column + offset;
+ unsigned int left;
+
+ src_idx = src_stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
/*
@@ -1267,8 +1289,25 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
- src_idx -= stride;
+ src_idx -= src_stride;
}
+
+ left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+	 * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
return sg;
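The padding convention introduced here encodes "insert N dummy PTEs" as a scatterlist entry with a NULL page and zero DMA address; the display engine never reads through those PTEs, so only the length matters. Factored out as a hedged helper sketch:

static struct scatterlist *
example_add_padding(struct sg_table *st, struct scatterlist *sg,
		    unsigned int padding_ptes)
{
	st->nents++;
	sg_set_page(sg, NULL, padding_ptes * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = padding_ptes * I915_GTT_PAGE_SIZE;
	return sg_next(sg);
}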
@@ -1297,11 +1336,12 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
st->nents = 0;
sg = st->sgl;
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].stride, st, sg);
- }
+ rot_info->plane[i].src_stride,
+ rot_info->plane[i].dst_stride,
+ st, sg);
return st;
@@ -1319,7 +1359,7 @@ err_st_alloc:
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
- unsigned int stride,
+ unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int row;
@@ -1352,7 +1392,24 @@ remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
left -= length;
}
- offset += stride - width;
+ offset += src_stride - width;
+
+ left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ st->nents++;
+
+ /*
+ * The DE ignores the PTEs for the padding tiles, the sg entry
+	 * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, left, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = left;
+ sg = sg_next(sg);
}
return sg;
@@ -1384,7 +1441,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
sg = remap_pages(obj, rem_info->plane[i].offset,
rem_info->plane[i].width, rem_info->plane[i].height,
- rem_info->plane[i].stride, st, sg);
+ rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
+ st, sg);
}
i915_sg_trim(st);
@@ -1420,7 +1478,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
+ iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
GEM_BUG_ON(!iter);
sg = st->sgl;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 67de2b189598..e72b7a0dc316 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -1,24 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2008-2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
*/
#include "i915_drv.h"
@@ -609,6 +591,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
}
} else {
u32 dimm_c0, dimm_c1;
+
dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
@@ -798,10 +781,12 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
i = 0;
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
+
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
swizzle_page(page);
set_page_dirty(page);
}
+
i++;
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 9eef679e1311..25340be5ecf0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef __INTEL_GGTT_FENCING_H__
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 534e435f20bc..14e2ffb6c0e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
 * Copyright © 2003-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d8e1ab412634..8d77dcbad059 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,6 +4,8 @@
*/
#include "debugfs_gt.h"
+
+#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
@@ -29,6 +31,9 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
+ init_llist_head(&gt->watchdog.list);
+ INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
+
intel_gt_init_buffer_pool(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
@@ -39,6 +44,42 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_uc_init_early(&gt->uc);
}
+int intel_gt_probe_lmem(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_memory_region *mem;
+ int id;
+ int err;
+
+ mem = intel_gt_setup_lmem(gt);
+ if (mem == ERR_PTR(-ENODEV))
+ mem = intel_gt_setup_fake_lmem(gt);
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ if (err == -ENODEV)
+ return 0;
+
+ drm_err(&i915->drm,
+ "Failed to setup region(%d) type=%d\n",
+ err, INTEL_MEMORY_LOCAL);
+ return err;
+ }
+
+ id = INTEL_REGION_LMEM;
+
+ mem->id = id;
+ mem->type = INTEL_MEMORY_LOCAL;
+ mem->instance = 0;
+
+ intel_memory_region_set_name(mem, "local%u", mem->instance);
+
+ GEM_BUG_ON(!HAS_REGION(i915, id));
+ GEM_BUG_ON(i915->mm.regions[id]);
+ i915->mm.regions[id] = mem;
+
+ return 0;
+}
+
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
gt->ggtt = ggtt;
@@ -344,11 +385,13 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
struct i915_vma *vma;
int ret;
- obj = i915_gem_object_create_stolen(i915, size);
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
+ drm_err(&i915->drm, "Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 9157c7411f60..7ec395cace69 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -36,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
@@ -77,4 +78,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
+void intel_gt_watchdog_work(struct work_struct *work);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 06d84cf09570..c59468107598 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -98,28 +98,6 @@ static void pool_free_work(struct work_struct *wrk)
round_jiffies_up_relative(HZ));
}
-static int pool_active(struct i915_active *ref)
-{
- struct intel_gt_buffer_pool_node *node =
- container_of(ref, typeof(*node), active);
- struct dma_resv *resv = node->obj->base.resv;
- int err;
-
- if (dma_resv_trylock(resv)) {
- dma_resv_add_excl_fence(resv, NULL);
- dma_resv_unlock(resv);
- }
-
- err = i915_gem_object_pin_pages(node->obj);
- if (err)
- return err;
-
- /* Hide this pinned object from the shrinker until retired */
- i915_gem_object_make_unshrinkable(node->obj);
-
- return 0;
-}
-
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
@@ -129,10 +107,13 @@ static void pool_retire(struct i915_active *ref)
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
- i915_gem_object_unpin_pages(node->obj);
+ if (node->pinned) {
+ i915_gem_object_unpin_pages(node->obj);
- /* Return this object to the shrinker pool */
- i915_gem_object_make_purgeable(node->obj);
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+ node->pinned = false;
+ }
GEM_BUG_ON(node->age);
spin_lock_irqsave(&pool->lock, flags);
@@ -144,6 +125,19 @@ static void pool_retire(struct i915_active *ref)
round_jiffies_up_relative(HZ));
}
+void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
+{
+ assert_object_held(node->obj);
+
+ if (node->pinned)
+ return;
+
+ __i915_gem_object_pin_pages(node->obj);
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+ node->pinned = true;
+}
+
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
enum i915_map_type type)
@@ -159,7 +153,8 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz,
node->age = 0;
node->pool = pool;
- i915_active_init(&node->active, pool_active, pool_retire);
+ node->pinned = false;
+ i915_active_init(&node->active, NULL, pool_retire);
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
index 6068f8f1762e..487b8a5520f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
@@ -18,10 +18,15 @@ struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
enum i915_map_type type);
+void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node);
+
static inline int
intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
struct i915_request *rq)
{
+ /* did we call mark_used? */
+ GEM_WARN_ON(!node->pinned);
+
return i915_active_add_request(&node->active, rq);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index d8d82c890da8..df1d75d08cd2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -31,6 +30,7 @@ struct intel_gt_buffer_pool_node {
};
unsigned long age;
enum i915_map_type type;
+ u32 pinned;
};
#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
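With pool_active() gone, pinning is now an explicit caller responsibility: lock the object, mark the node used (which pins the pages once), then mark it active against the request. A hedged end-to-end sketch, example_use_pool_node() being illustrative:

static int example_use_pool_node(struct intel_gt_buffer_pool_node *node,
				 struct i915_request *rq)
{
	int err;

	err = i915_gem_object_lock(node->obj, NULL);
	if (err)
		return err;

	intel_gt_buffer_pool_mark_used(node);	/* pins pages, once */
	err = intel_gt_buffer_pool_mark_active(node, rq);

	i915_gem_object_unlock(node->obj);
	return err;
}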
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index a4242ca8dcd7..582fcaee11aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -165,7 +165,6 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
gt->clock_period_ns,
div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
USEC_PER_SEC));
-
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 9830342aa6f4..9fc6c912a4e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index 886c5cf408a2..f667e976fb2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index c94e8ac884eb..aef3084e8b16 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 63846a856e7e..d0588d8aaa44 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index babe866126d7..811a11ed181c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
index b29816a04809..ff766966d6fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index dc06c78c9eeb..647eca9d867a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -9,6 +8,7 @@
#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
+#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
@@ -243,4 +243,31 @@ void intel_gt_fini_requests(struct intel_gt *gt)
{
/* Wait until the work is marked as finished before unloading! */
cancel_delayed_work_sync(&gt->requests.retire_work);
+
+ flush_work(&gt->watchdog.work);
+}
+
+void intel_gt_watchdog_work(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), watchdog.work);
+ struct i915_request *rq, *rn;
+ struct llist_node *first;
+
+ first = llist_del_all(&gt->watchdog.list);
+ if (!first)
+ return;
+
+ llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
+ if (!i915_request_completed(rq)) {
+ struct dma_fence *f = &rq->fence;
+
+ pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
+ f->ops->get_driver_name(f),
+ f->ops->get_timeline_name(f),
+ f->seqno);
+ i915_request_cancel(rq, -EINTR);
+ }
+ i915_request_put(rq);
+ }
}
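intel_gt_watchdog_work() is the consumer of a lock-free llist; the producer (added elsewhere in the series) takes a reference per node, which the worker drops. A hedged sketch of what the enqueue side plausibly looks like — example_queue_watchdog() is illustrative, not the patch's actual enqueue site:

static void example_queue_watchdog(struct intel_gt *gt, struct i915_request *rq)
{
	i915_request_get(rq);	/* dropped by intel_gt_watchdog_work() */

	/* llist_add() returns true only for the first entry on an empty list */
	if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
		schedule_work(&gt->watchdog.work);
}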
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index dbac53baf1cb..fcc30a6e4fe9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index a83d3e18254d..0caf6ca0a784 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -8,10 +8,12 @@
#include <linux/ktime.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "uc/intel_uc.h"
@@ -39,10 +41,6 @@ struct intel_gt {
struct intel_gt_timelines {
spinlock_t lock; /* protects active_list */
struct list_head active_list;
-
- /* Pack multiple timelines' seqnos into the same page */
- spinlock_t hwsp_lock;
- struct list_head hwsp_free_list;
} timelines;
struct intel_gt_requests {
@@ -56,6 +54,11 @@ struct intel_gt {
struct delayed_work retire_work;
} requests;
+ struct {
+ struct llist_head list;
+ struct work_struct work;
+ } watchdog;
+
struct intel_wakeref wakeref;
atomic_t user_wakeref;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 04aa6601e984..941f8af016d6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -13,16 +13,36 @@
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
+ struct drm_i915_gem_object *obj;
+
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- return i915_gem_object_create_internal(vm->i915, sz);
+ obj = i915_gem_object_create_internal(vm->i915, sz);
+ /* ensure all dma objects have the same reservation class */
+ if (!IS_ERR(obj))
+ obj->base.resv = &vm->resv;
+ return obj;
}
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
int err;
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ i915_gem_object_make_unshrinkable(obj);
+ return 0;
+}
+
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+ int err;
+
err = i915_gem_object_pin_pages(obj);
if (err)
return err;
@@ -56,6 +76,20 @@ void __i915_vm_close(struct i915_address_space *vm)
mutex_unlock(&vm->mutex);
}
+/* lock the vm into the current ww; if we lock one, we lock all */
+int i915_vm_lock_objects(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww)
+{
+ if (vm->scratch[0]->base.resv == &vm->resv) {
+ return i915_gem_object_lock(vm->scratch[0], ww);
+ } else {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+		/* We borrowed the scratch page from ggtt; take the top-level object */
+ return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
+ }
+}
+
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
@@ -69,6 +103,7 @@ static void __i915_vm_release(struct work_struct *work)
vm->cleanup(vm);
i915_address_space_fini(vm);
+ dma_resv_fini(&vm->resv);
kfree(vm);
}
@@ -98,6 +133,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ dma_resv_init(&vm->resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
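Because alloc_pt_dma() points every page-table object at the shared vm->resv initialised here, taking the ww lock on any one of them serialises the whole VM; that is what makes the single-lock shortcut in i915_vm_lock_objects() above valid. Distilled, with example_lock_vm() illustrative:

static int example_lock_vm(struct i915_address_space *vm,
			   struct i915_gem_ww_ctx *ww)
{
	/* scratch[0] shares vm->resv with every other page-table object */
	return i915_gem_object_lock(vm->scratch[0], ww);
}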
@@ -427,7 +463,6 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- int err;
obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
@@ -441,6 +476,19 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
return vma;
}
+ return vma;
+}
+
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
+{
+ struct i915_vma *vma;
+ int err;
+
+ vma = __vm_create_scratch_for_read(vm, size);
+ if (IS_ERR(vma))
+ return vma;
+
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 29c10fde8ce3..e67e34e17913 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -85,6 +85,10 @@ typedef u64 gen8_pte_t;
#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)
+#define GEN12_PPGTT_PTE_LM BIT_ULL(11)
+
+#define GEN12_GGTT_PTE_LM BIT_ULL(1)
+
/*
* Cacheability Control is a 4-bit value. The low three bits are stored in bits
* 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
@@ -238,6 +242,7 @@ struct i915_address_space {
atomic_t open;
struct mutex mutex; /* protects vma and our lists */
+ struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
@@ -264,6 +269,7 @@ struct i915_address_space {
enum i915_cache_level level,
u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
+#define PTE_LM BIT(1)
void (*allocate_va_range)(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
@@ -346,6 +352,9 @@ struct i915_ppgtt {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+int __must_check
+i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
+
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
@@ -522,6 +531,7 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
@@ -576,6 +586,9 @@ void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
+struct i915_vma *
+__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index e3f637b3650e..075d741644ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
index ef09a890d2b7..0e2e3871c919 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.h
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
index ecad4687b930..ca5bdf166a23 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 94f485b591af..e86897cde984 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
+
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
@@ -808,7 +810,9 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
context_size += PAGE_SIZE;
}
- obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ obj = i915_gem_object_create_lmem(engine->i915, context_size, 0);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1417,7 +1421,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
#define CTX_WA_BB_SIZE (PAGE_SIZE)
-static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
+static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -1433,10 +1437,6 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
- if (err)
- goto err;
-
engine->wa_ctx.vma = vma;
return 0;
@@ -1448,9 +1448,6 @@ err:
void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
-
- /* Called on error unwind, clear all flags to prevent further use */
- memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
@@ -1462,6 +1459,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
&wa_ctx->indirect_ctx, &wa_ctx->per_ctx
};
wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
+ struct i915_gem_ww_ctx ww;
void *batch, *batch_ptr;
unsigned int i;
int err;
@@ -1490,7 +1488,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
return;
}
- err = lrc_setup_wa_ctx(engine);
+ err = lrc_create_wa_ctx(engine);
if (err) {
/*
* We continue even if we fail to initialize WA batch
@@ -1503,7 +1501,22 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
return;
}
+ if (!engine->wa_ctx.vma)
+ return;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(wa_ctx->vma->obj, &ww);
+ if (!err)
+ err = i915_ggtt_pin(wa_ctx->vma, &ww, 0, PIN_HIGH);
+ if (err)
+ goto err;
+
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_unpin;
+ }
/*
* Emit the two workaround batch buffers, recording the offset from the
@@ -1528,8 +1541,26 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
__i915_gem_object_release_map(wa_ctx->vma->obj);
/* Verify that we can handle failure to setup the wa_ctx */
- if (err || i915_inject_probe_error(engine->i915, -ENODEV))
- lrc_fini_wa_ctx(engine);
+ if (!err)
+ err = i915_inject_probe_error(engine->i915, -ENODEV);
+
+err_unpin:
+ if (err)
+ i915_vma_unpin(wa_ctx->vma);
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err) {
+ i915_vma_put(engine->wa_ctx.vma);
+
+ /* Clear all flags to prevent further use */
+ memset(wa_ctx, 0, sizeof(*wa_ctx));
+ }
}
static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
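Every conversion in this series follows the same ww transaction shape as the wa_ctx path above. Stripped of the wa_ctx specifics it reads (sketch, with obj/vma standing for whatever the path must lock):

	i915_gem_ww_ctx_init(&ww, true); /* true: interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_ggtt_pin(vma, &ww, 0, PIN_HIGH);
	if (!err) {
		/* ... work with the locked, pinned vma ... */
	}
	if (err == -EDEADLK) {
		/* lock contention: drop all held locks, then retry in order */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);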
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 65fe76738335..41e5350a7a05 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -40,7 +39,7 @@
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
- const u64 addr__ = px_dma(ppgtt->pd); \
+ const u64 addr__ = px_dma((ppgtt)->pd); \
(reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
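The added parentheses around ppgtt are classic macro hygiene: -> binds tighter than most operators, so an argument that is itself an expression would otherwise misparse. For instance (hypothetical caller):

	/* Without the fix this expands to px_dma(sel ? a : b->pd), applying
	 * ->pd to b alone; with (ppgtt) it becomes px_dma((sel ? a : b)->pd).
	 */
	ASSIGN_CTX_PML4(sel ? a : b, regs);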
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 8acb84960cd0..b14138fd505c 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -1,23 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright © 2015 Intel Corporation
*/
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 83371f3e6ba1..d83274f5163b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -1,24 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright © 2015 Intel Corporation
*/
#ifndef INTEL_MOCS_H
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 96b85a10ef33..014ae8ac4480 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -5,6 +5,8 @@
#include <linux/slab.h>
+#include "gem/i915_gem_lmem.h"
+
#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
@@ -192,6 +194,8 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
pte_flags = 0;
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(vma->obj))
+ pte_flags |= PTE_LM;
vm->insert_entries(vm, vma, cache_level, pte_flags);
wmb();
@@ -262,7 +266,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma(vm, pt->base);
+ err = pin_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
@@ -301,9 +305,10 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ dma_resv_init(&ppgtt->vm.resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
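dma_resv_init() on the embedded vm->resv is what makes the pin_pt_dma_locked() switch legal here: every page-directory object created for this vm shares that one reservation, so a single ww acquisition covers the whole page-table tree. A sketch of an allocation path built on it (i915_vm_lock_objects() is declared in the intel_gtt.h hunk earlier in this patch):

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_vm_lock_objects(vm, &ww); /* one lock, all PT objects */
	if (!err)
		err = i915_vm_pin_pt_stash(vm, &stash); /* now uses pin_pt_dma_locked() */
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);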
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 35504c97f11d..3b7e62debe7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -176,7 +175,6 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
/* 3a: Enable RC6 */
set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_RC6_ENABLE |
@@ -485,14 +483,14 @@ static bool rc6_supported(struct intel_rc6 *rc6)
static void rpm_get(struct intel_rc6 *rc6)
{
GEM_BUG_ON(rc6->wakeref);
- pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = true;
}
static void rpm_put(struct intel_rc6 *rc6)
{
GEM_BUG_ON(!rc6->wakeref);
- pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
index 9f0f23fca8af..e119ec4a0bcc 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
index bfbb623f7a4f..e747492b2f46 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 60393ce5614d..be6f2c8f5184 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -26,12 +26,12 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
if (ret)
return ret;
- mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->remap_addr = dma_map_resource(i915->drm.dev,
mem->region.start,
mem->fake_mappable.size,
PCI_DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
- if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
drm_mm_remove_node(&mem->fake_mappable);
return -EINVAL;
}
@@ -56,7 +56,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
drm_mm_remove_node(&mem->fake_mappable);
- dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ dma_unmap_resource(mem->i915->drm.dev,
mem->remap_addr,
mem->fake_mappable.size,
PCI_DMA_BIDIRECTIONAL,
@@ -90,8 +90,6 @@ region_lmem_init(struct intel_memory_region *mem)
if (ret)
io_mapping_fini(&mem->iomap);
- intel_memory_region_set_name(mem, "local");
-
return ret;
}
@@ -102,20 +100,26 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
};
struct intel_memory_region *
-intel_setup_fake_lmem(struct drm_i915_private *i915)
+intel_gt_setup_fake_lmem(struct intel_gt *gt)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct drm_i915_private *i915 = gt->i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
resource_size_t mappable_end;
resource_size_t io_start;
resource_size_t start;
+ if (!HAS_LMEM(i915))
+ return ERR_PTR(-ENODEV);
+
+ if (!i915->params.fake_lmem_start)
+ return ERR_PTR(-ENODEV);
+
GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
- GEM_BUG_ON(!i915->params.fake_lmem_start);
/* Your mappable aperture belongs to me now! */
mappable_end = pci_resource_len(pdev, 2);
- io_start = pci_resource_start(pdev, 2),
+ io_start = pci_resource_start(pdev, 2);
start = i915->params.fake_lmem_start;
mem = intel_memory_region_create(i915,
@@ -136,3 +140,86 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
return mem;
}
+
+static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
+ u64 *start, u32 *size)
+{
+ if (!IS_DG1_REVID(uncore->i915, DG1_REVID_A0, DG1_REVID_B0))
+ return false;
+
+ *start = 0;
+ *size = SZ_1M;
+
+ drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
+ *start, *start + *size);
+
+ return true;
+}
+
+static int reserve_lowmem_region(struct intel_uncore *uncore,
+ struct intel_memory_region *mem)
+{
+ u64 reserve_start;
+ u32 reserve_size;
+ int ret;
+
+ if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
+ return 0;
+
+ ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
+ if (ret)
+ drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
+
+ return ret;
+}
+
+static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t io_start;
+ resource_size_t lmem_size;
+ int err;
+
+ if (!IS_DGFX(i915))
+ return ERR_PTR(-ENODEV);
+
+ /* Stolen starts from GSMBASE on DG1 */
+ lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);
+
+ io_start = pci_resource_start(pdev, 2);
+ if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
+ return ERR_PTR(-ENODEV);
+
+ mem = intel_memory_region_create(i915,
+ 0,
+ lmem_size,
+ I915_GTT_PAGE_SIZE_4K,
+ io_start,
+ &intel_region_lmem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ err = reserve_lowmem_region(uncore, mem);
+ if (err)
+ goto err_region_put;
+
+ drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
+ drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
+ &mem->io_start);
+ drm_info(&i915->drm, "Local memory available: %pa\n",
+ &lmem_size);
+
+ return mem;
+
+err_region_put:
+ intel_memory_region_put(mem);
+ return ERR_PTR(err);
+}
+
+struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
+{
+ return setup_lmem(gt);
+}
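Worked through with made-up numbers: if GSMBASE reads back 0x1e0000000, the 7.5 GiB below stolen is usable LMEM, the GEM_WARN_ON only trips when BAR 2 cannot cover it, and on DG1 A0..B0 the first MiB is carved out of the region before any allocation can land there:

	/* Hypothetical DG1-like part; all values illustrative */
	lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE); /* 0x1e0000000 */
	GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2));     /* BAR 2: 8 GiB, ok */

	/* A0..B0 w/a from above: [0, 1M) must never be handed out */
	intel_memory_region_reserve(mem, 0 /* start */, SZ_1M /* size */);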
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
index 8ea43e538dab..062d0542ae34 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.h
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h
@@ -6,9 +6,11 @@
#ifndef __INTEL_REGION_LMEM_H
#define __INTEL_REGION_LMEM_H
-struct drm_i915_private;
+struct intel_gt;
+
+struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt);
struct intel_memory_region *
-intel_setup_fake_lmem(struct drm_i915_private *i915);
+intel_gt_setup_fake_lmem(struct intel_gt *gt);
#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ca816ba22197..b03e197b1d99 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -1,28 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Mika Kuoppala <mika.kuoppala@intel.com>
- *
*/
#include "i915_drv.h"
@@ -65,7 +43,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
if ((i) >= PAGE_SIZE / sizeof(u32)) \
goto out; \
(batch)[(i)++] = (val); \
- } while(0)
+ } while (0)
static int render_state_setup(struct intel_renderstate *so,
struct drm_i915_private *i915)
@@ -84,6 +62,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->vma->node.start;
+
s = lower_32_bits(r);
if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
@@ -197,7 +176,7 @@ retry:
if (err)
goto err_context;
- err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin_ww(so->vma, &so->ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 713aa1e86c80..48f009203917 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -1,24 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTEL_RENDERSTATE_H_
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 61410cd62927..a377c4588aaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2018 Intel Corporation
*/
@@ -178,7 +177,7 @@ static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
@@ -207,7 +206,7 @@ static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
@@ -217,7 +216,7 @@ static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
struct intel_uncore *uncore = gt->uncore;
int ret;
@@ -787,18 +786,15 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
static void nop_submit_request(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
- unsigned long flags;
-
RQ_TRACE(request, "-EIO\n");
- i915_request_set_error_once(request, -EIO);
- spin_lock_irqsave(&engine->active.lock, flags);
- __i915_request_submit(request);
- i915_request_mark_complete(request);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ request = i915_request_mark_eio(request);
+ if (request) {
+ i915_request_submit(request);
+ intel_engine_signal_breadcrumbs(request->engine);

- intel_engine_signal_breadcrumbs(engine);
+ i915_request_put(request);
+ }
}
static void __intel_gt_set_wedged(struct intel_gt *gt)
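Both call sites converted in this file (nop_submit_request() above and reset_cancel() in intel_ring_submission.c) rely on the same new contract: i915_request_mark_eio() returns NULL if the request already completed, otherwise the request with an extra reference the caller must drop. Condensed:

	rq = i915_request_mark_eio(rq);
	if (rq) { /* not yet completed: -EIO has been set */
		i915_request_submit(rq);
		i915_request_put(rq); /* balance the reference mark_eio took */
	}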
@@ -974,8 +970,6 @@ static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- gt_revoke(gt);
-
err = __intel_gt_reset(gt, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
@@ -1030,6 +1024,13 @@ void intel_gt_reset(struct intel_gt *gt,
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ /*
+ * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
+ * critical section like gpu reset.
+ */
+ gt_revoke(gt);
+
mutex_lock(&gt->reset.mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 7dbf5cc8a333..adc734e67387 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index add6b86d9d03..9312b29f5a97 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -32,7 +32,7 @@ struct intel_reset {
*
* #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
* longer use the GPU - similar to #I915_WEDGED bit. The difference in
- * in the way we're handling "forced" unwedged (e.g. through debugfs),
+ * the way we're handling "forced" unwedged (e.g. through debugfs),
* which is not allowed in case we failed to initialize.
*
* #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 78d1360caa0f..aee0a77c77e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
@@ -109,8 +109,8 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = ERR_PTR(-ENODEV);
- if (i915_ggtt_has_aperture(ggtt))
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+ if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 1700579bdc93..dbf5f14a136f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -82,6 +81,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - rq->ring->vaddr;
+
GEM_BUG_ON(offset > rq->ring->size);
return intel_ring_wrap(rq->ring, offset);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 4984ff565424..9585546556ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1,30 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2008-2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Zou Nan hai <nanhai.zou@intel.com>
- * Xiang Hai hao<haihao.xiang@intel.com>
- *
+ * Copyright © 2008-2021 Intel Corporation
*/
#include "gen2_engine_cs.h"
@@ -183,15 +159,36 @@ static void set_pp_dir(struct intel_engine_cs *engine)
}
}
+static bool stop_ring(struct intel_engine_cs *engine)
+{
+ /* Empty the ring by skipping to the end */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
+ ENGINE_POSTING_READ(engine, RING_HEAD);
+
+ /* The ring must be empty before it is disabled */
+ ENGINE_WRITE_FW(engine, RING_CTL, 0);
+ ENGINE_POSTING_READ(engine, RING_CTL);
+
+ /* Then reset the disabled ring */
+ ENGINE_WRITE_FW(engine, RING_HEAD, 0);
+ ENGINE_WRITE_FW(engine, RING_TAIL, 0);
+
+ return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
+}
+
static int xcs_resume(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->legacy.ring;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
- if (HWS_NEEDS_PHYSICAL(dev_priv))
+ /* Double check the ring is empty & disabled before we resume */
+ synchronize_hardirq(engine->i915->drm.irq);
+ if (!stop_ring(engine))
+ goto err;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
ring_setup_phys_status_page(engine);
else
ring_setup_status_page(engine);
@@ -228,21 +225,10 @@ static int xcs_resume(struct intel_engine_cs *engine)
if (__intel_wait_for_register_fw(engine->uncore,
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
- 5000, 0, NULL)) {
- drm_err(&dev_priv->drm,
- "%s initialization failed; "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
- return -EIO;
- }
+ 5000, 0, NULL))
+ goto err;

- if (INTEL_GEN(dev_priv) > 2)
+ if (INTEL_GEN(engine->i915) > 2)
ENGINE_WRITE_FW(engine,
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
@@ -255,6 +241,19 @@ static int xcs_resume(struct intel_engine_cs *engine)
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
return 0;
+
+err:
+ drm_err(&engine->i915->drm,
+ "%s initialization failed; "
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ engine->name,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ return -EIO;
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -290,23 +289,6 @@ static void xcs_sanitize(struct intel_engine_cs *engine)
clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
}
-static bool stop_ring(struct intel_engine_cs *engine)
-{
- /* Empty the ring by skipping to the end */
- ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
- ENGINE_POSTING_READ(engine, RING_HEAD);
-
- /* The ring must be empty before it is disabled */
- ENGINE_WRITE_FW(engine, RING_CTL, 0);
- ENGINE_POSTING_READ(engine, RING_CTL);
-
- /* Then reset the disabled ring */
- ENGINE_WRITE_FW(engine, RING_HEAD, 0);
- ENGINE_WRITE_FW(engine, RING_TAIL, 0);
-
- return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
-}
-
static void reset_prepare(struct intel_engine_cs *engine)
{
/*
@@ -329,25 +311,23 @@ static void reset_prepare(struct intel_engine_cs *engine)
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- drm_dbg(&engine->i915->drm,
- "%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ_FW(engine, RING_CTL),
- ENGINE_READ_FW(engine, RING_HEAD),
- ENGINE_READ_FW(engine, RING_TAIL),
- ENGINE_READ_FW(engine, RING_START));
- }
-
- if (!stop_ring(engine)) {
- drm_err(&engine->i915->drm,
- "failed to set %s head to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- ENGINE_READ_FW(engine, RING_CTL),
- ENGINE_READ_FW(engine, RING_HEAD),
- ENGINE_READ_FW(engine, RING_TAIL),
- ENGINE_READ_FW(engine, RING_START));
+ ENGINE_TRACE(engine,
+ "HEAD not reset to zero, "
+ "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ if (!stop_ring(engine)) {
+ drm_err(&engine->i915->drm,
+ "failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ ENGINE_READ_FW(engine, RING_CTL),
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ENGINE_READ_FW(engine, RING_START));
+ }
}
}
@@ -431,7 +411,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link)
- i915_request_mark_eio(request);
+ i915_request_put(i915_request_mark_eio(request));
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -466,6 +446,26 @@ static void ring_context_destroy(struct kref *ref)
intel_context_free(ce);
}
+static int ring_context_init_default_state(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ struct drm_i915_gem_object *obj = ce->state->obj;
+ void *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ shmem_read(ce->engine->default_state, 0,
+ vaddr, ce->engine->context_size);
+
+ i915_gem_object_flush_map(obj);
+ __i915_gem_object_release_map(obj);
+
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
+ return 0;
+}
+
static int ring_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **unused)
@@ -473,6 +473,13 @@ static int ring_context_pre_pin(struct intel_context *ce,
struct i915_address_space *vm;
int err = 0;
+ if (ce->engine->default_state &&
+ !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
+ err = ring_context_init_default_state(ce, ww);
+ if (err)
+ return err;
+ }
+
vm = vm_alias(ce->vm);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);
@@ -528,22 +535,6 @@ alloc_context_vma(struct intel_engine_cs *engine)
if (IS_IVYBRIDGE(i915))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
- if (engine->default_state) {
- void *vaddr;
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
-
- shmem_read(engine->default_state, 0,
- vaddr, engine->context_size);
-
- i915_gem_object_flush_map(obj);
- __i915_gem_object_release_map(obj);
- }
-
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -575,8 +566,6 @@ static int ring_context_alloc(struct intel_context *ce)
return PTR_ERR(vma);
ce->state = vma;
- if (engine->default_state)
- __set_bit(CONTEXT_VALID_BIT, &ce->flags);
}
return 0;
@@ -761,13 +750,14 @@ static int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
+#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
return 0;
- cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -776,8 +766,8 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
* here because no other code should access these registers other than
* at initialization time.
*/
- *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
- for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ *cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
+ for (i = 0; i < L3LOG_DW; i++) {
*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
*cs++ = remap_info[i];
}
@@ -785,6 +775,7 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
intel_ring_advance(rq, cs);
return 0;
+#undef L3LOG_DW
}
static int remap_l3(struct i915_request *rq)
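The temporary L3LOG_DW define makes the ring-space request auditable: GEN7_L3LOG_SIZE is 0x80 bytes, so L3LOG_DW = 32 and intel_ring_begin() asks for 32 * 2 + 2 = 66 dwords, which the emitted packet fills exactly (the trailing MI_NOOP sits in the unchanged part of the function, not shown in this hunk):

	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW); /*  1 dword: LRI header */
	for (i = 0; i < L3LOG_DW; i++) {        /* 64 dwords: 32 (reg, value) pairs */
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;                        /*  1 dword: keeps the tail qword-aligned */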
@@ -1176,37 +1167,15 @@ static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
return gen7_setup_clear_gpr_bb(engine, vma);
}
-static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
+ struct i915_gem_ww_ctx *ww,
+ struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int size;
int err;
- size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
- if (size <= 0)
- return size;
-
- size = ALIGN(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, engine->gt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- vma->private = intel_context_create(engine); /* dummy residuals */
- if (IS_ERR(vma->private)) {
- err = PTR_ERR(vma->private);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
if (err)
- goto err_private;
+ return err;
err = i915_vma_sync(vma);
if (err)
@@ -1221,17 +1190,53 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
err_unpin:
i915_vma_unpin(vma);
-err_private:
- intel_context_put(vma->private);
-err_obj:
- i915_gem_object_put(obj);
return err;
}
+static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size, err;
+
+ if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS)
+ return 0;
+
+ err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (err < 0)
+ return ERR_PTR(err);
+ if (!err)
+ return NULL;
+
+ size = ALIGN(err, PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vma);
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ vma->private = NULL;
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
+ struct i915_gem_ww_ctx ww;
struct intel_timeline *timeline;
struct intel_ring *ring;
+ struct i915_vma *gen7_wa_vma;
int err;
setup_common(engine);
@@ -1262,43 +1267,72 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- err = intel_timeline_pin(timeline, NULL);
- if (err)
- goto err_timeline;
-
ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err_timeline_unpin;
+ goto err_timeline;
}
- err = intel_ring_pin(ring, NULL);
- if (err)
- goto err_ring;
-
GEM_BUG_ON(engine->legacy.ring);
engine->legacy.ring = ring;
engine->legacy.timeline = timeline;
- GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ gen7_wa_vma = gen7_ctx_vma(engine);
+ if (IS_ERR(gen7_wa_vma)) {
+ err = PTR_ERR(gen7_wa_vma);
+ goto err_ring;
+ }
- if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
- err = gen7_ctx_switch_bb_init(engine);
+ i915_gem_ww_ctx_init(&ww, false);
+
+retry:
+ err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
+ if (!err && gen7_wa_vma)
+ err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
+ if (!err && engine->legacy.ring->vma->obj)
+ err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
+ if (!err)
+ err = intel_timeline_pin(timeline, &ww);
+ if (!err) {
+ err = intel_ring_pin(ring, &ww);
if (err)
- goto err_ring_unpin;
+ intel_timeline_unpin(timeline);
+ }
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+
+ if (gen7_wa_vma) {
+ err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
+ if (err) {
+ intel_ring_unpin(ring);
+ intel_timeline_unpin(timeline);
+ }
}
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (err)
+ goto err_gen7_put;
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
-err_ring_unpin:
- intel_ring_unpin(ring);
+err_gen7_put:
+ if (gen7_wa_vma) {
+ intel_context_put(gen7_wa_vma->private);
+ i915_gem_object_put(gen7_wa_vma->obj);
+ }
err_ring:
intel_ring_put(ring);
-err_timeline_unpin:
- intel_timeline_unpin(timeline);
err_timeline:
intel_timeline_put(timeline);
err:
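The refactor gives gen7_ctx_vma() a three-way return that intel_ring_submission_setup() above depends on: ERR_PTR() on failure, NULL (spelled `return 0` in one branch) when this engine needs no switch batch, and otherwise an unpinned vma that is only locked and pinned inside the ww transaction. Condensed caller shape:

	gen7_wa_vma = gen7_ctx_vma(engine); /* ERR_PTR(), NULL, or unpinned vma */
	if (IS_ERR(gen7_wa_vma))
		goto err_ring;
	/* NULL means nothing to lock or pin; every later step checks it */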
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
index 1a189ea00fd8..49ccb76dda3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index ee5835c29c03..405d814e9040 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 8d3c9d663662..1d2cfc98b510 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 029fe13cf303..3941d8551f52 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 8a72e0fe34ca..0d9f74aec8fe 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 23ba6c2ebe70..4cd1a8a7298a 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 037b0e3ccbed..f19cf6d2fa85 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016-2018 Intel Corporation
*/
@@ -12,21 +11,9 @@
#include "intel_ring.h"
#include "intel_timeline.h"
-#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
-#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
+#define TIMELINE_SEQNO_BYTES 8
-#define CACHELINE_BITS 6
-#define CACHELINE_FREE CACHELINE_BITS
-
-struct intel_timeline_hwsp {
- struct intel_gt *gt;
- struct intel_gt_timelines *gt_timelines;
- struct list_head free_link;
- struct i915_vma *vma;
- u64 free_bitmap;
-};
-
-static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
+static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@@ -45,174 +32,42 @@ static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
return vma;
}
-static struct i915_vma *
-hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
-{
- struct intel_gt_timelines *gt = &timeline->gt->timelines;
- struct intel_timeline_hwsp *hwsp;
-
- BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
-
- spin_lock_irq(&gt->hwsp_lock);
-
- /* hwsp_free_list only contains HWSP that have available cachelines */
- hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
- typeof(*hwsp), free_link);
- if (!hwsp) {
- struct i915_vma *vma;
-
- spin_unlock_irq(&gt->hwsp_lock);
-
- hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
- if (!hwsp)
- return ERR_PTR(-ENOMEM);
-
- vma = __hwsp_alloc(timeline->gt);
- if (IS_ERR(vma)) {
- kfree(hwsp);
- return vma;
- }
-
- GT_TRACE(timeline->gt, "new HWSP allocated\n");
-
- vma->private = hwsp;
- hwsp->gt = timeline->gt;
- hwsp->vma = vma;
- hwsp->free_bitmap = ~0ull;
- hwsp->gt_timelines = gt;
-
- spin_lock_irq(&gt->hwsp_lock);
- list_add(&hwsp->free_link, &gt->hwsp_free_list);
- }
-
- GEM_BUG_ON(!hwsp->free_bitmap);
- *cacheline = __ffs64(hwsp->free_bitmap);
- hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
- if (!hwsp->free_bitmap)
- list_del(&hwsp->free_link);
-
- spin_unlock_irq(&gt->hwsp_lock);
-
- GEM_BUG_ON(hwsp->vma->private != hwsp);
- return hwsp->vma;
-}
-
-static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
-{
- struct intel_gt_timelines *gt = hwsp->gt_timelines;
- unsigned long flags;
-
- spin_lock_irqsave(&gt->hwsp_lock, flags);
-
- /* As a cacheline becomes available, publish the HWSP on the freelist */
- if (!hwsp->free_bitmap)
- list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
-
- GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
- hwsp->free_bitmap |= BIT_ULL(cacheline);
-
- /* And if no one is left using it, give the page back to the system */
- if (hwsp->free_bitmap == ~0ull) {
- i915_vma_put(hwsp->vma);
- list_del(&hwsp->free_link);
- kfree(hwsp);
- }
-
- spin_unlock_irqrestore(&gt->hwsp_lock, flags);
-}
-
-static void __rcu_cacheline_free(struct rcu_head *rcu)
-{
- struct intel_timeline_cacheline *cl =
- container_of(rcu, typeof(*cl), rcu);
-
- /* Must wait until after all *rq->hwsp are complete before removing */
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
- i915_active_fini(&cl->active);
- kfree(cl);
-}
-
-static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
-{
- GEM_BUG_ON(!i915_active_is_idle(&cl->active));
- call_rcu(&cl->rcu, __rcu_cacheline_free);
-}
-
__i915_active_call
-static void __cacheline_retire(struct i915_active *active)
+static void __timeline_retire(struct i915_active *active)
{
- struct intel_timeline_cacheline *cl =
- container_of(active, typeof(*cl), active);
+ struct intel_timeline *tl =
+ container_of(active, typeof(*tl), active);
- i915_vma_unpin(cl->hwsp->vma);
- if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
- __idle_cacheline_free(cl);
+ i915_vma_unpin(tl->hwsp_ggtt);
+ intel_timeline_put(tl);
}
-static int __cacheline_active(struct i915_active *active)
+static int __timeline_active(struct i915_active *active)
{
- struct intel_timeline_cacheline *cl =
- container_of(active, typeof(*cl), active);
+ struct intel_timeline *tl =
+ container_of(active, typeof(*tl), active);
- __i915_vma_pin(cl->hwsp->vma);
+ __i915_vma_pin(tl->hwsp_ggtt);
+ intel_timeline_get(tl);
return 0;
}
-static struct intel_timeline_cacheline *
-cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+I915_SELFTEST_EXPORT int
+intel_timeline_pin_map(struct intel_timeline *timeline)
{
- struct intel_timeline_cacheline *cl;
+ struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
+ u32 ofs = offset_in_page(timeline->hwsp_offset);
void *vaddr;
- GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
-
- cl = kmalloc(sizeof(*cl), GFP_KERNEL);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- kfree(cl);
- return ERR_CAST(vaddr);
- }
-
- cl->hwsp = hwsp;
- cl->vaddr = page_pack_bits(vaddr, cacheline);
-
- i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
-
- return cl;
-}
-
-static void cacheline_acquire(struct intel_timeline_cacheline *cl,
- u32 ggtt_offset)
-{
- if (!cl)
- return;
-
- cl->ggtt_offset = ggtt_offset;
- i915_active_acquire(&cl->active);
-}
-
-static void cacheline_release(struct intel_timeline_cacheline *cl)
-{
- if (cl)
- i915_active_release(&cl->active);
-}
-
-static void cacheline_free(struct intel_timeline_cacheline *cl)
-{
- if (!i915_active_acquire_if_busy(&cl->active)) {
- __idle_cacheline_free(cl);
- return;
- }
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);

- GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
- cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
+ timeline->hwsp_map = vaddr;
+ timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
+ clflush(vaddr + ofs);

- i915_active_release(&cl->active);
+ return 0;
}
static int intel_timeline_init(struct intel_timeline *timeline,
@@ -220,45 +75,25 @@ static int intel_timeline_init(struct intel_timeline *timeline,
struct i915_vma *hwsp,
unsigned int offset)
{
- void *vaddr;
-
kref_init(&timeline->kref);
atomic_set(&timeline->pin_count, 0);
timeline->gt = gt;
- timeline->has_initial_breadcrumb = !hwsp;
- timeline->hwsp_cacheline = NULL;
-
- if (!hwsp) {
- struct intel_timeline_cacheline *cl;
- unsigned int cacheline;
-
- hwsp = hwsp_alloc(timeline, &cacheline);
+ if (hwsp) {
+ timeline->hwsp_offset = offset;
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+ } else {
+ timeline->has_initial_breadcrumb = true;
+ hwsp = hwsp_alloc(gt);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
-
- cl = cacheline_alloc(hwsp->private, cacheline);
- if (IS_ERR(cl)) {
- __idle_hwsp_free(hwsp->private, cacheline);
- return PTR_ERR(cl);
- }
-
- timeline->hwsp_cacheline = cl;
- timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
-
- vaddr = page_mask_bits(cl->vaddr);
- } else {
- timeline->hwsp_offset = offset;
- vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
+ timeline->hwsp_ggtt = hwsp;
}
- timeline->hwsp_seqno =
- memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
+ timeline->hwsp_map = NULL;
+ timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;

- timeline->hwsp_ggtt = i915_vma_get(hwsp);
GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
timeline->fence_context = dma_fence_context_alloc(1);
@@ -269,6 +104,7 @@ static int intel_timeline_init(struct intel_timeline *timeline,
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
+ i915_active_init(&timeline->active, __timeline_active, __timeline_retire);
return 0;
}
@@ -279,23 +115,19 @@ void intel_gt_init_timelines(struct intel_gt *gt)
spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list);
-
- spin_lock_init(&timelines->hwsp_lock);
- INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-static void intel_timeline_fini(struct intel_timeline *timeline)
+static void intel_timeline_fini(struct rcu_head *rcu)
{
- GEM_BUG_ON(atomic_read(&timeline->pin_count));
- GEM_BUG_ON(!list_empty(&timeline->requests));
- GEM_BUG_ON(timeline->retire);
+ struct intel_timeline *timeline =
+ container_of(rcu, struct intel_timeline, rcu);
- if (timeline->hwsp_cacheline)
- cacheline_free(timeline->hwsp_cacheline);
- else
+ if (timeline->hwsp_map)
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt);
+ i915_active_fini(&timeline->active);
+ kfree(timeline);
}
struct intel_timeline *
@@ -351,6 +183,12 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
+ if (!tl->hwsp_map) {
+ err = intel_timeline_pin_map(tl);
+ if (err)
+ return err;
+ }
+
err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
if (err)
return err;
@@ -361,9 +199,9 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
- cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
+ i915_active_acquire(&tl->active);
if (atomic_fetch_inc(&tl->pin_count)) {
- cacheline_release(tl->hwsp_cacheline);
+ i915_active_release(&tl->active);
__i915_vma_unpin(tl->hwsp_ggtt);
}
@@ -372,9 +210,13 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
+ u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
/* Must be pinned to be writable, and no requests in flight. */
GEM_BUG_ON(!atomic_read(&tl->pin_count));
- WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
+ WRITE_ONCE(*hwsp_seqno, tl->seqno);
+ clflush(hwsp_seqno);
}
void intel_timeline_enter(struct intel_timeline *tl)
@@ -450,106 +292,23 @@ static u32 timeline_advance(struct intel_timeline *tl)
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
-static void timeline_rollback(struct intel_timeline *tl)
-{
- tl->seqno -= 1 + tl->has_initial_breadcrumb;
-}
-
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
- struct i915_request *rq,
u32 *seqno)
{
- struct intel_timeline_cacheline *cl;
- unsigned int cacheline;
- struct i915_vma *vma;
- void *vaddr;
- int err;
-
- might_lock(&tl->gt->ggtt->vm.mutex);
- GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context);
-
- /*
- * If there is an outstanding GPU reference to this cacheline,
- * such as it being sampled by a HW semaphore on another timeline,
- * we cannot wraparound our seqno value (the HW semaphore does
- * a strict greater-than-or-equals compare, not i915_seqno_passed).
- * So if the cacheline is still busy, we must detach ourselves
- * from it and leave it inflight alongside its users.
- *
- * However, if nobody is watching and we can guarantee that nobody
- * will, we could simply reuse the same cacheline.
- *
- * if (i915_active_request_is_signaled(&tl->last_request) &&
- * i915_active_is_signaled(&tl->hwsp_cacheline->active))
- * return 0;
- *
- * That seems unlikely for a busy timeline that needed to wrap in
- * the first place, so just replace the cacheline.
- */
-
- vma = hwsp_alloc(tl, &cacheline);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_rollback;
- }
-
- err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
- if (err) {
- __idle_hwsp_free(vma->private, cacheline);
- goto err_rollback;
- }
-
- cl = cacheline_alloc(vma->private, cacheline);
- if (IS_ERR(cl)) {
- err = PTR_ERR(cl);
- __idle_hwsp_free(vma->private, cacheline);
- goto err_unpin;
- }
- GEM_BUG_ON(cl->hwsp->vma != vma);
-
- /*
- * Attach the old cacheline to the current request, so that we only
- * free it after the current request is retired, which ensures that
- * all writes into the cacheline from previous requests are complete.
- */
- err = i915_active_ref(&tl->hwsp_cacheline->active,
- tl->fence_context,
- &rq->fence);
- if (err)
- goto err_cacheline;
-
- cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
- cacheline_free(tl->hwsp_cacheline);
-
- i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
- i915_vma_put(tl->hwsp_ggtt);
-
- tl->hwsp_ggtt = i915_vma_get(vma);
+ u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);

- vaddr = page_mask_bits(cl->vaddr);
- tl->hwsp_offset = cacheline * CACHELINE_BYTES;
- tl->hwsp_seqno =
- memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
+ next_ofs = offset_in_page(next_ofs + BIT(5));

- tl->hwsp_offset += i915_ggtt_offset(vma);
- GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
- tl->fence_context, tl->hwsp_offset);
-
- cacheline_acquire(cl, tl->hwsp_offset);
- tl->hwsp_cacheline = cl;
+ tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
+ tl->hwsp_seqno = tl->hwsp_map + next_ofs;
+ intel_timeline_reset_seqno(tl);
*seqno = timeline_advance(tl);
GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
return 0;
-
-err_cacheline:
- cacheline_free(cl);
-err_unpin:
- i915_vma_unpin(vma);
-err_rollback:
- timeline_rollback(tl);
- return err;
}
int intel_timeline_get_seqno(struct intel_timeline *tl,
@@ -559,51 +318,52 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
- if (unlikely(!*seqno && tl->hwsp_cacheline))
- return __intel_timeline_get_seqno(tl, rq, seqno);
+ if (unlikely(!*seqno && tl->has_initial_breadcrumb))
+ return __intel_timeline_get_seqno(tl, seqno);
return 0;
}
-static int cacheline_ref(struct intel_timeline_cacheline *cl,
- struct i915_request *rq)
-{
- return i915_active_add_request(&cl->active, rq);
-}
-
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline_cacheline *cl;
+ struct intel_timeline *tl;
int err;
- GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));
-
rcu_read_lock();
- cl = rcu_dereference(from->hwsp_cacheline);
- if (i915_request_signaled(from)) /* confirm cacheline is valid */
- goto unlock;
- if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
- goto unlock; /* seqno wrapped and completed! */
- if (unlikely(__i915_request_is_complete(from)))
- goto release;
+ tl = rcu_dereference(from->timeline);
+ if (i915_request_signaled(from) ||
+ !i915_active_acquire_if_busy(&tl->active))
+ tl = NULL;
+
+ if (tl) {
+ /* hwsp_offset may wraparound, so use from->hwsp_seqno */
+ *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
+ offset_in_page(from->hwsp_seqno);
+ }
+
+ /* ensure we wait on the right request, if not, we completed */
+ if (tl && __i915_request_is_complete(from)) {
+ i915_active_release(&tl->active);
+ tl = NULL;
+ }
rcu_read_unlock();
- err = cacheline_ref(cl, to);
- if (err)
+ if (!tl)
+ return 1;
+
+ /* Can't do semaphore waits on kernel context */
+ if (!tl->has_initial_breadcrumb) {
+ err = -EINVAL;
goto out;
+ }
+
+ err = i915_active_add_request(&tl->active, to);

- *hwsp = cl->ggtt_offset;
out:
- i915_active_release(&cl->active);
+ i915_active_release(&tl->active);
return err;
-
-release:
- i915_active_release(&cl->active);
-unlock:
- rcu_read_unlock();
- return 1;
}
void intel_timeline_unpin(struct intel_timeline *tl)
@@ -612,8 +372,7 @@ void intel_timeline_unpin(struct intel_timeline *tl)
if (!atomic_dec_and_test(&tl->pin_count))
return;
- cacheline_release(tl->hwsp_cacheline);
-
+ i915_active_release(&tl->active);
__i915_vma_unpin(tl->hwsp_ggtt);
}
@@ -622,8 +381,11 @@ void __intel_timeline_free(struct kref *kref)
struct intel_timeline *timeline =
container_of(kref, typeof(*timeline), kref);
- intel_timeline_fini(timeline);
- kfree_rcu(timeline, rcu);
+ GEM_BUG_ON(atomic_read(&timeline->pin_count));
+ GEM_BUG_ON(!list_empty(&timeline->requests));
+ GEM_BUG_ON(timeline->retire);
+
+ call_rcu(&timeline->rcu, intel_timeline_fini);
}
void intel_gt_fini_timelines(struct intel_gt *gt)
@@ -631,7 +393,6 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
struct intel_gt_timelines *timelines = &gt->timelines;
GEM_BUG_ON(!list_empty(&timelines->active_list));
- GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
void intel_gt_show_timelines(struct intel_gt *gt,
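With cachelines gone, seqno wraparound reduces to sliding an 8-byte window through the pinned page. A standalone sketch of the slot walk (userspace C, 4 KiB pages assumed) shows the MI_FLUSH_DW workaround skipping the second half of every 64-byte block:

	#include <stdio.h>

	#define TIMELINE_SEQNO_BYTES 8
	#define BIT(n) (1u << (n))
	#define PAGE_SIZE 4096u
	#define offset_in_page(ofs) ((ofs) & (PAGE_SIZE - 1))

	int main(void)
	{
		unsigned int ofs = 0;

		for (int i = 0; i < 8; i++) { /* mirrors __intel_timeline_get_seqno() */
			unsigned int next = offset_in_page(ofs + TIMELINE_SEQNO_BYTES);

			/* w/a: MI_FLUSH_DW cannot store to an address with bit 5 set */
			if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next & BIT(5)))
				next = offset_in_page(next + BIT(5));

			printf("0x%03x -> 0x%03x\n", ofs, next); /* 0x018 -> 0x040, ... */
			ofs = next;
		}
		return 0;
	}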
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index dcdee692a80e..57308c4d664a 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef I915_TIMELINE_H
@@ -117,4 +98,6 @@ intel_timeline_is_last(const struct intel_timeline *tl,
return list_is_last_rcu(&rq->link, &tl->requests);
}
+I915_SELFTEST_DECLARE(int intel_timeline_pin_map(struct intel_timeline *tl));
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index e360f50706bf..74e67dbf89c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
@@ -18,7 +17,6 @@
struct i915_vma;
struct i915_syncmap;
struct intel_gt;
-struct intel_timeline_hwsp;
struct intel_timeline {
u64 fence_context;
@@ -45,12 +43,11 @@ struct intel_timeline {
atomic_t pin_count;
atomic_t active_count;
+ void *hwsp_map;
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
- struct intel_timeline_cacheline *hwsp_cacheline;
-
bool has_initial_breadcrumb;
/**
@@ -67,6 +64,8 @@ struct intel_timeline {
*/
struct i915_active_fence last_request;
+ struct i915_active active;
+
/** A chain of completed timelines ready for early retirement. */
struct intel_timeline *retire;
@@ -90,15 +89,4 @@ struct intel_timeline {
struct rcu_head rcu;
};
-struct intel_timeline_cacheline {
- struct i915_active active;
-
- struct intel_timeline_hwsp *hwsp;
- void *vaddr;
-
- u32 ggtt_offset;
-
- struct rcu_head rcu;
-};
-
#endif /* __I915_TIMELINE_TYPES_H__ */
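
With intel_timeline_cacheline gone, the three fields hwsp_map / hwsp_seqno / hwsp_offset fully describe a timeline's seqno slot. How they relate, as an illustrative helper (not part of the patch) mirroring the assignments in __intel_timeline_get_seqno():

static const u32 *timeline_seqno_slot(const struct intel_timeline *tl)
{
	/* hwsp_map is the kmapped HWSP page; hwsp_offset stores the GGTT
	 * address of the active slot, so reduce it to a page offset for
	 * the CPU-side pointer.
	 */
	return (const u32 *)(tl->hwsp_map + offset_in_page(tl->hwsp_offset));
}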
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index ec366cf9ef56..2c6f7217469f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
@@ -53,37 +52,6 @@
* - Public functions to init or apply the given workaround type.
*/
-/*
- * KBL revision ID ordering is bizarre; higher revision ID's map to lower
- * steppings in some cases. So rather than test against the revision ID
- * directly, let's map that into our own range of increasing ID's that we
- * can test against in a regular manner.
- */
-
-const struct i915_rev_steppings kbl_revids[] = {
- [0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 },
- [1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 },
- [2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 },
- [3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 },
- [4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 },
- [5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 },
- [6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 },
- [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
-};
-
-const struct i915_rev_steppings tgl_uy_revids[] = {
- [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 },
- [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 },
- [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 },
- [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 },
-};
-
-/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
-const struct i915_rev_steppings tgl_revids[] = {
- [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 },
- [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 },
-};
-
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
wal->name = name;
@@ -273,7 +241,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
- * workaround for for a possible hang in the unlikely event a TLB
+ * workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:bdw,chv */
@@ -513,7 +481,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -722,7 +690,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
- else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
+ else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
+ IS_TIGERLAKE(i915))
tgl_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 12))
gen12_ctx_workarounds_init(engine, wal);
@@ -749,7 +718,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
else if (IS_GEN(i915, 6))
gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
- return;
+ ;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -930,7 +899,7 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
gen9_gt_workarounds_init(i915, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_STEP(i915, 0, STEP_B0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1103,11 +1072,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
- IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
+ IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
- }
}
static void
@@ -1123,19 +1091,19 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
gen12_gt_workarounds_init(i915, wal);
/* Wa_1409420604:tgl */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1607087056:tgl also known as BUG:1409180338 */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
/* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
VSUNIT_CLKGATE_DIS_TGL);
}
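
The IS_*_REVID() to IS_*_GT_STEP() conversions throughout this file express the same checks via revision-to-stepping tables (such as the kbl_revids table deleted above) rather than raw revision IDs. A hedged sketch of what such a ranged stepping test amounts to; the helper and clamping policy here are illustrative, not the driver's actual implementation:

struct rev_stepping { int gt_step; int disp_step; }; /* cf. i915_rev_steppings */

static bool gt_step_in_range(const struct rev_stepping *table, int nents,
			     int revid, int since, int until)
{
	int step;

	if (revid >= nents)
		revid = nents - 1; /* illustrative clamp for unknown revids */
	step = table[revid].gt_step;

	return step >= since && step <= until; /* e.g. STEP_C0..STEP_FOREVER */
}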
@@ -1202,7 +1170,7 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
else if (IS_GEN(i915, 4))
gen4_gt_workarounds_init(i915, wal);
else if (INTEL_GEN(i915) <= 8)
- return;
+ ;
else
MISSING_CASE(INTEL_GEN(i915));
}
@@ -1577,7 +1545,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
else if (IS_SKYLAKE(i915))
skl_whitelist_build(engine);
else if (INTEL_GEN(i915) <= 8)
- return;
+ ;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -1613,7 +1581,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
- IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0)) {
/*
* Wa_1607138336:tgl[a0],dg1[a0]
* Wa_1607063988:tgl[a0],dg1[a0]
@@ -1623,7 +1591,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
}
- if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0)) {
/*
* Wa_1606679103:tgl
* (see also Wa_1606682166:icl)
@@ -1633,45 +1601,45 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1606931601:tgl,rkl,dg1 */
+ if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl,rkl,dg1,adl-s */
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
/*
* Wa_1407928979:tgl A*
* Wa_18011464164:tgl[B0+],dg1[B0+]
* Wa_22010931296:tgl[B0+],dg1[B0+]
- * Wa_14010919138:rkl, dg1
+ * Wa_14010919138:rkl,dg1,adl-s
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
/*
* Wa_1606700617:tgl,dg1
- * Wa_22010271021:tgl,rkl,dg1
+ * Wa_22010271021:tgl,rkl,dg1,adl-s
*/
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1406941453:tgl,rkl,dg1 */
- wa_masked_en(wal,
- GEN10_SAMPLER_MODE,
- ENABLE_SMALLPL);
}
- if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+ if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0] */
+ /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
/*
* Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0]
+ * Wa_14010229206:tgl,rkl,dg1[a0],adl-s
*/
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+ }
+
+ if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1607030317:tgl
* Wa_1607186500:tgl
@@ -1688,6 +1656,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1406941453:tgl,rkl,dg1 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
+ }
+
if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -2045,7 +2020,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_E0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
@@ -2197,10 +2172,15 @@ retry:
if (err)
goto err_pm;
+ err = i915_vma_pin_ww(vma, &ww, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_unpin;
+
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_unpin;
+ goto err_vma;
}
err = i915_request_await_object(rq, vma->obj, true);
@@ -2241,6 +2221,8 @@ retry:
err_rq:
i915_request_put(rq);
+err_vma:
+ i915_vma_unpin(vma);
err_unpin:
intel_context_unpin(ce);
err_pm:
@@ -2251,7 +2233,6 @@ err_pm:
}
i915_gem_ww_ctx_fini(&ww);
intel_engine_pm_put(ce->engine);
- i915_vma_unpin(vma);
i915_vma_put(vma);
return err;
}
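
The engine_wa_list_verify() hunks above also pull i915_vma_unpin() out of the common exit path into a dedicated err_vma label, so the vma is only unpinned on paths that actually pinned it. The unwind discipline, schematically (condensed from the hunks, with unrelated steps elided):

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;          /* context pinned, vma not */

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;            /* vma pinned, request not created */
	}
	/* ... */
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);         /* undo in strict reverse order */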
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 8c9c769c2204..15abb68b6c00 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index d166a7145720..c214111ea367 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 4b4f03b70df7..e1ba03b93ffa 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include "gem/i915_gem_context.h"
@@ -32,9 +13,20 @@
#include "mock_engine.h"
#include "selftests/mock_request.h"
-static void mock_timeline_pin(struct intel_timeline *tl)
+static int mock_timeline_pin(struct intel_timeline *tl)
{
+ int err;
+
+ if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj)))
+ return -EBUSY;
+
+ err = intel_timeline_pin_map(tl);
+ i915_gem_object_unlock(tl->hwsp_ggtt->obj);
+ if (err)
+ return err;
+
atomic_inc(&tl->pin_count);
+ return 0;
}
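
A note on the trylock above: the mock path has no i915_gem_ww_ctx to drive a blocking, deadlock-avoiding lock, so the object lock is taken opportunistically and contention is treated as a selftest bug (hence the WARN_ON). Trimmed to its essentials, with obj standing for tl->hwsp_ggtt->obj:

	if (WARN_ON(!i915_gem_object_trylock(obj))) /* contention == bug here */
		return -EBUSY;
	err = intel_timeline_pin_map(tl);           /* requires the object lock */
	i915_gem_object_unlock(obj);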
static void mock_timeline_unpin(struct intel_timeline *tl)
@@ -152,6 +144,8 @@ static void mock_context_destroy(struct kref *ref)
static int mock_context_alloc(struct intel_context *ce)
{
+ int err;
+
ce->ring = mock_ring(ce->engine);
if (!ce->ring)
return -ENOMEM;
@@ -162,7 +156,12 @@ static int mock_context_alloc(struct intel_context *ce)
return PTR_ERR(ce->timeline);
}
- mock_timeline_pin(ce->timeline);
+ err = mock_timeline_pin(ce->timeline);
+ if (err) {
+ intel_timeline_put(ce->timeline);
+ ce->timeline = NULL;
+ return err;
+ }
return 0;
}
@@ -258,13 +257,15 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link)
- i915_request_mark_eio(rq);
+ i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
- i915_request_mark_eio(rq);
- __i915_request_submit(rq);
+ if (i915_request_mark_eio(rq)) {
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+ }
}
INIT_LIST_HEAD(&mock->hw_queue);
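
The two call sites above pin down the updated i915_request_mark_eio() contract: it returns the request with a reference held when it actually marked it, or NULL when the request had already completed, and the caller owns that reference. The first site also relies on i915_request_put() tolerating NULL:

	/* already-submitted requests: nothing more to do, drop the reference */
	i915_request_put(i915_request_mark_eio(rq));

	/* queued requests: only submit those actually marked */
	if (i915_request_mark_eio(rq)) {
		__i915_request_submit(rq);
		i915_request_put(rq);
	}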
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.h b/drivers/gpu/drm/i915/gt/mock_engine.h
index 3f9b698c49d2..cc5ab6e1f37e 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.h
+++ b/drivers/gpu/drm/i915/gt/mock_engine.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef __MOCK_ENGINE_H__
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index db738d400168..b9bdd1d23243 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2019 Intel Corporation
*/
@@ -88,8 +87,8 @@ static int __live_context_size(struct intel_engine_cs *engine)
if (err)
goto err;
- vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
index f65b118e261d..262764f6d90a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h
index ab32d09ec5a1..c6feb3bd2ccc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine.h
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 439c8984f5fa..b32814a1f20b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2018 Intel Corporation
*/
@@ -42,6 +41,9 @@ static int perf_end(struct intel_gt *gt)
static int write_timestamp(struct i915_request *rq, int slot)
{
+ struct intel_timeline *tl =
+ rcu_dereference_protected(rq->timeline,
+ !i915_request_signaled(rq));
u32 cmd;
u32 *cs;
@@ -54,7 +56,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
- *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+ *cs++ = tl->hwsp_offset + slot * sizeof(u32);
*cs++ = 0;
intel_ring_advance(rq, cs);
@@ -73,7 +75,7 @@ static struct i915_vma *create_empty_batch(struct intel_context *ce)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_put;
@@ -209,7 +211,7 @@ static struct i915_vma *create_nop_batch(struct intel_context *ce)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_put;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 223ab88f7e57..b2c369317bf1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
@@ -12,6 +11,12 @@
#include "i915_selftest.h"
#include "selftest_engine_heartbeat.h"
+static void reset_heartbeat(struct intel_engine_cs *engine)
+{
+ intel_engine_set_heartbeat(engine,
+ engine->defaults.heartbeat_interval_ms);
+}
+
static int timeline_sync(struct intel_timeline *tl)
{
struct dma_fence *fence;
@@ -270,7 +275,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
err = -EINVAL;
}
- intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+ reset_heartbeat(engine);
err_pm:
intel_engine_pm_put(engine);
intel_context_put(ce);
@@ -285,7 +290,7 @@ static int live_heartbeat_fast(void *arg)
int err = 0;
/* Check that the heartbeat ticks at the desired rate. */
- if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
return 0;
for_each_engine(engine, gt, id) {
@@ -333,7 +338,7 @@ static int __live_heartbeat_off(struct intel_engine_cs *engine)
}
err_beat:
- intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+ reset_heartbeat(engine);
err_pm:
intel_engine_pm_put(engine);
return err;
@@ -347,7 +352,7 @@ static int live_heartbeat_off(void *arg)
int err = 0;
/* Check that we can turn off heartbeat and not interrupt VIP */
- if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
return 0;
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index c3d965279fc3..2c898622bdfb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * SPDX-License-Identifier: GPL-2.0
- *
* Copyright © 2018 Intel Corporation
*/
@@ -111,13 +110,15 @@ static int __measure_timestamps(struct intel_context *ce,
cpu_relax();
/* Run the request for a 100us, sampling timestamps before/after */
- preempt_disable();
- *dt = local_clock();
+ local_irq_disable();
write_semaphore(&sema[2], 0);
+ while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
+ cpu_relax();
+ *dt = local_clock();
udelay(100);
*dt = local_clock() - *dt;
write_semaphore(&sema[2], 1);
- preempt_enable();
+ local_irq_enable();
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
i915_request_put(rq);
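
The change above from preempt_disable() to local_irq_disable() tightens the sampling window: a stray interrupt between the two local_clock() reads would inflate *dt. The clock also now starts only once the GPU signals sema[1], so the CPU-side delay is measured against a request that is verifiably running:

	local_irq_disable();            /* exclude IRQ skew from the sample */
	write_semaphore(&sema[2], 0);   /* open the GPU-visible window */
	while (READ_ONCE(sema[1]) == 0) /* wait for the GPU to enter it */
		cpu_relax();
	*dt = local_clock();
	udelay(100);                    /* nominal 100us sample */
	*dt = local_clock() - *dt;
	write_semaphore(&sema[2], 1);   /* close the window */
	local_irq_enable();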
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 264b5ebdb021..1081cd36a2bd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
@@ -321,7 +320,7 @@ static int live_unlite_switch(void *arg)
static int live_unlite_preempt(void *arg)
{
- return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+ return live_unlite_restore(arg, I915_PRIORITY_MAX);
}
static int live_unlite_ring(void *arg)
@@ -609,7 +608,7 @@ static int live_hold_reset(void *arg)
}
tasklet_disable(&engine->execlists.tasklet);
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
@@ -989,7 +988,7 @@ static int live_timeslice_preempt(void *arg)
goto err_obj;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
@@ -1081,7 +1080,6 @@ create_rewinder(struct intel_context *ce,
intel_ring_advance(rq, cs);
- rq->sched.attr.priority = I915_PRIORITY_MASK;
err = 0;
err:
i915_request_get(rq);
@@ -1297,7 +1295,7 @@ static int live_timeslice_queue(void *arg)
goto err_obj;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
@@ -1312,9 +1310,7 @@ static int live_timeslice_queue(void *arg)
goto err_pin;
for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct i915_request *rq, *nop;
if (!intel_engine_has_preemption(engine))
@@ -1529,14 +1525,12 @@ static int live_busywait_preempt(void *arg)
ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
return -ENOMEM;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1544,7 +1538,7 @@ static int live_busywait_preempt(void *arg)
goto err_ctx_lo;
}
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_obj;
@@ -1733,14 +1727,12 @@ static int live_preempt(void *arg)
ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
for_each_engine(engine, gt, id) {
struct igt_live_test t;
@@ -1833,7 +1825,7 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
- ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+ ctx_lo->sched.priority = 1;
for_each_engine(engine, gt, id) {
struct igt_live_test t;
@@ -1874,7 +1866,7 @@ static int live_late_preempt(void *arg)
goto err_wedged;
}
- attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ attr.priority = I915_PRIORITY_MAX;
engine->schedule(rq, &attr);
if (!igt_wait_for_spinner(&spin_hi, rq)) {
@@ -1955,7 +1947,7 @@ static int live_nopreempt(void *arg)
return -ENOMEM;
if (preempt_client_init(gt, &b))
goto err_client_a;
- b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+ b.ctx->sched.priority = I915_PRIORITY_MAX;
for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
@@ -2420,11 +2412,9 @@ err_wedged:
static int live_suppress_self_preempt(void *arg)
{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
- };
struct preempt_client a, b;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -2555,9 +2545,7 @@ static int live_chain_preempt(void *arg)
goto err_client_hi;
for_each_engine(engine, gt, id) {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
- };
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct igt_live_test t;
struct i915_request *rq;
int ring_size, count, i;
@@ -2714,7 +2702,7 @@ static int create_gang(struct intel_engine_cs *engine,
if (err)
goto err_obj;
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_obj;
@@ -2876,7 +2864,7 @@ static int __live_preempt_ring(struct intel_engine_cs *engine,
err = wait_for_submit(engine, rq, HZ / 2);
i915_request_put(rq);
if (err) {
- pr_err("%s: preemption request was not submited\n",
+ pr_err("%s: preemption request was not submitted\n",
engine->name);
err = -ETIME;
}
@@ -2976,9 +2964,7 @@ static int live_preempt_gang(void *arg)
return -EIO;
do {
- struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(prio++),
- };
+ struct i915_sched_attr attr = { .priority = prio++ };
err = create_gang(engine, &rq);
if (err)
@@ -2997,7 +2983,7 @@ static int live_preempt_gang(void *arg)
* it will terminate the next lowest spinner until there
* are no more spinners and the gang is complete.
*/
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
if (!IS_ERR(cs)) {
*cs = 0;
i915_gem_object_unpin_map(rq->batch->obj);
@@ -3014,7 +3000,7 @@ static int live_preempt_gang(void *arg)
drm_info_printer(engine->i915->drm.dev);
pr_err("Failed to flush chain of %d requests, at %d\n",
- prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ prio, rq_prio(rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
@@ -3062,7 +3048,7 @@ create_gpr_user(struct intel_engine_cs *engine,
return ERR_PTR(err);
}
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(vma);
return ERR_CAST(cs);
@@ -3269,7 +3255,7 @@ static int live_preempt_user(void *arg)
if (IS_ERR(global))
return PTR_ERR(global);
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
if (IS_ERR(result)) {
i915_vma_unpin_and_release(&global, 0);
return PTR_ERR(result);
@@ -3384,14 +3370,12 @@ static int live_preempt_timeout(void *arg)
ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+ ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+ ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
for_each_engine(engine, gt, id) {
unsigned long saved_timeout;
@@ -3658,7 +3642,7 @@ static int live_preempt_smoke(void *arg)
goto err_free;
}
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
@@ -4197,8 +4181,9 @@ static int preserved_virtual_engine(struct intel_gt *gt,
int err = 0;
u32 *cs;
- scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
- PAGE_SIZE);
+ scratch =
+ __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
+ PAGE_SIZE);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -4262,7 +4247,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
goto out_end;
}
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto out_end;
@@ -4610,7 +4595,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
}
tasklet_disable(&engine->execlists.tasklet);
- engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ engine->execlists.tasklet.callback(&engine->execlists.tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 5d911f724ebe..c0845bf72dd3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -1,7 +1,5 @@
-
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 463bb6a700c8..746985971c3a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include <linux/kthread.h>
@@ -80,15 +61,15 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
}
i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
}
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
- vaddr = i915_gem_object_pin_map(h->obj,
- i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(h->obj,
+ i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -149,7 +130,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}
- vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index a912159693fd..94006f117bbd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
index 873f896e72f2..88ee9480022a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.h
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 920979a89413..85e7df6a5123 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -27,7 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
- return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
+ return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
@@ -627,7 +627,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
goto err_rq;
}
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
@@ -733,7 +733,6 @@ create_timestamp(struct intel_context *ce, void *slot, int idx)
intel_ring_advance(rq, cs);
- rq->sched.attr.priority = I915_PRIORITY_MASK;
err = 0;
err:
i915_request_get(rq);
@@ -921,7 +920,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
if (IS_ERR(batch))
return batch;
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
@@ -1085,7 +1084,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
if (IS_ERR(batch))
return batch;
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
@@ -1199,29 +1198,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
u32 *defaults;
int err = 0;
- A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
if (IS_ERR(A[0]))
return PTR_ERR(A[0]);
- A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
if (IS_ERR(A[1])) {
err = PTR_ERR(A[1]);
goto err_A0;
}
- B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
if (IS_ERR(B[0])) {
err = PTR_ERR(B[0]);
goto err_A1;
}
- B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
if (IS_ERR(B[1])) {
err = PTR_ERR(B[1]);
goto err_B0;
}
- lrc = i915_gem_object_pin_map(ce->state->obj,
+ lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
i915_coherent_map_type(engine->i915));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index cf373c72359e..e55a887d11e2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
@@ -13,8 +12,9 @@
#include "selftests/igt_spinner.h"
struct live_mocs {
- struct drm_i915_mocs_table mocs;
- struct drm_i915_mocs_table l3cc;
+ struct drm_i915_mocs_table table;
+ struct drm_i915_mocs_table *mocs;
+ struct drm_i915_mocs_table *l3cc;
struct i915_vma *scratch;
void *vaddr;
};
@@ -59,27 +59,27 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
- struct drm_i915_mocs_table table;
unsigned int flags;
int err;
memset(arg, 0, sizeof(*arg));
- flags = get_mocs_settings(gt->i915, &table);
+ flags = get_mocs_settings(gt->i915, &arg->table);
if (!flags)
return -EINVAL;
if (flags & HAS_RENDER_L3CC)
- arg->l3cc = table;
+ arg->l3cc = &arg->table;
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
- arg->mocs = table;
+ arg->mocs = &arg->table;
- arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
+ arg->scratch =
+ __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
- arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
if (IS_ERR(arg->vaddr)) {
err = PTR_ERR(arg->vaddr);
goto err_scratch;
@@ -131,6 +131,9 @@ static int read_mocs_table(struct i915_request *rq,
{
u32 addr;
+ if (!table)
+ return 0;
+
if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
addr = global_mocs_offset();
else
@@ -145,6 +148,9 @@ static int read_l3cc_table(struct i915_request *rq,
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ if (!table)
+ return 0;
+
return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}
@@ -155,6 +161,9 @@ static int check_mocs_table(struct intel_engine_cs *engine,
unsigned int i;
u32 expect;
+ if (!table)
+ return 0;
+
for_each_mocs(expect, table, i) {
if (**vaddr != expect) {
pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
@@ -186,6 +195,9 @@ static int check_l3cc_table(struct intel_engine_cs *engine,
unsigned int i;
u32 expect;
+ if (!table)
+ return 0;
+
for_each_l3cc(expect, table, i) {
if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
@@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
if (!err)
- err = read_mocs_table(rq, &arg->mocs, &offset);
+ err = read_mocs_table(rq, arg->mocs, &offset);
if (!err && ce->engine->class == RENDER_CLASS)
- err = read_l3cc_table(rq, &arg->l3cc, &offset);
+ err = read_l3cc_table(rq, arg->l3cc, &offset);
offset -= i915_ggtt_offset(vma);
GEM_BUG_ON(offset > PAGE_SIZE);
@@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Compare the results against the expected tables */
vaddr = arg->vaddr;
if (!err)
- err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
+ err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
if (!err && ce->engine->class == RENDER_CLASS)
- err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
+ err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 61abc0556601..f097e420ac45 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
index 762fd442d7b2..daf0927909bc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.h
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 3350e7c995bc..99609271c3a7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -35,7 +35,7 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
return ERR_PTR(err);
}
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_gem_object_put(obj);
return ERR_CAST(cs);
@@ -212,7 +212,7 @@ static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
if (IS_ERR(bb))
return PTR_ERR(bb);
- result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ result = i915_gem_object_pin_map_unlocked(bb->obj, I915_MAP_WC);
if (IS_ERR(result)) {
intel_context_put(bb->private);
i915_vma_unpin_and_release(&bb, 0);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 6f3a3687ef0f..9adbd9d147be 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017-2018 Intel Corporation
*/
@@ -35,10 +34,31 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
unsigned long address = (unsigned long)page_address(hwsp_page(tl));
- return (address + tl->hwsp_offset) / CACHELINE_BYTES;
+ return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;
}
-#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
+static int selftest_tl_pin(struct intel_timeline *tl)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);
+ if (!err)
+ err = intel_timeline_pin(tl, &ww);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ return err;
+}
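
selftest_tl_pin() above is the full ww-mutex acquire/backoff idiom in miniature, and the same loop reappears in check_dirty_whitelist() later in this patch. Schematically, with do_locked_work() as a placeholder for the locked section:

	i915_gem_ww_ctx_init(&ww, false);       /* false: not interruptible */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = do_locked_work(obj);      /* placeholder */
	if (err == -EDEADLK) {                  /* lock-ordering conflict */
		err = i915_gem_ww_ctx_backoff(&ww); /* drop all and wait */
		if (!err)
			goto retry;             /* the ticket guarantees progress */
	}
	i915_gem_ww_ctx_fini(&ww);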
+
+/* Only half of the seqnos are usable, see __intel_timeline_get_seqno() */
+#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
struct mock_hwsp_freelist {
struct intel_gt *gt;
@@ -59,6 +79,7 @@ static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
tl = xchg(&state->history[idx], tl);
if (tl) {
radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
+ intel_timeline_unpin(tl);
intel_timeline_put(tl);
}
}
@@ -78,6 +99,12 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
if (IS_ERR(tl))
return PTR_ERR(tl);
+ err = selftest_tl_pin(tl);
+ if (err) {
+ intel_timeline_put(tl);
+ return err;
+ }
+
cacheline = hwsp_cacheline(tl);
err = radix_tree_insert(&state->cachelines, cacheline, tl);
if (err) {
@@ -85,6 +112,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
cacheline);
}
+ intel_timeline_unpin(tl);
intel_timeline_put(tl);
return err;
}
@@ -452,17 +480,24 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
}
static struct i915_request *
-tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
+checked_tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
struct i915_request *rq;
int err;
- err = intel_timeline_pin(tl, NULL);
+ err = selftest_tl_pin(tl);
if (err) {
rq = ERR_PTR(err);
goto out;
}
+ if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
+ pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
+ *tl->hwsp_seqno, tl->seqno);
+ intel_timeline_unpin(tl);
+ return ERR_PTR(-EINVAL);
+ }
+
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
goto out_unpin;
@@ -484,25 +519,6 @@ out:
return rq;
}
-static struct intel_timeline *
-checked_intel_timeline_create(struct intel_gt *gt)
-{
- struct intel_timeline *tl;
-
- tl = intel_timeline_create(gt);
- if (IS_ERR(tl))
- return tl;
-
- if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
- pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
- *tl->hwsp_seqno, tl->seqno);
- intel_timeline_put(tl);
- return ERR_PTR(-EINVAL);
- }
-
- return tl;
-}
-
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
@@ -535,13 +551,13 @@ static int live_hwsp_engine(void *arg)
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(gt);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
break;
}
- rq = tl_write(tl, engine, count);
+ rq = checked_tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
@@ -608,14 +624,14 @@ static int live_hwsp_alternate(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_intel_timeline_create(gt);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
}
intel_engine_pm_get(engine);
- rq = tl_write(tl, engine, count);
+ rq = checked_tl_write(tl, engine, count);
intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
@@ -666,10 +682,10 @@ static int live_hwsp_wrap(void *arg)
if (IS_ERR(tl))
return PTR_ERR(tl);
- if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ if (!tl->has_initial_breadcrumb)
goto out_free;
- err = intel_timeline_pin(tl, NULL);
+ err = selftest_tl_pin(tl);
if (err)
goto out_free;
@@ -816,13 +832,13 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
if (IS_ERR(obj))
return PTR_ERR(obj);
- w->map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ w->map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(w->map)) {
i915_gem_object_put(obj);
return PTR_ERR(w->map);
}
- vma = i915_gem_object_ggtt_pin_ww(obj, NULL, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return PTR_ERR(vma);
@@ -833,12 +849,26 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
return 0;
}
+static void switch_tl_lock(struct i915_request *from, struct i915_request *to)
+{
+ /* some light mutex juggling required; think co-routines */
+
+ if (from) {
+ lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
+ mutex_unlock(&from->context->timeline->mutex);
+ }
+
+ if (to) {
+ mutex_lock(&to->context->timeline->mutex);
+ to->cookie = lockdep_pin_lock(&to->context->timeline->mutex);
+ }
+}
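
switch_tl_lock() then replaces the open-coded lock/cookie shuffling below, bouncing the timeline mutex between the request under test and each watcher. A representative pairing, as used in live_hwsp_read():

	switch_tl_lock(rq, watcher[0].rq);  /* hand the lock to the watcher */
	err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
	/* ... emit the before/after reads on the watcher's ring ... */
	switch_tl_lock(watcher[0].rq, rq);  /* and hand it back */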
+
static int create_watcher(struct hwsp_watcher *w,
struct intel_engine_cs *engine,
int ringsz)
{
struct intel_context *ce;
- struct intel_timeline *tl;
ce = intel_context_create(engine);
if (IS_ERR(ce))
@@ -851,11 +881,8 @@ static int create_watcher(struct hwsp_watcher *w,
return PTR_ERR(w->rq);
w->addr = i915_ggtt_offset(w->vma);
- tl = w->rq->context->timeline;
- /* some light mutex juggling required; think co-routines */
- lockdep_unpin_lock(&tl->mutex, w->rq->cookie);
- mutex_unlock(&tl->mutex);
+ switch_tl_lock(w->rq, NULL);
return 0;
}
@@ -864,15 +891,13 @@ static int check_watcher(struct hwsp_watcher *w, const char *name,
bool (*op)(u32 hwsp, u32 seqno))
{
struct i915_request *rq = fetch_and_zero(&w->rq);
- struct intel_timeline *tl = rq->context->timeline;
u32 offset, end;
int err;
GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
i915_request_get(rq);
- mutex_lock(&tl->mutex);
- rq->cookie = lockdep_pin_lock(&tl->mutex);
+ switch_tl_lock(NULL, rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ) < 0) {
@@ -901,10 +926,7 @@ out:
static void cleanup_watcher(struct hwsp_watcher *w)
{
if (w->rq) {
- struct intel_timeline *tl = w->rq->context->timeline;
-
- mutex_lock(&tl->mutex);
- w->rq->cookie = lockdep_pin_lock(&tl->mutex);
+ switch_tl_lock(NULL, w->rq);
i915_request_add(w->rq);
}
@@ -942,7 +964,7 @@ static struct i915_request *wrap_timeline(struct i915_request *rq)
}
i915_request_put(rq);
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq))
return rq;
@@ -977,7 +999,7 @@ static int live_hwsp_read(void *arg)
if (IS_ERR(tl))
return PTR_ERR(tl);
- if (!tl->hwsp_cacheline)
+ if (!tl->has_initial_breadcrumb)
goto out_free;
for (i = 0; i < ARRAY_SIZE(watcher); i++) {
@@ -999,7 +1021,7 @@ static int live_hwsp_read(void *arg)
do {
struct i915_sw_fence *submit;
struct i915_request *rq;
- u32 hwsp;
+ u32 hwsp, dummy;
submit = heap_fence_create(GFP_KERNEL);
if (!submit) {
@@ -1017,14 +1039,26 @@ static int live_hwsp_read(void *arg)
goto out;
}
- /* Skip to the end, saving 30 minutes of nops */
- tl->seqno = -10u + 2 * (count & 3);
- WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
ce->timeline = intel_timeline_get(tl);
- rq = intel_context_create_request(ce);
+ /* Ensure the timeline is mapped; the map is created on first pin */
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ /*
+ * Start at a new wrap, and set seqno right before another wrap,
+ * saving 30 minutes of nops
+ */
+ tl->seqno = -12u + 2 * (count & 3);
+ __intel_timeline_get_seqno(tl, &dummy);
+
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
@@ -1034,32 +1068,35 @@ static int live_hwsp_read(void *arg)
GFP_KERNEL);
if (err < 0) {
i915_request_add(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
- mutex_lock(&watcher[0].rq->context->timeline->mutex);
+ switch_tl_lock(rq, watcher[0].rq);
err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
if (err == 0)
err = emit_read_hwsp(watcher[0].rq, /* before */
rq->fence.seqno, hwsp,
&watcher[0].addr);
- mutex_unlock(&watcher[0].rq->context->timeline->mutex);
+ switch_tl_lock(watcher[0].rq, rq);
if (err) {
i915_request_add(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
- mutex_lock(&watcher[1].rq->context->timeline->mutex);
+ switch_tl_lock(rq, watcher[1].rq);
err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
if (err == 0)
err = emit_read_hwsp(watcher[1].rq, /* after */
rq->fence.seqno, hwsp,
&watcher[1].addr);
- mutex_unlock(&watcher[1].rq->context->timeline->mutex);
+ switch_tl_lock(watcher[1].rq, rq);
if (err) {
i915_request_add(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
@@ -1068,6 +1105,7 @@ static int live_hwsp_read(void *arg)
i915_request_add(rq);
rq = wrap_timeline(rq);
+ intel_context_unpin(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -1107,8 +1145,8 @@ static int live_hwsp_read(void *arg)
3 * watcher[1].rq->ring->size)
break;
- } while (!__igt_timeout(end_time, NULL));
- WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
+ } while (!__igt_timeout(end_time, NULL) &&
+ count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
pr_info("%s: simulated %lu wraps\n", engine->name, count);
err = check_watcher(&watcher[1], "after", cmp_gte);
@@ -1153,9 +1191,7 @@ static int live_hwsp_rollover_kernel(void *arg)
}
GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
- tl->seqno = 0;
- timeline_rollback(tl);
- timeline_rollback(tl);
+ tl->seqno = -2u;
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) {
@@ -1235,11 +1271,14 @@ static int live_hwsp_rollover_user(void *arg)
goto out;
tl = ce->timeline;
- if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ if (!tl->has_initial_breadcrumb)
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err)
goto out;
- timeline_rollback(tl);
- timeline_rollback(tl);
+ tl->seqno = -4u;
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) {
@@ -1248,7 +1287,7 @@ static int live_hwsp_rollover_user(void *arg)
this = intel_context_create_request(ce);
if (IS_ERR(this)) {
err = PTR_ERR(this);
- goto out;
+ goto out_unpin;
}
pr_debug("%s: create fence.seqnp:%d\n",
@@ -1267,17 +1306,18 @@ static int live_hwsp_rollover_user(void *arg)
if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
pr_err("Wait for timeline wrap timed out!\n");
err = -EIO;
- goto out;
+ goto out_unpin;
}
for (i = 0; i < ARRAY_SIZE(rq); i++) {
if (!i915_request_completed(rq[i])) {
pr_err("Pre-wrap request not completed!\n");
err = -EINVAL;
- goto out;
+ goto out_unpin;
}
}
-
+out_unpin:
+ intel_context_unpin(ce);
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
@@ -1319,13 +1359,13 @@ static int live_hwsp_recycle(void *arg)
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(gt);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
break;
}
- rq = tl_write(tl, engine, count);
+ rq = checked_tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 2070b91cb607..19850489a3fc 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2018 Intel Corporation
*/
@@ -112,7 +111,7 @@ read_nonprivs(struct intel_context *ce)
i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
- cs = i915_gem_object_pin_map(result, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_obj;
@@ -218,7 +217,7 @@ static int check_whitelist(struct intel_context *ce)
i915_gem_object_lock(results, NULL);
intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
- i915_gem_object_unlock(results);
+
if (intel_gt_is_wedged(engine->gt))
err = -EIO;
if (err)
@@ -246,6 +245,7 @@ static int check_whitelist(struct intel_context *ce)
i915_gem_object_unpin_map(results);
out_put:
+ i915_gem_object_unlock(results);
i915_gem_object_put(results);
return err;
}
@@ -490,7 +490,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
u32 *cs, *results;
sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
- scratch = __vm_create_scratch_for_read(ce->vm, sz);
+ scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -502,6 +502,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
for (i = 0; i < engine->whitelist.count; i++) {
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ struct i915_gem_ww_ctx ww;
u64 addr = scratch->node.start;
struct i915_request *rq;
u32 srm, lrm, rsvd;
@@ -517,6 +518,29 @@ static int check_dirty_whitelist(struct intel_context *ce)
ro_reg = ro_register(reg);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ cs = NULL;
+ err = i915_gem_object_lock(scratch->obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock(batch->obj, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_ctx;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_unmap_batch;
+ }
+
/* Clear non priv flags */
reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
@@ -528,12 +552,6 @@ static int check_dirty_whitelist(struct intel_context *ce)
pr_debug("%s: Writing garbage to %x\n",
engine->name, reg);
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_batch;
- }
-
/* SRM original */
*cs++ = srm;
*cs++ = reg;
@@ -580,11 +598,12 @@ static int check_dirty_whitelist(struct intel_context *ce)
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
+ cs = NULL;
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_batch;
+ goto out_unmap_scratch;
}
if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
@@ -593,20 +612,16 @@ static int check_dirty_whitelist(struct intel_context *ce)
goto err_request;
}
- i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
if (err)
goto err_request;
- i915_vma_lock(scratch);
err = i915_request_await_object(rq, scratch->obj, true);
if (err == 0)
err = i915_vma_move_to_active(scratch, rq,
EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
if (err)
goto err_request;
@@ -622,13 +637,7 @@ err_request:
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
intel_gt_set_wedged(engine->gt);
- goto out_batch;
- }
-
- results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(results)) {
- err = PTR_ERR(results);
- goto out_batch;
+ goto out_unmap_scratch;
}
GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
@@ -639,7 +648,7 @@ err_request:
pr_err("%s: Unable to write to whitelisted register %x\n",
engine->name, reg);
err = -EINVAL;
- goto out_unpin;
+ goto out_unmap_scratch;
}
} else {
rsvd = 0;
@@ -705,15 +714,27 @@ err_request:
err = -EINVAL;
}
-out_unpin:
+out_unmap_scratch:
i915_gem_object_unpin_map(scratch->obj);
+out_unmap_batch:
+ if (cs)
+ i915_gem_object_unpin_map(batch->obj);
+out_ctx:
+ intel_context_unpin(ce);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
if (err)
break;
}
if (igt_flush_test(engine->i915))
err = -EIO;
-out_batch:
+
i915_vma_unpin_and_release(&batch, 0);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
@@ -847,7 +868,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
if (IS_ERR(batch))
return PTR_ERR(batch);
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
@@ -982,11 +1003,11 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
u32 *a, *b;
int i, err;
- a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
if (IS_ERR(a))
return PTR_ERR(a);
- b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
if (IS_ERR(b)) {
err = PTR_ERR(b);
goto err_a;
@@ -1030,14 +1051,14 @@ static int live_isolated_whitelist(void *arg)
for (i = 0; i < ARRAY_SIZE(client); i++) {
client[i].scratch[0] =
- __vm_create_scratch_for_read(gt->vm, 4096);
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
goto err;
}
client[i].scratch[1] =
- __vm_create_scratch_for_read(gt->vm, 4096);
+ __vm_create_scratch_for_read_pinned(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
@@ -1220,7 +1241,11 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- intel_engine_reset(engine, "live_workarounds:idle");
+ ret = intel_engine_reset(engine, "live_workarounds:idle");
+ if (ret) {
+ pr_err("%s: Reset failed while idle\n", engine->name);
+ goto err;
+ }
ok = verify_wa_lists(gt, &lists, "after idle reset");
if (!ok) {
@@ -1241,12 +1266,18 @@ live_engine_reset_workarounds(void *arg)
ret = request_add_spin(rq, &spin);
if (ret) {
- pr_err("Spinner failed to start\n");
+ pr_err("%s: Spinner failed to start\n", engine->name);
igt_spinner_fini(&spin);
goto err;
}
- intel_engine_reset(engine, "live_workarounds:active");
+ ret = intel_engine_reset(engine, "live_workarounds:active");
+ if (ret) {
+ pr_err("%s: Reset failed on an active spinner\n",
+ engine->name);
+ igt_spinner_fini(&spin);
+ goto err;
+ }
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
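
The check_dirty_whitelist() rework above is an instance of the ww-mutex transaction idiom this series rolls out everywhere: acquire every object lock under one i915_gem_ww_ctx, and on -EDEADLK back off and retry from the top. A minimal sketch of that shape, using only the calls visible in the hunks above (the two objects and the body are placeholders):

static int ww_lock_two(struct drm_i915_gem_object *a,
		       struct drm_i915_gem_object *b)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false); /* false: uninterruptible waits */
retry:
	err = i915_gem_object_lock(a, &ww);
	if (!err)
		err = i915_gem_object_lock(b, &ww);
	if (!err) {
		/* ... both objects are locked; do the real work here ... */
	}

	if (err == -EDEADLK) {
		/* Lost a lock race against another transaction: drop
		 * everything, wait for the winner, start over. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww); /* releases any locks still held */
	return err;
}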
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index a4d8fc9e2374..f8f02aab842b 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -39,7 +39,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
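
This is the first of many mechanical pin_map to pin_map_unlocked substitutions in this series. The working assumption behind them, consistent with this hunk and with selftest_workarounds.c above: i915_gem_object_pin_map() now expects the caller to already hold the object's ww lock, while the _unlocked variant takes and drops that lock itself. A sketch of the two call styles (obj and ww come from surrounding context):

	void *vaddr;
	int err;

	/* Outside a ww transaction: let the helper lock the object */
	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* Inside a ww transaction: lock explicitly, then use the plain form */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);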
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 4545e90e3bf1..78305b2ec89d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -682,7 +682,7 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index c92f2c056db4..c36d5eb5bbb9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -335,7 +335,7 @@ static int guc_log_map(struct intel_guc_log *log)
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
- vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -744,7 +744,7 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
if (!obj)
return 0;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
DRM_DEBUG("Failed to pin object\n");
drm_puts(p, "(log data unaccessible)\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 23dc0aeaa0ab..92688a9b6717 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -206,9 +206,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
+ priolist_for_each_request_consume(rq, rn, p) {
if (last && rq->context != last->context) {
if (port == last_port)
goto done;
@@ -238,9 +237,10 @@ done:
execlists->active = execlists->inflight;
}
-static void guc_submission_tasklet(unsigned long data)
+static void guc_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs * const engine =
+ from_tasklet(engine, t, execlists.tasklet);
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port, *rq;
unsigned long flags;
@@ -361,9 +361,8 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
- int i;
- priolist_for_each_request_consume(rq, rn, p, i) {
+ priolist_for_each_request_consume(rq, rn, p) {
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
dma_fence_set_error(&rq->fence, -EIO);
@@ -610,7 +609,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = guc_submit_request;
engine->schedule = i915_schedule;
- engine->execlists.tasklet.func = guc_submission_tasklet;
+ engine->execlists.tasklet.callback = guc_submission_tasklet;
engine->reset.prepare = guc_reset_prepare;
engine->reset.rewind = guc_reset_rewind;
@@ -702,8 +701,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
*/
GEM_BUG_ON(INTEL_GEN(i915) < 11);
- tasklet_init(&engine->execlists.tasklet,
- guc_submission_tasklet, (unsigned long)engine);
+ tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
guc_default_vfuncs(engine);
guc_default_irqs(engine);
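
The tasklet change here is the stock tasklet_setup()/from_tasklet() migration: the callback now receives the tasklet_struct itself and recovers its container via a container_of() wrapper, rather than a cast unsigned long cookie. The pattern in isolation (my_engine, my_tasklet_fn and do_bottom_half_work are illustrative names, not from this patch):

#include <linux/interrupt.h>

struct my_engine {
	struct tasklet_struct tasklet;
	/* ... */
};

static void my_tasklet_fn(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the member name */
	struct my_engine *engine = from_tasklet(engine, t, tasklet);

	do_bottom_half_work(engine); /* placeholder for the handler body */
}

static void my_engine_init(struct my_engine *engine)
{
	/* replaces tasklet_init(&t, fn, (unsigned long)engine) */
	tasklet_setup(&engine->tasklet, my_tasklet_fn);
}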
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 65eeb44b397d..2126dd81ac38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -82,7 +82,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 67b06fde1225..df647c9a8d56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -44,9 +44,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
*
- * Note that RKL uses the same firmware as TGL.
+ * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
+ * firmware as TGL.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(ROCKETLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
fw_def(JASPERLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
@@ -537,7 +539,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
if (!intel_uc_fw_is_available(uc_fw))
return -ENOEXEC;
- err = i915_gem_object_pin_pages(uc_fw->obj);
+ err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ad86c5eb5bba..b490e3db2e38 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -374,6 +374,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
u8 next;
@@ -407,9 +408,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->gt->i915->drm.pdev, 0);
+ pci_resource_len(pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->gt->i915->drm.pdev, 2);
+ pci_resource_len(pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index fef1e857cefc..ca9c9e27a43d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -916,19 +916,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (!strncmp(cmd, "srm", 3) ||
!strncmp(cmd, "lrm", 3)) {
- if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
- offset != 0x21f0) {
+ if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
+ offset == 0x21f0 ||
+ (IS_BROADWELL(gvt->gt->i915) &&
+ offset == i915_mmio_reg_offset(INSTPM)))
+ return 0;
+ else {
gvt_vgpu_err("%s access to register (%x)\n",
cmd, offset);
return -EPERM;
- } else
- return 0;
+ }
}
if (!strncmp(cmd, "lrr-src", 7) ||
!strncmp(cmd, "lrr-dst", 7)) {
- gvt_vgpu_err("not allowed cmd %s\n", cmd);
- return -EPERM;
+ if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
+ return 0;
+ else {
+ gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
+ return -EPERM;
+ }
}
if (!strncmp(cmd, "pipe_ctrl", 9)) {
@@ -941,11 +948,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,
/* below are all lri handlers */
vreg = &vgpu_vreg(s->vgpu, offset);
- if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
- gvt_vgpu_err("%s access to non-render register (%x)\n",
- cmd, offset);
- return -EBADRQC;
- }
if (is_cmd_update_pdps(offset, s) &&
cmd_pdp_mmio_update_handler(s, offset, index))
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 62e6a14ad58e..9f1c209d9251 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -41,7 +41,7 @@ struct diff_mmio {
/* Compare two diff_mmio items. */
static int mmio_offset_compare(void *priv,
- struct list_head *a, struct list_head *b)
+ const struct list_head *a, const struct list_head *b)
{
struct diff_mmio *ma;
struct diff_mmio *mb;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 62a5b0dd2003..034c060f89d4 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -516,11 +516,27 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
port->dpcd = NULL;
}
+static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
+{
+ struct intel_vgpu_vblank_timer *vblank_timer;
+ struct intel_vgpu *vgpu;
+
+ vblank_timer = container_of(data, struct intel_vgpu_vblank_timer, timer);
+ vgpu = container_of(vblank_timer, struct intel_vgpu, vblank_timer);
+
+ /* Set vblank emulation request per-vGPU bit */
+ intel_gvt_request_service(vgpu->gvt,
+ INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id);
+ hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
+ return HRTIMER_RESTART;
+}
+
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer;
if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
return -EINVAL;
@@ -544,6 +560,14 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
port->id = resolution;
+ port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC;
+ vgpu->display.port_num = port_num;
+
+ /* Init hrtimer based on default refresh rate */
+ hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vblank_timer->timer.function = vblank_timer_fn;
+ vblank_timer->vrefresh_k = port->vrefresh_k;
+ vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
emulate_monitor_status_change(vgpu);
@@ -551,41 +575,44 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
}
/**
- * intel_gvt_check_vblank_emulation - check if vblank emulation timer should
- * be turned on/off when a virtual pipe is enabled/disabled.
- * @gvt: a GVT device
+ * vgpu_update_vblank_emulation - Update per-vGPU vblank_timer
+ * @vgpu: vGPU operated
+ * @turnon: Turn ON/OFF vblank_timer
*
- * This function is used to turn on/off vblank timer according to currently
- * enabled/disabled virtual pipes.
+ * This function is used to turn on/off or update the per-vGPU vblank_timer
+ * when PIPECONF is enabled or disabled. vblank_timer period is also updated
+ * if guest changed the refresh rate.
*
*/
-void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
{
- struct intel_gvt_irq *irq = &gvt->irq;
- struct intel_vgpu *vgpu;
- int pipe, id;
- int found = false;
-
- mutex_lock(&gvt->lock);
- for_each_active_vgpu(gvt, vgpu, id) {
- for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- if (pipe_is_enabled(vgpu, pipe)) {
- found = true;
- break;
- }
+ struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer;
+ struct intel_vgpu_port *port =
+ intel_vgpu_port(vgpu, vgpu->display.port_num);
+
+ if (turnon) {
+ /*
+ * Skip the re-enable if already active and vrefresh unchanged.
+ * Otherwise, stop the timer if it is already active and restart
+ * it with the new period.
+ */
+ if (vblank_timer->vrefresh_k != port->vrefresh_k ||
+ !hrtimer_active(&vblank_timer->timer)) {
+ /* Stop the timer before restarting it with the new period */
+ if (hrtimer_active(&vblank_timer->timer))
+ hrtimer_cancel(&vblank_timer->timer);
+
+ /* Make sure the new refresh rate is reflected in the timer period */
+ vblank_timer->vrefresh_k = port->vrefresh_k;
+ vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
+ hrtimer_start(&vblank_timer->timer,
+ ktime_add_ns(ktime_get(), vblank_timer->period),
+ HRTIMER_MODE_ABS);
}
- if (found)
- break;
+ } else {
+ /* Caller request to stop vblank */
+ hrtimer_cancel(&vblank_timer->timer);
}
-
- /* all the pipes are disabled */
- if (!found)
- hrtimer_cancel(&irq->vblank_timer.timer);
- else
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
- mutex_unlock(&gvt->lock);
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -617,7 +644,7 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
}
}
-static void emulate_vblank(struct intel_vgpu *vgpu)
+void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
int pipe;
@@ -628,24 +655,6 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
}
/**
- * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
- * @gvt: a GVT device
- *
- * This function is used to trigger vblank interrupts for vGPUs on GVT device
- *
- */
-void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
-{
- struct intel_vgpu *vgpu;
- int id;
-
- mutex_lock(&gvt->lock);
- for_each_active_vgpu(gvt, vgpu, id)
- emulate_vblank(vgpu);
- mutex_unlock(&gvt->lock);
-}
-
-/**
* intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
* @vgpu: a vGPU
* @connected: link state
@@ -753,6 +762,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
+
+ vgpu_update_vblank_emulation(vgpu, false);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index b59b34046e1e..f5616f99ef2f 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -36,6 +36,7 @@
#define _GVT_DISPLAY_H_
#include <linux/types.h>
+#include <linux/hrtimer.h>
struct intel_gvt;
struct intel_vgpu;
@@ -157,6 +158,7 @@ enum intel_vgpu_edid {
GVT_EDID_NUM,
};
+#define GVT_DEFAULT_REFRESH_RATE 60
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
@@ -164,6 +166,14 @@ struct intel_vgpu_port {
struct intel_vgpu_dpcd_data *dpcd;
int type;
enum intel_vgpu_edid id;
+ /* x1000 to get accurate 59.94, 29.97, 23.976, etc. in timing std. */
+ u32 vrefresh_k;
+};
+
+struct intel_vgpu_vblank_timer {
+ struct hrtimer timer;
+ u32 vrefresh_k;
+ u64 period;
};
static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -202,8 +212,8 @@ static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
}
}
-void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
-void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu);
+void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon);
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
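
vrefresh_k stores the refresh rate in millihertz, which is what the "x1000" comment above is getting at: it keeps fractional TV rates exact in integer math. The hrtimer period in display.c then comes out of one division. A quick check of that arithmetic under the defaults defined here (DIV64_U64_ROUND_CLOSEST and the time constants are the kernel's own):

#include <linux/math64.h>
#include <linux/time64.h> /* NSEC_PER_SEC, MSEC_PER_SEC */

/* vrefresh_k = refresh rate in Hz * 1000, e.g. 59.94 Hz -> 59940 */
static u64 vblank_period_ns(u32 vrefresh_k)
{
	/* 10^12 / (Hz * 10^3) = nanoseconds per frame */
	return DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC,
				       vrefresh_k);
}

/*
 * 60.00 Hz (GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC, the default):
 *   10^12 / 60000 = 16666667 ns (~16.67 ms)
 * 59.94 Hz: 10^12 / 59940 = 16683350 ns
 */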
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index c3eb3838fe88..d4f883f35b95 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -218,7 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
- i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
i915_gem_object_set_readonly(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 990a181094e3..1a8274a3f4b1 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -76,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct gvt_firmware_header *h;
void *firmware;
void *p;
@@ -127,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
@@ -151,7 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
@@ -205,7 +205,7 @@ invalid_firmware:
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
@@ -240,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path);
- ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
+ ret = request_firmware(&fw, path, gvt->gt->i915->drm.dev);
kfree(path);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 897c007ea96a..9478c132d7b6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -587,12 +587,6 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
entry, index, false, 0, mm->vgpu);
}
-static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
- struct intel_gvt_gtt_entry *entry, unsigned long index)
-{
- _ppgtt_set_root_entry(mm, entry, index, true);
-}
-
static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
@@ -746,7 +740,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -831,7 +825,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
- struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr;
int ret;
@@ -1159,8 +1153,8 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
* @vgpu: target vgpu
* @entry: target pfn's gtt entry
*
- * Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition,
- * negtive if found err.
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
+ * negative if found err.
*/
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *entry)
@@ -2402,7 +2396,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
dma_addr_t daddr;
if (drm_WARN_ON(&i915->drm,
@@ -2460,7 +2454,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2732,7 +2726,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
- struct device *dev = &gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = gvt->gt->i915->drm.dev;
dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2781,7 +2775,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
- struct device *dev = &gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = gvt->gt->i915->drm.dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index d1d8ee4a5f16..e7c2babcee8b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -46,32 +46,23 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
- const char *name)
+static struct intel_vgpu_type *
+intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
{
- const char *driver_name =
- dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
- int i;
-
- name += strlen(driver_name) + 1;
- for (i = 0; i < gvt->num_types; i++) {
- struct intel_vgpu_type *t = &gvt->types[i];
-
- if (!strncmp(t->name, name, sizeof(t->name)))
- return t;
- }
-
- return NULL;
+ if (WARN_ON(type_group_id >= gvt->num_types))
+ return NULL;
+ return &gvt->types[type_group_id];
}
-static ssize_t available_instances_show(struct kobject *kobj,
- struct device *dev, char *buf)
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
- void *gvt = kdev_to_i915(dev)->gvt;
+ void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
if (!type)
num = 0;
else
@@ -80,19 +71,19 @@ static ssize_t available_instances_show(struct kobject *kobj,
return sprintf(buf, "%u\n", num);
}
-static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
- char *buf)
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
-static ssize_t description_show(struct kobject *kobj, struct device *dev,
- char *buf)
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
{
struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(dev)->gvt;
+ void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
if (!type)
return 0;
@@ -126,7 +117,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
return true;
}
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
@@ -144,7 +135,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
gvt_vgpu_type_groups[i] = group;
}
- return true;
+ return 0;
unwind:
for (j = 0; j < i; j++) {
@@ -152,7 +143,7 @@ unwind:
kfree(group);
}
- return false;
+ return -ENOMEM;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
@@ -189,7 +180,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -203,6 +194,22 @@ static void init_device_info(struct intel_gvt *gvt)
info->msi_cap_offset = pdev->msi_cap;
}
+static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
+
+ mutex_lock(&gvt->lock);
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
+ (void *)&gvt->service_request)) {
+ if (vgpu->active)
+ intel_vgpu_emulate_vblank(vgpu);
+ }
+ }
+ mutex_unlock(&gvt->lock);
+}
+
static int gvt_service_thread(void *data)
{
struct intel_gvt *gvt = (struct intel_gvt *)data;
@@ -220,9 +227,7 @@ static int gvt_service_thread(void *data)
if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
continue;
- if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
- (void *)&gvt->service_request))
- intel_gvt_emulate_vblank(gvt);
+ intel_gvt_test_and_emulate_vblank(gvt);
if (test_bit(INTEL_GVT_REQUEST_SCHED,
(void *)&gvt->service_request) ||
@@ -278,7 +283,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
- intel_gvt_clean_irq(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
@@ -337,7 +341,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
ret = intel_gvt_init_gtt(gvt);
if (ret)
- goto out_clean_irq;
+ goto out_free_firmware;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
@@ -360,7 +364,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret == false) {
+ if (ret) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
@@ -376,7 +380,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- intel_gvt_host.dev = &i915->drm.pdev->dev;
+ intel_gvt_host.dev = i915->drm.dev;
intel_gvt_host.initialized = true;
return 0;
@@ -392,8 +396,6 @@ out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
-out_clean_irq:
- intel_gvt_clean_irq(gvt);
out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 03c993d68f10..88ab360fcb31 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -133,6 +133,7 @@ struct intel_vgpu_display {
struct intel_vgpu_i2c_edid i2c_edid;
struct intel_vgpu_port ports[I915_MAX_PORTS];
struct intel_vgpu_sbi sbi;
+ enum port port_num;
};
struct vgpu_sched_ctl {
@@ -214,6 +215,7 @@ struct intel_vgpu {
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
+ struct intel_vgpu_vblank_timer vblank_timer;
u32 scan_nonprivbb;
};
@@ -346,13 +348,16 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
}
enum {
- INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
-
/* Scheduling trigger by timer */
- INTEL_GVT_REQUEST_SCHED = 1,
+ INTEL_GVT_REQUEST_SCHED = 0,
/* Scheduling trigger by event */
- INTEL_GVT_REQUEST_EVENT_SCHED = 2,
+ INTEL_GVT_REQUEST_EVENT_SCHED = 1,
+
+ /* per-vGPU vblank emulation request */
+ INTEL_GVT_REQUEST_EMULATE_VBLANK = 2,
+ INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK
+ + GVT_MAX_VGPU,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -569,8 +574,8 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
- struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
- const char *name);
+ struct intel_vgpu_type *(*gvt_find_vgpu_type)(
+ struct intel_gvt *gvt, unsigned int type_group_id);
bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
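
The reordered enum gives every vGPU its own vblank bit in gvt->service_request: bits 0 and 1 stay the scheduler triggers, and vGPU id N maps to bit INTEL_GVT_REQUEST_EMULATE_VBLANK + N (with the max_support_vgpus of 8 seen earlier, bits 2 through 9). Both ends of the handshake, condensed from the display.c and gvt.c hunks above:

	/* producer (per-vGPU hrtimer callback): raise this vGPU's bit */
	intel_gvt_request_service(vgpu->gvt,
				  INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id);

	/* consumer (service thread): test-and-clear each vGPU's bit */
	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
				       (void *)&gvt->service_request) &&
		    vgpu->active)
			intel_vgpu_emulate_vblank(vgpu);
	}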
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6eeaeecb7f85..dda320749c65 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -39,6 +39,7 @@
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
+#include "display/intel_display_types.h"
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
@@ -443,6 +444,254 @@ static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+/*
+ * Only PIPE_A is enabled in current vGPU display and PIPE_A is tied to
+ * TRANSCODER_A in HW. DDI/PORT could be PORT_x, depending on
+ * setup_virtual_dp_monitor().
+ * emulate_monitor_status_change() sets up the PLL for PORT_x as the
+ * initially enabled DPLL. Later the guest driver may set up a different
+ * DPLLx when setting a mode.
+ * So the correct sequence to find the DP stream clock is:
+ * Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
+ * Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
+ * The refresh rate can then be calculated from the following equations:
+ * Pixel clock = h_total * v_total * refresh_rate
+ * stream clock = Pixel clock
+ * ls_clk = DP bitrate
+ * Link M/N = strm_clk / ls_clk
+ */
+
+static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
+{
+ u32 dp_br = 0;
+ u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));
+
+ switch (ddi_pll_sel) {
+ case PORT_CLK_SEL_LCPLL_2700:
+ dp_br = 270000 * 2;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ dp_br = 135000 * 2;
+ break;
+ case PORT_CLK_SEL_LCPLL_810:
+ dp_br = 81000 * 2;
+ break;
+ case PORT_CLK_SEL_SPLL:
+ {
+ switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ dp_br = 81000 * 2;
+ break;
+ case SPLL_FREQ_1350MHz:
+ dp_br = 135000 * 2;
+ break;
+ case SPLL_FREQ_2700MHz:
+ dp_br = 270000 * 2;
+ break;
+ default:
+ gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
+ vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
+ break;
+ }
+ break;
+ }
+ case PORT_CLK_SEL_WRPLL1:
+ case PORT_CLK_SEL_WRPLL2:
+ {
+ u32 wrpll_ctl;
+ int refclk, n, p, r;
+
+ if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
+ wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
+ else
+ wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));
+
+ switch (wrpll_ctl & WRPLL_REF_MASK) {
+ case WRPLL_REF_PCH_SSC:
+ refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
+ vgpu->id, port_name(port), wrpll_ctl);
+ goto out;
+ }
+
+ r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ dp_br = (refclk * n / 10) / (p * r) * 2;
+ break;
+ }
+ default:
+ gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
+ vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
+ break;
+ }
+
+out:
+ return dp_br;
+}
+
+static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
+{
+ u32 dp_br = 0;
+ int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
+ enum dpio_phy phy = DPIO_PHY0;
+ enum dpio_channel ch = DPIO_CH0;
+ struct dpll clock = {0};
+ u32 temp;
+
+ /* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
+ switch (port) {
+ case PORT_A:
+ phy = DPIO_PHY1;
+ ch = DPIO_CH0;
+ break;
+ case PORT_B:
+ phy = DPIO_PHY0;
+ ch = DPIO_CH0;
+ break;
+ case PORT_C:
+ phy = DPIO_PHY0;
+ ch = DPIO_CH1;
+ break;
+ default:
+ gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
+ goto out;
+ }
+
+ temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
+ if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
+ gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
+ vgpu->id, port_name(port), temp);
+ goto out;
+ }
+
+ clock.m1 = 2;
+ clock.m2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0)) & PORT_PLL_M2_MASK) << 22;
+ if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)) & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)) & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+ clock.m = clock.m1 * clock.m2;
+ clock.p = clock.p1 * clock.p2;
+
+ if (clock.n == 0 || clock.p == 0) {
+ gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
+ goto out;
+ }
+
+ clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
+ clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
+
+ dp_br = clock.dot / 5;
+
+out:
+ return dp_br;
+}
+
+static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
+{
+ u32 dp_br = 0;
+ enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;
+
+ /* Find the enabled DPLL for the DDI/PORT */
+ if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
+ (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
+ dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
+ DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
+ } else {
+ gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
+ vgpu->id, port_name(port));
+ return dp_br;
+ }
+
+ /* Find PLL output frequency from correct DPLL, and get bit rate */
+ switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
+ DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ dp_br = 81000 * 2;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ dp_br = 108000 * 2;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ dp_br = 135000 * 2;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ dp_br = 162000 * 2;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ dp_br = 216000 * 2;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ dp_br = 270000 * 2;
+ break;
+ default:
+ dp_br = 0;
+ gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
+ vgpu->id, port_name(port), dpll_id);
+ }
+
+ return dp_br;
+}
+
+static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ enum port port;
+ u32 dp_br, link_m, link_n, htotal, vtotal;
+
+ /* Find the DDI/PORT assigned to TRANSCODER_A; expect PORT_B or PORT_D */
+ port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
+ TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+ if (port != PORT_B && port != PORT_D) {
+ gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
+ return;
+ }
+
+ /* Calculate DP bitrate from PLL */
+ if (IS_BROADWELL(dev_priv))
+ dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
+ else if (IS_BROXTON(dev_priv))
+ dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
+ else
+ dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
+
+ /* Get DP link symbol clock M/N */
+ link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
+ link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
+
+ /* Get H/V total from transcoder timing */
+ htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+
+ if (dp_br && link_n && htotal && vtotal) {
+ u64 pixel_clk = 0;
+ u32 new_rate = 0;
+ u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
+
+ /* Calculate pixel clock by (ls_clk * M / N) */
+ pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
+ pixel_clk *= MSEC_PER_SEC;
+
+ /* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
+ new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
+
+ if (*old_rate != new_rate)
+ *old_rate = new_rate;
+
+ gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
+ vgpu->id, pipe_name(PIPE_A), new_rate);
+ }
+}
+
static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -451,14 +700,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & PIPECONF_ENABLE)
+ if (data & PIPECONF_ENABLE) {
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
- else
+ vgpu_update_refresh_rate(vgpu);
+ vgpu_update_vblank_emulation(vgpu, true);
+ } else {
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
- /* vgpu_lock already hold by emulate mmio r/w */
- mutex_unlock(&vgpu->vgpu_lock);
- intel_gvt_check_vblank_emulation(vgpu->gvt);
- mutex_lock(&vgpu->vgpu_lock);
+ vgpu_update_vblank_emulation(vgpu, false);
+ }
return 0;
}
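
Working the equations from the comment block above with concrete numbers makes the millihertz plumbing visible. The timing values below are the standard CEA-861 1080p60 mode, chosen purely for illustration; nothing in this patch hardcodes them:

/*
 * 1920x1080@60: pixel clock 148.5 MHz, htotal = 2200, vtotal = 1125.
 * (The code adds 1 to htotal/vtotal because the HTOTAL/VTOTAL registers
 * store total minus one.)
 *
 * Link M/N recovers the pixel clock:  pixel_clk = ls_clk * M / N
 * Refresh rate in millihertz:
 *   refresh_k = 148500000 Hz * 1000 / (2200 * 1125)
 *             = 148500000000 / 2475000
 *             = 60000 mHz  ->  60.00 Hz
 *
 * which is the value vgpu_update_refresh_rate() stores in
 * port->vrefresh_k, and which display.c turns into a ~16.67 ms
 * hrtimer period.
 */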
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 7498878e6289..497d28ce47df 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -647,38 +647,6 @@ static void init_events(
}
}
-static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
-{
- struct intel_gvt_vblank_timer *vblank_timer;
- struct intel_gvt_irq *irq;
- struct intel_gvt *gvt;
-
- vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
- irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
- gvt = container_of(irq, struct intel_gvt, irq);
-
- intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
- hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
- return HRTIMER_RESTART;
-}
-
-/**
- * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
- * @gvt: a GVT device
- *
- * This function is called at driver unloading stage, to clean up GVT-g IRQ
- * emulation subsystem.
- *
- */
-void intel_gvt_clean_irq(struct intel_gvt *gvt)
-{
- struct intel_gvt_irq *irq = &gvt->irq;
-
- hrtimer_cancel(&irq->vblank_timer.timer);
-}
-
-#define VBLANK_TIMER_PERIOD 16000000
-
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
* @gvt: a GVT device
@@ -692,7 +660,6 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
int intel_gvt_init_irq(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
- struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
gvt_dbg_core("init irq framework\n");
@@ -707,9 +674,5 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
init_irq_map(irq);
- hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vblank_timer->timer.function = vblank_timer_fn;
- vblank_timer->period = VBLANK_TIMER_PERIOD;
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 287cd142629e..6c47d3e33161 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -201,11 +201,6 @@ struct intel_gvt_irq_map {
u32 down_irq_bitmask;
};
-struct intel_gvt_vblank_timer {
- struct hrtimer timer;
- u64 period;
-};
-
/* structure containing device specific IRQ state */
struct intel_gvt_irq {
struct intel_gvt_irq_ops *ops;
@@ -214,11 +209,9 @@ struct intel_gvt_irq {
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
struct intel_gvt_irq_map *irq_map;
- struct intel_gvt_vblank_timer vblank_timer;
};
int intel_gvt_init_irq(struct intel_gvt *gvt);
-void intel_gvt_clean_irq(struct intel_gvt *gvt);
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index b4348256ae95..65ff43cfc0f7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -221,7 +221,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
struct page *page = NULL;
int ret;
@@ -244,7 +244,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
+ struct device *dev = vgpu->gvt->gt->i915->drm.dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
@@ -689,7 +689,7 @@ static void kvmgt_put_vfio_device(void *vgpu)
vfio_device_put(vdev->vfio_device);
}
-static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
+static int intel_vgpu_create(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
@@ -700,10 +700,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
+ mdev_get_type_group_id(mdev));
if (!type) {
- gvt_vgpu_err("failed to find type %s to create\n",
- kobject_name(kobj));
ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 6a16d0ca7cda..9039787f123a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -300,8 +300,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&vgpu->vgpu_lock);
mutex_lock(&gvt->lock);
- if (idr_is_empty(&gvt->vgpu_idr))
- intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 3bc616cc1ad2..cf9a3d384971 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -293,18 +293,13 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
- struct active_node *node, *prealloc;
+ struct active_node *node;
struct rb_node **p, *parent;
node = __active_lookup(ref, idx);
if (likely(node))
return &node->base;
- /* Preallocate a replacement, just in case */
- prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
- if (!prealloc)
- return NULL;
-
spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
@@ -314,10 +309,8 @@ active_instance(struct i915_active *ref, u64 idx)
parent = *p;
node = rb_entry(parent, struct active_node, node);
- if (node->timeline == idx) {
- kmem_cache_free(global.slab_cache, prealloc);
+ if (node->timeline == idx)
goto out;
- }
if (node->timeline < idx)
p = &parent->rb_right;
@@ -325,7 +318,14 @@ active_instance(struct i915_active *ref, u64 idx)
p = &parent->rb_left;
}
- node = prealloc;
+ /*
+ * XXX: We should preallocate this before i915_active_ref() is ever
+ * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
+ */
+ node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+ if (!node)
+ goto out;
+
__i915_active_fence_init(&node->base, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 20babbdb297d..3a2f6eecb2fc 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -48,6 +48,8 @@ static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent
{
struct i915_buddy_block *block;
+ GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
+
block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
if (!block)
return NULL;
@@ -56,6 +58,7 @@ static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent
block->header |= order;
block->parent = parent;
+ GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
return block;
}
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index ed41f3507cdc..9ce5200f4001 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -15,7 +15,9 @@ struct i915_buddy_block {
#define I915_BUDDY_ALLOCATED (1 << 10)
#define I915_BUDDY_FREE (2 << 10)
#define I915_BUDDY_SPLIT (3 << 10)
-#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(9, 0)
+/* Free to be used, if needed in the future */
+#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
u64 header;
struct i915_buddy_block *left;
@@ -34,7 +36,8 @@ struct i915_buddy_block {
struct list_head tmp_link;
};
-#define I915_BUDDY_MAX_ORDER I915_BUDDY_HEADER_ORDER
+/* Order-zero must be at least PAGE_SIZE */
+#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
/*
* Binary Buddy System.
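
A quick sanity check on the new header layout, assuming the usual 4 KiB pages (PAGE_SHIFT = 12): I915_BUDDY_MAX_ORDER becomes 63 - 12 = 51, every legal order fits in the 6-bit GENMASK_ULL(5, 0) field, the block size computation never overflows a u64, and bits 9:6 become the spare I915_BUDDY_HEADER_UNUSED range that the new GEM_BUG_ON in i915_buddy.c asserts stays clear. An illustrative size helper (minimum chunk size assumed to be PAGE_SIZE):

/* size of an order-N block when the minimum chunk is one page */
static inline u64 buddy_block_bytes(unsigned int order)
{
	/* order 51 with 4 KiB pages: 4096ULL << 51 = 2^63, still within u64 */
	return (u64)PAGE_SIZE << order;
}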
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 5f86f5b2caf6..e6f1e93abbbb 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1144,38 +1144,20 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
- unsigned long offset, unsigned long length)
+ unsigned long offset, unsigned long length,
+ void *dst, const void *src)
{
- bool needs_clflush;
- void *dst, *src;
- int ret;
-
- dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
- if (IS_ERR(dst))
- return dst;
-
- ret = i915_gem_object_pin_pages(src_obj);
- if (ret) {
- i915_gem_object_unpin_map(dst_obj);
- return ERR_PTR(ret);
- }
-
- needs_clflush =
+ bool needs_clflush =
!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
- src = ERR_PTR(-ENODEV);
- if (needs_clflush && i915_has_memcpy_from_wc()) {
- src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
- if (!IS_ERR(src)) {
- i915_unaligned_memcpy_from_wc(dst,
- src + offset,
- length);
- i915_gem_object_unpin_map(src_obj);
- }
- }
- if (IS_ERR(src)) {
- unsigned long x, n, remain;
+ if (src) {
+ GEM_BUG_ON(!needs_clflush);
+ i915_unaligned_memcpy_from_wc(dst, src + offset, length);
+ } else {
+ struct scatterlist *sg;
void *ptr;
+ unsigned int x, sg_ofs;
+ unsigned long remain;
/*
* We can avoid clflushing partial cachelines before the write
@@ -1192,23 +1174,31 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; remain; n++) {
- int len = min(remain, PAGE_SIZE - x);
-
- src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
- if (needs_clflush)
- drm_clflush_virt_range(src + x, len);
- memcpy(ptr, src + x, len);
- kunmap_atomic(src);
-
- ptr += len;
- remain -= len;
- x = 0;
+ sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
+
+ while (remain) {
+ unsigned long sg_max = sg->length >> PAGE_SHIFT;
+
+ for (; remain && sg_ofs < sg_max; sg_ofs++) {
+ unsigned long len = min(remain, PAGE_SIZE - x);
+ void *map;
+
+ map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
+ if (needs_clflush)
+ drm_clflush_virt_range(map + x, len);
+ memcpy(ptr, map + x, len);
+ kunmap_atomic(map);
+
+ ptr += len;
+ remain -= len;
+ x = 0;
+ }
+
+ sg_ofs = 0;
+ sg = sg_next(sg);
}
}
- i915_gem_object_unpin_pages(src_obj);
-
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
@@ -1370,9 +1360,6 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
if (target_cmd_index == offset)
return 0;
- if (IS_ERR(jump_whitelist))
- return PTR_ERR(jump_whitelist);
-
if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
@@ -1382,10 +1369,14 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
return 0;
}
-static unsigned long *alloc_whitelist(u32 batch_length)
+unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
+ bool trampoline)
{
unsigned long *jmp;
+ if (trampoline)
+ return NULL;
+
/*
* We expect batch_length to be less than 256KiB for known users,
* i.e. we need at most an 8KiB bitmap allocation which should be
@@ -1423,14 +1414,16 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
- bool trampoline)
+ unsigned long *jump_whitelist,
+ void *shadow_map,
+ const void *batch_map)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- unsigned long *jump_whitelist;
u64 batch_addr, shadow_addr;
int ret = 0;
+ bool trampoline = !jump_whitelist;
GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
@@ -1438,16 +1431,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
batch->size));
GEM_BUG_ON(!batch_length);
- cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
- if (IS_ERR(cmd)) {
- DRM_DEBUG("CMD: Failed to copy batch\n");
- return PTR_ERR(cmd);
- }
-
- jump_whitelist = NULL;
- if (!trampoline)
- /* Defer failure until attempted use */
- jump_whitelist = alloc_whitelist(batch_length);
+ cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
+ shadow_map, batch_map);
shadow_addr = gen8_canonical_addr(shadow->node.start);
batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
@@ -1548,9 +1533,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
i915_gem_object_flush_map(shadow->obj);
- if (!IS_ERR_OR_NULL(jump_whitelist))
- kfree(jump_whitelist);
- i915_gem_object_unpin_map(shadow->obj);
return ret;
}
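
After this refactor, intel_engine_cmd_parser() no longer pins, maps, or allocates anything itself: the caller supplies the shadow and batch mappings plus the jump whitelist, and a NULL whitelist is what now encodes trampoline mode (bool trampoline = !jump_whitelist). A hedged sketch of the resulting call sequence; the real call site lives in the execbuffer code outside this excerpt, and error unwinding is elided:

	unsigned long *jump_whitelist;
	void *shadow_map;
	const void *batch_map;
	int err;

	/* returns NULL when trampoline is set, disabling the whitelist */
	jump_whitelist =
		intel_engine_cmd_parser_alloc_jump_whitelist(batch_length,
							     trampoline);

	/* the caller owns both mappings now; copy_batch() just copies */
	shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
	batch_map = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);

	/* batch_map may instead be NULL, in which case copy_batch()
	 * falls back to the page-by-page kmap walk shown above */
	err = intel_engine_cmd_parser(engine, batch,
				      batch_offset, batch_length,
				      shadow, jump_whitelist,
				      shadow_map, batch_map);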
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 88336ff4bf09..b654b7498bcd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -173,26 +173,30 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
break;
case I915_GGTT_VIEW_ROTATED:
- seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+ seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
vma->ggtt_view.rotated.plane[0].width,
vma->ggtt_view.rotated.plane[0].height,
- vma->ggtt_view.rotated.plane[0].stride,
+ vma->ggtt_view.rotated.plane[0].src_stride,
+ vma->ggtt_view.rotated.plane[0].dst_stride,
vma->ggtt_view.rotated.plane[0].offset,
vma->ggtt_view.rotated.plane[1].width,
vma->ggtt_view.rotated.plane[1].height,
- vma->ggtt_view.rotated.plane[1].stride,
+ vma->ggtt_view.rotated.plane[1].src_stride,
+ vma->ggtt_view.rotated.plane[1].dst_stride,
vma->ggtt_view.rotated.plane[1].offset);
break;
case I915_GGTT_VIEW_REMAPPED:
- seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+ seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
vma->ggtt_view.remapped.plane[0].width,
vma->ggtt_view.remapped.plane[0].height,
- vma->ggtt_view.remapped.plane[0].stride,
+ vma->ggtt_view.remapped.plane[0].src_stride,
+ vma->ggtt_view.remapped.plane[0].dst_stride,
vma->ggtt_view.remapped.plane[0].offset,
vma->ggtt_view.remapped.plane[1].width,
vma->ggtt_view.remapped.plane[1].height,
- vma->ggtt_view.remapped.plane[1].stride,
+ vma->ggtt_view.remapped.plane[1].src_stride,
+ vma->ggtt_view.remapped.plane[1].dst_stride,
vma->ggtt_view.remapped.plane[1].offset);
break;
@@ -677,7 +681,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
@@ -904,10 +908,10 @@ i915_drop_caches_set(void *data, u64 val)
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
- i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
+ i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
if (val & DROP_UNBOUND)
- i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(i915);
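/*
 * The extra NULL argument above matches the reworked shrinker entry
 * point; a hedged sketch of the assumed new prototype (the header hunk
 * is not shown in this diff): the leading parameter is an optional ww
 * acquire context, NULL when the caller holds no ww transaction, as in
 * the debugfs paths above.
 */
unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww,
			      struct drm_i915_private *i915,
			      unsigned long target,
			      unsigned long *nr_scanned,
			      unsigned int shrink);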
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8e9cb44e66e5..c2329bc44f55 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,7 +38,6 @@
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
-#include <acpi/video.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
@@ -47,11 +46,9 @@
#include <drm/drm_probe_helper.h>
#include "display/intel_acpi.h"
-#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_csr.h"
-#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
@@ -93,7 +90,7 @@ static const struct drm_driver driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
- int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
+ int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
dev_priv->bridge_dev =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
@@ -275,7 +272,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
- pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_KBL_GT_STEP(dev_priv, 0, STEP_A0);
pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
@@ -309,6 +306,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
+ intel_step_init(dev_priv);
intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
intel_uncore_init_early(&dev_priv->uncore, dev_priv);
@@ -352,7 +350,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
- intel_init_audio_hooks(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -461,7 +458,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
*/
static int i915_set_dma_info(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
int ret;
@@ -471,9 +467,9 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
* We don't have a max segment size, so set it to the max so sg's
* debugging layer doesn't complain
*/
- dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+ dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
if (ret)
goto mask_err;
@@ -493,7 +489,7 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
if (IS_I965G(i915) || IS_I965GM(i915))
mask_size = 32;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+ ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
if (ret)
goto mask_err;
@@ -513,7 +509,7 @@ mask_err:
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (i915_inject_probe_failure(dev_priv))
@@ -571,6 +567,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
+ ret = intel_gt_probe_lmem(&dev_priv->gt);
+ if (ret)
+ goto err_mem_regions;
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
drm_err(&dev_priv->drm, "failed to enable GGTT\n");
@@ -641,7 +641,7 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
@@ -666,43 +666,21 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(dev, 0) == 0) {
- i915_debugfs_register(dev_priv);
- if (HAS_DISPLAY(dev_priv))
- intel_display_debugfs_register(dev_priv);
- i915_setup_sysfs(dev_priv);
-
- /* Depends on sysfs having been initialized */
- i915_perf_register(dev_priv);
- } else
+ if (drm_dev_register(dev, 0)) {
drm_err(&dev_priv->drm,
"Failed to register driver for userspace access!\n");
-
- if (HAS_DISPLAY(dev_priv)) {
- /* Must be done after probing outputs */
- intel_opregion_register(dev_priv);
- acpi_video_register();
+ return;
}
- intel_gt_driver_register(&dev_priv->gt);
+ i915_debugfs_register(dev_priv);
+ i915_setup_sysfs(dev_priv);
- intel_audio_init(dev_priv);
+ /* Depends on sysfs having been initialized */
+ i915_perf_register(dev_priv);
- /*
- * Some ports require correctly set-up hpd registers for detection to
- * work properly (leading to ghost connected connector status), e.g. VGA
- * on gm45. Hence we can only set up the initial fbdev config after hpd
- * irqs are fully enabled. We do it last so that the async config
- * cannot run before the connectors are registered.
- */
- intel_fbdev_initial_config_async(dev);
+ intel_gt_driver_register(&dev_priv->gt);
- /*
- * We need to coordinate the hotplugs with the asynchronous fbdev
- * configuration, for which we use the fbdev->async_cookie.
- */
- if (HAS_DISPLAY(dev_priv))
- drm_kms_helper_poll_init(dev);
+ intel_display_driver_register(dev_priv);
intel_power_domains_enable(dev_priv);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
@@ -726,20 +704,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
- intel_fbdev_unregister(dev_priv);
- intel_audio_deinit(dev_priv);
-
- /*
- * After flushing the fbdev (incl. a late async config which will
- * have delayed queuing of a hotplug event), then flush the hotplug
- * events.
- */
- drm_kms_helper_poll_fini(&dev_priv->drm);
- drm_atomic_helper_shutdown(&dev_priv->drm);
+ intel_display_driver_unregister(dev_priv);
intel_gt_driver_unregister(&dev_priv->gt);
- acpi_video_unregister();
- intel_opregion_unregister(dev_priv);
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -841,7 +808,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
i915->params.fake_lmem_start) {
mkwrite_device_info(i915)->memory_regions =
- REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+ REGION_SMEM | REGION_LMEM | REGION_STOLEN_SMEM;
GEM_BUG_ON(!HAS_LMEM(i915));
}
}
@@ -1049,6 +1016,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_disable(&i915->runtime_pm);
+ intel_power_domains_disable(i915);
i915_gem_suspend(i915);
@@ -1064,7 +1033,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
+ /*
+	 * The only requirement is to reboot with display DC states disabled;
+	 * for now, leave all display power wells in the INIT power domain
+	 * enabled, matching the driver reload sequence.
+ */
+ intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ intel_runtime_pm_driver_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
@@ -1094,7 +1071,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1151,7 +1128,7 @@ get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
@@ -1281,7 +1258,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
/*
@@ -1719,7 +1696,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cb62ddba2035..9ec9277539ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -86,9 +86,10 @@
#include "gt/uc/intel_uc.h"
#include "intel_device_info.h"
+#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
-#include "intel_memory_region.h"
+#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
@@ -475,42 +476,6 @@ struct i915_drrs {
enum drrs_support_type type;
};
-struct i915_psr {
- struct mutex lock;
-
-#define I915_PSR_DEBUG_MODE_MASK 0x0f
-#define I915_PSR_DEBUG_DEFAULT 0x00
-#define I915_PSR_DEBUG_DISABLE 0x01
-#define I915_PSR_DEBUG_ENABLE 0x02
-#define I915_PSR_DEBUG_FORCE_PSR1 0x03
-#define I915_PSR_DEBUG_IRQ 0x10
-
- u32 debug;
- bool sink_support;
- bool enabled;
- struct intel_dp *dp;
- enum pipe pipe;
- enum transcoder transcoder;
- bool active;
- struct work_struct work;
- unsigned busy_frontbuffer_bits;
- bool sink_psr2_support;
- bool link_standby;
- bool colorimetry_support;
- bool psr2_enabled;
- bool psr2_sel_fetch_enabled;
- u8 sink_sync_latency;
- ktime_t last_entry_attempt;
- ktime_t last_exit;
- bool sink_not_reliable;
- bool irq_aux_error;
- u16 su_x_granularity;
- bool dc3co_enabled;
- u32 dc3co_exit_delay;
- struct delayed_work dc3co_work;
- struct drm_dp_vsc_sdp vsc;
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -590,12 +555,13 @@ struct i915_gem_mm {
struct notifier_block vmap_notifier;
struct shrinker shrinker;
+#ifdef CONFIG_MMU_NOTIFIER
/**
- * Workqueue to fault in userptr pages, flushed by the execbuf
- * when required but otherwise left to userspace to try again
- * on EAGAIN.
+	 * notifier_lock for mmu notifiers; memory may not be allocated
+	 * while holding this lock.
*/
- struct workqueue_struct *userptr_wq;
+ spinlock_t notifier_lock;
+#endif
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
@@ -618,7 +584,7 @@ i915_fence_timeout(const struct drm_i915_private *i915)
struct ddi_vbt_port_info {
/* Non-NULL if port present. */
- const struct child_device_config *child;
+ struct intel_bios_encoder_data *devdata;
int max_tmds_clock;
@@ -626,18 +592,9 @@ struct ddi_vbt_port_info {
u8 hdmi_level_shift;
u8 hdmi_level_shift_set:1;
- u8 supports_dvi:1;
- u8 supports_hdmi:1;
- u8 supports_dp:1;
- u8 supports_edp:1;
- u8 supports_typec_usb:1;
- u8 supports_tbt:1;
-
u8 alternate_aux_channel;
u8 alternate_ddc_pin;
- u8 dp_boost_level;
- u8 hdmi_boost_level;
int dp_max_link_rate; /* 0 for not limited by VBT */
};
@@ -649,6 +606,9 @@ enum psr_lines_to_wait {
};
struct intel_vbt_data {
+ /* bdb version */
+ u16 version;
+
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -974,8 +934,6 @@ struct drm_i915_private {
struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
- DECLARE_HASHTABLE(mm_structs, 7);
- spinlock_t mm_lock;
/* Kernel Modesetting */
@@ -1038,8 +996,6 @@ struct drm_i915_private {
struct i915_power_domains power_domains;
- struct i915_psr psr;
-
struct i915_gpu_error gpu_error;
struct drm_i915_gem_object *vlv_pctx;
@@ -1133,7 +1089,9 @@ struct drm_i915_private {
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
- INTEL_DRAM_LPDDR4
+ INTEL_DRAM_LPDDR4,
+ INTEL_DRAM_DDR5,
+ INTEL_DRAM_LPDDR5,
} type;
u8 num_qgv_points;
} dram_info;
@@ -1279,8 +1237,13 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
+#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.version)
+#define IS_DISPLAY_RANGE(i915, from, until) \
+ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
+#define IS_DISPLAY_VER(i915, v) (DISPLAY_VER(i915) == (v))
+
#define REVID_FOREVER 0xff
-#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
+#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
@@ -1305,6 +1268,17 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
+#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
+#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step)
+
+#define IS_DISPLAY_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
+ INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) <= (until))
+
+#define IS_GT_STEP(__i915, since, until) \
+ (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \
+ INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) <= (until))
+
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
@@ -1334,7 +1308,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
- return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
+ return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}
static __always_inline bool
@@ -1388,6 +1362,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
+#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
@@ -1408,6 +1383,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
+#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1490,34 +1466,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_BXT_REVID(dev_priv, since, until) \
(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
-enum {
- KBL_REVID_A0,
- KBL_REVID_B0,
- KBL_REVID_B1,
- KBL_REVID_C0,
- KBL_REVID_D0,
- KBL_REVID_D1,
- KBL_REVID_E0,
- KBL_REVID_F0,
- KBL_REVID_G0,
-};
-
-struct i915_rev_steppings {
- u8 gt_stepping;
- u8 disp_stepping;
-};
-
-/* Defined in intel_workarounds.c */
-extern const struct i915_rev_steppings kbl_revids[];
-
-#define IS_KBL_GT_REVID(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && \
- kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
- kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
-#define IS_KBL_DISP_REVID(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && \
- kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
- kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
+#define IS_KBL_GT_STEP(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until))
+#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
@@ -1549,55 +1501,17 @@ extern const struct i915_rev_steppings kbl_revids[];
#define IS_JSL_EHL_REVID(p, since, until) \
(IS_JSL_EHL(p) && IS_REVID(p, since, until))
-enum {
- TGL_REVID_A0,
- TGL_REVID_B0,
- TGL_REVID_B1,
- TGL_REVID_C0,
- TGL_REVID_D0,
-};
-
-#define TGL_UY_REVIDS_SIZE 4
-#define TGL_REVIDS_SIZE 2
+#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
+ (IS_TIGERLAKE(__i915) && \
+ IS_DISPLAY_STEP(__i915, since, until))
-extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
-extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
+#define IS_TGL_UY_GT_STEP(__i915, since, until) \
+ ((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
+ IS_GT_STEP(__i915, since, until))
-static inline const struct i915_rev_steppings *
-tgl_revids_get(struct drm_i915_private *dev_priv)
-{
- u8 revid = INTEL_REVID(dev_priv);
- u8 size;
- const struct i915_rev_steppings *tgl_revid_tbl;
-
- if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- tgl_revid_tbl = tgl_uy_revids;
- size = ARRAY_SIZE(tgl_uy_revids);
- } else {
- tgl_revid_tbl = tgl_revids;
- size = ARRAY_SIZE(tgl_revids);
- }
-
- revid = min_t(u8, revid, size - 1);
-
- return &tgl_revid_tbl[revid];
-}
-
-#define IS_TGL_DISP_REVID(p, since, until) \
- (IS_TIGERLAKE(p) && \
- tgl_revids_get(p)->disp_stepping >= (since) && \
- tgl_revids_get(p)->disp_stepping <= (until))
-
-#define IS_TGL_UY_GT_REVID(p, since, until) \
- ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids_get(p)->gt_stepping >= (since) && \
- tgl_revids_get(p)->gt_stepping <= (until))
-
-#define IS_TGL_GT_REVID(p, since, until) \
- (IS_TIGERLAKE(p) && \
- !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids_get(p)->gt_stepping >= (since) && \
- tgl_revids_get(p)->gt_stepping <= (until))
+#define IS_TGL_GT_STEP(__i915, since, until) \
+ (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
+ IS_GT_STEP(__i915, since, until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1612,6 +1526,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_DG1_REVID(p, since, until) \
(IS_DG1(p) && IS_REVID(p, since, until))
+#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
+ (IS_ALDERLAKE_S(__i915) && \
+ IS_DISPLAY_STEP(__i915, since, until))
+
+#define IS_ADLS_GT_STEP(__i915, since, until) \
+ (IS_ALDERLAKE_S(__i915) && \
+ IS_GT_STEP(__i915, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
@@ -1703,7 +1625,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
@@ -1718,6 +1640,8 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
+#define HAS_MSO(i915) (INTEL_GEN(i915) >= 12)
+
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
@@ -1735,7 +1659,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
-#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
+#define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10))
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
@@ -1760,6 +1684,9 @@ static inline bool run_as_guest(void)
return !hypervisor_is_type(X86_HYPER_NATIVE);
}
+#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
+ IS_ALDERLAKE_S(dev_priv))
+
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -1954,12 +1881,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
+ bool trampoline);
+
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
unsigned long batch_length,
struct i915_vma *shadow,
- bool trampoline);
+ unsigned long *jump_whitelist,
+ void *shadow_map,
+ const void *batch_map);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */
@@ -1973,9 +1905,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_mm.c */
-int remap_io_mapping(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size,
- struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase);
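/*
 * Illustrative use of the helpers introduced above; the macro names are
 * from this header, the workaround bodies are hypothetical. Display
 * checks key off DISPLAY_VER() instead of INTEL_GEN(), and stepping
 * checks replace raw PCI revision comparisons.
 */
static void example_apply_workarounds(struct drm_i915_private *i915)
{
	/* display code compares the display IP version... */
	if (IS_DISPLAY_RANGE(i915, 9, 10)) {
		/* display version 9-10 only path */
	}

	/* ...while GT workarounds are bounded by stepping, not revid */
	if (IS_KBL_GT_STEP(i915, 0, STEP_A0)) {
		/* early KBL GT stepping workaround */
	}
}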
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aa4490934469..b23f58e94cfb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
unsigned int idx, offset;
- struct dma_fence *fence;
char __user *user_data;
u64 remain;
int ret;
@@ -213,19 +212,17 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
ret = i915_gem_object_prepare_read(obj, &needs_clflush);
- if (ret) {
- i915_gem_object_unlock(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
- fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
- if (!fence)
- return -ENOMEM;
-
remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset);
@@ -243,7 +240,13 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
offset = 0;
}
- i915_gem_object_unlock_fence(obj, fence);
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
return ret;
}
@@ -271,52 +274,102 @@ gtt_user_read(struct io_mapping *mapping,
return unwritten;
}
-static int
-i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pread *args)
+static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
+ struct drm_mm_node *node,
+ bool write)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
- intel_wakeref_t wakeref;
- struct drm_mm_node node;
- struct dma_fence *fence;
- void __user *user_data;
struct i915_vma *vma;
- u64 remain, offset;
+ struct i915_gem_ww_ctx ww;
int ret;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
vma = ERR_PTR(-ENODEV);
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ goto err_ww;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto err_ww;
+
if (!i915_gem_object_is_tiled(obj))
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (!IS_ERR(vma)) {
- node.start = i915_ggtt_offset(vma);
- node.flags = 0;
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (vma == ERR_PTR(-EDEADLK)) {
+ ret = -EDEADLK;
+ goto err_ww;
+ } else if (!IS_ERR(vma)) {
+ node->start = i915_ggtt_offset(vma);
+ node->flags = 0;
} else {
- ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
+ ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
if (ret)
- goto out_rpm;
- GEM_BUG_ON(!drm_mm_node_allocated(&node));
+ goto err_ww;
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
+ vma = NULL;
}
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ ret = i915_gem_object_pin_pages(obj);
if (ret) {
- i915_gem_object_unlock(obj);
- goto out_unpin;
+ if (drm_mm_node_allocated(node)) {
+ ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+ remove_mappable_node(ggtt, node);
+ } else {
+ i915_vma_unpin(vma);
+ }
}
- fence = i915_gem_object_lock_fence(obj);
- i915_gem_object_unlock(obj);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unpin;
+err_ww:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return ret ? ERR_PTR(ret) : vma;
+}
+
+static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
+ struct drm_mm_node *node,
+ struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ i915_gem_object_unpin_pages(obj);
+ if (drm_mm_node_allocated(node)) {
+ ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+ remove_mappable_node(ggtt, node);
+ } else {
+ i915_vma_unpin(vma);
+ }
+}
+
+static int
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ intel_wakeref_t wakeref;
+ struct drm_mm_node node;
+ void __user *user_data;
+ struct i915_vma *vma;
+ u64 remain, offset;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ vma = i915_gem_gtt_prepare(obj, &node, false);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_rpm;
}
user_data = u64_to_user_ptr(args->data_ptr);
@@ -353,14 +406,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
offset += page_length;
}
- i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
- if (drm_mm_node_allocated(&node)) {
- ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(ggtt, &node);
- } else {
- i915_vma_unpin(vma);
- }
+ i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
@@ -378,10 +424,17 @@ int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
int ret;
+ /* PREAD is disallowed for all platforms after TGL-LP. This also
+ * covers all platforms with local memory.
+ */
+ if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ return -EOPNOTSUPP;
+
if (args->size == 0)
return 0;
@@ -400,6 +453,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
+ ret = -ENODEV;
+ if (obj->ops->pread)
+ ret = obj->ops->pread(obj, args);
+ if (ret != -ENODEV)
+ goto out;
ret = -ENODEV;
if (obj->ops->pread)
@@ -413,15 +471,10 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto out;
-
ret = i915_gem_shmem_pread(obj, args);
if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(obj, args);
- i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
return ret;
@@ -469,11 +522,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
- struct dma_fence *fence;
struct i915_vma *vma;
u64 remain, offset;
void __user *user_data;
- int ret;
+ int ret = 0;
if (i915_gem_object_has_struct_page(obj)) {
/*
@@ -491,37 +543,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
wakeref = intel_runtime_pm_get(rpm);
}
- vma = ERR_PTR(-ENODEV);
- if (!i915_gem_object_is_tiled(obj))
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (!IS_ERR(vma)) {
- node.start = i915_ggtt_offset(vma);
- node.flags = 0;
- } else {
- ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
- if (ret)
- goto out_rpm;
- GEM_BUG_ON(!drm_mm_node_allocated(&node));
- }
-
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret) {
- i915_gem_object_unlock(obj);
- goto out_unpin;
- }
-
- fence = i915_gem_object_lock_fence(obj);
- i915_gem_object_unlock(obj);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unpin;
+ vma = i915_gem_gtt_prepare(obj, &node, true);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_rpm;
}
i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
@@ -570,14 +595,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
- if (drm_mm_node_allocated(&node)) {
- ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(ggtt, &node);
- } else {
- i915_vma_unpin(vma);
- }
+ i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
return ret;
@@ -617,7 +635,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
- struct dma_fence *fence;
void __user *user_data;
u64 remain;
int ret;
@@ -626,19 +643,17 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
ret = i915_gem_object_prepare_write(obj, &needs_clflush);
- if (ret) {
- i915_gem_object_unlock(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
- fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
- if (!fence)
- return -ENOMEM;
-
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire page.
@@ -666,8 +681,14 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
}
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- i915_gem_object_unlock_fence(obj, fence);
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
return ret;
}
@@ -683,10 +704,17 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
+ /* PWRITE is disallowed for all platforms after TGL-LP. This also
+ * covers all platforms with local memory.
+ */
+ if (INTEL_GEN(i915) >= 12 && !IS_TIGERLAKE(i915))
+ return -EOPNOTSUPP;
+
if (args->size == 0)
return 0;
@@ -724,10 +752,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
goto err;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
@@ -748,7 +772,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return ret;
@@ -909,7 +932,11 @@ new_vma:
return ERR_PTR(ret);
}
- ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
+ if (ww)
+ ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
+ else
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+
if (ret)
return ERR_PTR(ret);
@@ -949,7 +976,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out;
@@ -995,8 +1022,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
- mutex_unlock(&obj->mm.lock);
+ i915_gem_object_unlock(obj);
out:
i915_gem_object_put(obj);
return err;
@@ -1050,10 +1077,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_unlock:
i915_gem_drain_workqueue(dev_priv);
- if (ret != -EIO) {
+ if (ret != -EIO)
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
- i915_gem_cleanup_userptr(dev_priv);
- }
if (ret == -EIO) {
/*
@@ -1110,7 +1135,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
- i915_gem_cleanup_userptr(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
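/*
 * The i915_gem_gtt_prepare()/i915_gem_gtt_cleanup() pair above follows
 * the standard ww transaction shape. A condensed sketch of that retry
 * loop, using only calls that appear in the hunks above; the body of
 * the transaction is elided:
 */
static int example_ww_transaction(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true /* interruptible */);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* ... pin pages, set the GTT domain, map, etc. ... */

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}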
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e622aee6e4be..440c35f1abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -105,7 +105,7 @@ static inline bool tasklet_is_locked(const struct tasklet_struct *t)
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
- tasklet_unlock_wait(t);
+ tasklet_unlock_spin_wait(t);
}
static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3ee2f682eff6..36489be4896b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,7 +28,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
do {
- if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
+ if (dma_map_sg_attrs(obj->base.dev->dev,
pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC |
@@ -44,7 +44,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
* the DMA remapper, i915_gem_shrink will return 0.
*/
GEM_BUG_ON(obj->mm.pages == pages);
- } while (i915_gem_shrink(to_i915(obj->base.dev),
+ } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND));
@@ -63,8 +63,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/* Wait a bit, in the hope it avoids the hang */
usleep_range(100, 250);
- dma_unmap_sg(&i915->drm.pdev->dev,
- pages->sgl, pages->nents,
+ dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL);
}
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 75c3bfc2486e..24e18219eb50 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -12,6 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
drm_i915_getparam_t *param = data;
int value;
@@ -24,10 +25,10 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
- value = i915->drm.pdev->device;
+ value = pdev->device;
break;
case I915_PARAM_REVISION:
- value = i915->drm.pdev->revision;
+ value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = i915->ggtt.num_fences;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f962693404b7..bb181fe5d47e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -644,7 +644,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
static void err_print_pciid(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1a701367a718..7eefbdec25a2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -192,13 +192,13 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
return;
}
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
hpd->hpd = hpd_gen11;
else if (IS_GEN9_LP(dev_priv))
hpd->hpd = hpd_bxt;
- else if (INTEL_GEN(dev_priv) >= 8)
+ else if (DISPLAY_VER(dev_priv) >= 8)
hpd->hpd = hpd_bdw;
- else if (INTEL_GEN(dev_priv) >= 7)
+ else if (DISPLAY_VER(dev_priv) >= 7)
hpd->hpd = hpd_ivb;
else
hpd->hpd = hpd_ilk;
@@ -209,8 +209,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
if (HAS_PCH_DG1(dev_priv))
hpd->pch_hpd = hpd_sde_dg1;
- else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
- HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
hpd->pch_hpd = hpd_spt;
@@ -478,7 +477,7 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- if (INTEL_GEN(dev_priv) < 5)
+ if (DISPLAY_VER(dev_priv) < 5)
goto out;
/*
@@ -580,7 +579,7 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (DISPLAY_VER(dev_priv) >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
@@ -795,7 +794,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
int position, vtotal;
if (!crtc->active)
- return -1;
+ return 0;
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
mode = &vblank->hwmode;
@@ -807,7 +806,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -857,8 +856,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
- bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
- IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
+ bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
+ IS_G4X(dev_priv) || IS_DISPLAY_VER(dev_priv, 2) ||
crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
@@ -1305,7 +1304,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -1367,12 +1366,12 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
u32 res1, res2;
- if (INTEL_GEN(dev_priv) >= 3)
+ if (DISPLAY_VER(dev_priv) >= 3)
res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
@@ -2078,7 +2077,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
}
- if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
+ if (IS_DISPLAY_VER(dev_priv, 5) && de_iir & DE_PCU_EVENT)
gen5_rps_irq_handler(&dev_priv->gt.rps);
}
@@ -2095,10 +2094,19 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
ivb_err_int_handler(dev_priv);
if (de_iir & DE_EDP_PSR_INT_HSW) {
- u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
+ EDP_PSR_IIR);
- intel_psr_irq_handler(dev_priv, psr_iir);
- intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
+ intel_psr_irq_handler(intel_dp, psr_iir);
+ intel_uncore_write(&dev_priv->uncore,
+ EDP_PSR_IIR, psr_iir);
+ break;
+ }
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2176,7 +2184,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
de_iir = raw_reg_read(regs, DEIIR);
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
- if (INTEL_GEN(i915) >= 7)
+ if (DISPLAY_VER(i915) >= 7)
ivb_display_irq_handler(i915, de_iir);
else
ilk_display_irq_handler(i915, de_iir);
@@ -2261,7 +2269,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
u32 mask;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -2274,15 +2282,15 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
+ if (IS_CNL_WITH_PORT_F(dev_priv) || IS_DISPLAY_VER(dev_priv, 11))
mask |= CNL_AUX_CHANNEL_F;
- if (IS_GEN(dev_priv, 11))
+ if (IS_DISPLAY_VER(dev_priv, 11))
mask |= ICL_AUX_CHANNEL_E;
return mask;
@@ -2290,11 +2298,11 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (IS_ROCKETLAKE(dev_priv))
+ if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11)
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
- else if (INTEL_GEN(dev_priv) >= 9)
+ else if (DISPLAY_VER(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2311,21 +2319,30 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & GEN8_DE_EDP_PSR) {
+ struct intel_encoder *encoder;
u32 psr_iir;
i915_reg_t iir_reg;
- if (INTEL_GEN(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
- else
- iir_reg = EDP_PSR_IIR;
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
- intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
+ else
+ iir_reg = EDP_PSR_IIR;
+
+ psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+ intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
- if (psr_iir)
- found = true;
+ if (psr_iir)
+ found = true;
- intel_psr_irq_handler(dev_priv, psr_iir);
+ intel_psr_irq_handler(intel_dp, psr_iir);
+
+			/* prior to GEN12 there is only one EDP PSR instance */
+ if (DISPLAY_VER(dev_priv) < 12)
+ break;
+ }
}
if (!found)
@@ -2391,7 +2408,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
@@ -2416,7 +2433,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
}
- if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
if (iir) {
intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
@@ -2462,7 +2479,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
found = true;
}
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
@@ -2792,7 +2809,7 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = INTEL_GEN(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2903,7 +2920,7 @@ void ilk_disable_vblank(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = INTEL_GEN(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3023,6 +3040,24 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ /*
+ * Wa_14010685332:cnp/cmp,tgp,adp
+ * TODO: Clarify which platforms this applies to
+ * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
+ * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
+ */
+ if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
+ (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
+ SBCLK_RUN_REFCLK_DIS);
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ }
+}
+
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3046,6 +3081,8 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
+
+ cnp_display_clock_wa(dev_priv);
}
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3057,7 +3094,7 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
enum transcoder trans;
for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
@@ -3087,15 +3124,7 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
- /* Wa_14010685332:cnp/cmp,tgp,adp */
- if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
- (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
- intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
- SBCLK_RUN_REFCLK_DIS, 0);
- }
+ cnp_display_clock_wa(dev_priv);
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@@ -3494,7 +3523,7 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
else
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3685,13 +3714,13 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) <= 10)
+ if (DISPLAY_VER(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(dev_priv, &port))
@@ -3708,7 +3737,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
enum transcoder trans;
for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
@@ -3737,7 +3766,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -3747,9 +3776,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ u32 mask = SDE_GMBUS_ICP;
+
+ GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
+}
+
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_SPLIT(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_postinstall(dev_priv);
+ else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(&dev_priv->gt);
@@ -3758,13 +3797,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_master_intr_enable(dev_priv->uncore.regs);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 mask = SDE_GMBUS_ICP;
-
- GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
-}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
@@ -4283,10 +4315,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
} else {
if (HAS_PCH_DG1(dev_priv))
dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
@@ -4392,7 +4426,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
*/
int intel_irq_install(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->drm.pdev->irq;
+ int irq = to_pci_dev(dev_priv->drm.dev)->irq;
int ret;
/*
@@ -4427,7 +4461,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->drm.pdev->irq;
+ int irq = to_pci_dev(dev_priv->drm.dev)->irq;
/*
* FIXME we can get called twice during driver probe
@@ -4487,5 +4521,5 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
void intel_synchronize_irq(struct drm_i915_private *i915)
{
- synchronize_irq(i915->drm.pdev->irq);
+ synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
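/*
 * A condensed sketch of the per-encoder PSR interrupt dispatch used in
 * the hunks above (pre-gen12 flavour): PSR state has moved from
 * dev_priv->psr into each intel_dp, so the handlers walk the encoders
 * with PSR support. Pre-gen12 hardware has a single EDP PSR instance,
 * hence the unconditional break.
 */
static void example_psr_irq(struct drm_i915_private *i915)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&i915->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u32 psr_iir = intel_uncore_read(&i915->uncore, EDP_PSR_IIR);

		intel_psr_irq_handler(intel_dp, psr_iir);
		intel_uncore_write(&i915->uncore, EDP_PSR_IIR, psr_iir);
		break;
	}
}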
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 7b3b83bd5ab8..1b021a4902de 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -135,7 +135,7 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
* accepts that its arguments may not be aligned, but are valid for the
* potential 16-byte read past the end.
*/
-void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
+void i915_unaligned_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
unsigned long addr;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
index e36d30edd987..3df063a3293b 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.h
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
void i915_memcpy_init_early(struct drm_i915_private *i915);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
-void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len);
+void i915_unaligned_memcpy_from_wc(void *dst, const void *src, unsigned long len);
/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
* as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 666808cb3a32..4c8cd08c672d 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,89 +28,9 @@
#include "i915_drv.h"
-struct remap_pfn {
- struct mm_struct *mm;
- unsigned long pfn;
- pgprot_t prot;
-
- struct sgt_iter sgt;
- resource_size_t iobase;
-};
-
-static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
-
- /* Special PTE are not associated with any struct page */
- set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
- r->pfn++;
-
- return 0;
-}
-
-#define use_dma(io) ((io) != -1)
-
-static inline unsigned long sgt_pfn(const struct remap_pfn *r)
-{
- if (use_dma(r->iobase))
- return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
- else
- return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
-}
-
-static int remap_sg(pte_t *pte, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
-
- if (GEM_WARN_ON(!r->sgt.sgp))
- return -EINVAL;
-
- /* Special PTE are not associated with any struct page */
- set_pte_at(r->mm, addr, pte,
- pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
- r->pfn++; /* track insertions in case we need to unwind later */
-
- r->sgt.curr += PAGE_SIZE;
- if (r->sgt.curr >= r->sgt.max)
- r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
-
- return 0;
-}
-
-/**
- * remap_io_mapping - remap an IO mapping to userspace
- * @vma: user vma to map to
- * @addr: target user address to start at
- * @pfn: physical address of kernel memory
- * @size: size of map area
- * @iomap: the source io_mapping
- *
- * Note: this is only safe if the mm semaphore is held when called.
- */
-int remap_io_mapping(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size,
- struct io_mapping *iomap)
-{
- struct remap_pfn r;
- int err;
-
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
- GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
-
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
- r.mm = vma->vm_mm;
- r.pfn = pfn;
- r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
- (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
- err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
- if (unlikely(err)) {
- zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
- return err;
- }
-
- return 0;
-}
+#define use_dma(io) ((io) != -1)
/**
* remap_io_sg - remap an IO mapping to userspace
@@ -126,12 +46,7 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
- struct remap_pfn r = {
- .mm = vma->vm_mm,
- .prot = vma->vm_page_prot,
- .sgt = __sgt_iter(sgl, use_dma(iobase)),
- .iobase = iobase,
- };
+ unsigned long pfn, len, remapped = 0;
-	int err;
+	int err = 0;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -140,11 +55,25 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
- err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
- if (unlikely(err)) {
- zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
- return err;
- }
-
- return 0;
+ do {
+ if (use_dma(iobase)) {
+ if (!sg_dma_len(sgl))
+ break;
+ pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
+ len = sg_dma_len(sgl);
+ } else {
+ pfn = page_to_pfn(sg_page(sgl));
+ len = sgl->length;
+ }
+
+ err = remap_pfn_range(vma, addr + remapped, pfn, len,
+ vma->vm_page_prot);
+ if (err)
+ break;
+ remapped += len;
+ } while ((sgl = __sg_next(sgl)));
+
+ if (err)
+ zap_vma_ptes(vma, addr, remapped);
+ return err;
}
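/*
 * Hypothetical caller sketch (the real users are the GEM mmap fault
 * handlers, which are not part of this file): remap an object's backing
 * store into a user vma, by CPU page when iobase == -1 or by DMA
 * address otherwise. Only the remap_io_sg() prototype is taken from
 * this patch.
 */
static vm_fault_t example_remap_fault(struct vm_area_struct *vma,
				      struct sg_table *pages,
				      resource_size_t iobase)
{
	int err;

	err = remap_io_sg(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			  pages->sgl, iobase);

	return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}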
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 6939634e56ed..0320878d96b0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -197,6 +197,11 @@ i915_param_named_unsafe(fake_lmem_start, ulong, 0400,
"Fake LMEM start offset (default: 0)");
#endif
+#if CONFIG_DRM_I915_REQUEST_TIMEOUT
+i915_param_named_unsafe(request_timeout_ms, uint, 0600,
+ "Default request/fence/batch buffer expiration timeout.");
+#endif
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index f031966af5b7..34ebb0662547 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -54,8 +54,8 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
- param(bool, psr_safest_params, false, 0600) \
- param(bool, enable_psr2_sel_fetch, false, 0600) \
+ param(bool, psr_safest_params, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, false, 0400) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
@@ -72,6 +72,7 @@ struct drm_printer;
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned long, fake_lmem_start, 0, 0400) \
+ param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, 0600) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
param(bool, load_detect_test, false, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 020b5f561f07..480553746794 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -36,7 +36,7 @@
#include "i915_selftest.h"
#define PLATFORM(x) .platform = (x)
-#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
+#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1), .display.version = (x)
#define I845_PIPE_OFFSETS \
.pipe_offsets = { \
@@ -154,7 +154,7 @@
.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN_DEFAULT_REGIONS \
- .memory_regions = REGION_SMEM | REGION_STOLEN
+ .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
#define I830_FEATURES \
GEN(2), \
@@ -538,7 +538,7 @@ static const struct intel_device_info vlv_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
- .has_fpga_dbg = 1, \
+ .display.has_fpga_dbg = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.display.has_dp_mst = 1, \
@@ -689,7 +689,7 @@ static const struct intel_device_info skl_gt4_info = {
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
- .has_fpga_dbg = 1, \
+ .display.has_fpga_dbg = 1, \
.display.has_fbc = 1, \
.display.has_hdcp = 1, \
.display.has_psr = 1, \
@@ -723,6 +723,7 @@ static const struct intel_device_info bxt_info = {
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
+ .display.version = 10,
.ddb_size = 1024,
GLK_COLORS,
};
@@ -897,7 +898,6 @@ static const struct intel_device_info rkl_info = {
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
- .require_force_probe = 1,
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
@@ -924,6 +924,18 @@ static const struct intel_device_info dg1_info __maybe_unused = {
.ppgtt_size = 47,
};
+static const struct intel_device_info adl_s_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_ALDERLAKE_S),
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .require_force_probe = 1,
+ .display.has_hti = 1,
+ .display.has_psr_hw_tracking = 0,
+ .platform_engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .dma_mask_size = 46,
+};
+
#undef GEN
#undef PLATFORM
@@ -1000,6 +1012,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_JSL_IDS(&jsl_info),
INTEL_TGL_12_IDS(&tgl_info),
INTEL_RKL_IDS(&rkl_info),
+ INTEL_ADLS_IDS(&adl_s_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e62ad69606f6..85ad62dbabfa 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -302,7 +302,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -311,17 +311,9 @@ static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
[I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
-};
-
-static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
- [I915_OA_FORMAT_C4_B8] = { 7, 64 },
-};
-
-static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
- [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};
#define SAMPLE_OA_REPORT (1<<0)
@@ -730,11 +722,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
(IS_GEN(stream->perf->i915, 12) ?
OAREPORT_REASON_MASK_EXTENDED :
OAREPORT_REASON_MASK));
- if (reason == 0) {
- if (__ratelimit(&stream->perf->spurious_report_rs))
- DRM_NOTE("Skipping spurious, invalid OA report\n");
- continue;
- }
ctx_id = report32[2] & stream->specific_ctx_id_mask;
@@ -1586,7 +1573,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
stream->oa_buffer.vma = vma;
stream->oa_buffer.vaddr =
- i915_gem_object_pin_map(bo, I915_MAP_WB);
+ i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
if (IS_ERR(stream->oa_buffer.vaddr)) {
ret = PTR_ERR(stream->oa_buffer.vaddr);
goto err_unpin;
@@ -1640,6 +1627,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
+ struct i915_gem_ww_ctx ww;
int ret, i;
enum {
START_TS,
@@ -1657,15 +1645,21 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
return PTR_ERR(bo);
}
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(bo, &ww);
+ if (ret)
+ goto out_ww;
+
/*
* We pin in GGTT because we jump into this buffer now because
* multiple OA config BOs will have a jump to this address and it
* needs to be fixed during the lifetime of the i915/perf stream.
*/
- vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
+ vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err_unref;
+ goto out_ww;
}
batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
@@ -1799,12 +1793,19 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
__i915_gem_object_release_map(bo);
stream->noa_wait = vma;
- return 0;
+ goto out_ww;
err_unpin:
i915_vma_unpin_and_release(&vma, 0);
-err_unref:
- i915_gem_object_put(bo);
+out_ww:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ i915_gem_object_put(bo);
return ret;
}
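Both alloc_noa_wait() above and alloc_oa_config_buffer() below now follow the i915 ww-mutex idiom: initialise an acquire context, take the object lock, and on -EDEADLK back off and retry until the whole lock set is held, then fini the context. A minimal userspace model of that control flow, with the locking reduced to a stub that "deadlocks" a couple of times; every name here is illustrative, not the kernel API.

#include <errno.h>
#include <stdio.h>

/* Stand-in acquire context; the kernel's is struct i915_gem_ww_ctx. */
struct ww_ctx { int backoffs; };

/* Pretend lock step: reports -EDEADLK until contention clears. */
static int lock_objects(struct ww_ctx *ww, int *contention)
{
	if (*contention > 0) {
		(*contention)--;
		return -EDEADLK;
	}
	return 0;
}

static int backoff(struct ww_ctx *ww)
{
	ww->backoffs++;	/* kernel: drop held locks, wait on the winner */
	return 0;
}

static int pin_with_ww(int *contention)
{
	struct ww_ctx ww = { 0 };
	int err;

retry:
	err = lock_objects(&ww, contention);
	if (err == 0) {
		/* ... pin/map the object while the lock is held ... */
	}
	if (err == -EDEADLK) {
		err = backoff(&ww);	/* kernel: i915_gem_ww_ctx_backoff() */
		if (!err)
			goto retry;
	}
	/* kernel: i915_gem_ww_ctx_fini(&ww) */
	return err;
}

int main(void)
{
	int contention = 2;

	printf("pin returned %d\n", pin_with_ww(&contention));
	return 0;
}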
@@ -1847,6 +1848,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
{
struct drm_i915_gem_object *obj;
struct i915_oa_config_bo *oa_bo;
+ struct i915_gem_ww_ctx ww;
size_t config_length = 0;
u32 *cs;
int err;
@@ -1867,10 +1869,16 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
goto err_free;
}
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ goto out_ww;
+
cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
- goto err_oa_bo;
+ goto out_ww;
}
cs = write_cs_mi_lri(cs,
@@ -1898,19 +1906,28 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
NULL);
if (IS_ERR(oa_bo->vma)) {
err = PTR_ERR(oa_bo->vma);
- goto err_oa_bo;
+ goto out_ww;
}
oa_bo->oa_config = i915_oa_config_get(oa_config);
llist_add(&oa_bo->node, &stream->oa_config_bos);
- return oa_bo;
+out_ww:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
-err_oa_bo:
- i915_gem_object_put(obj);
+ if (err)
+ i915_gem_object_put(obj);
err_free:
- kfree(oa_bo);
- return ERR_PTR(err);
+ if (err) {
+ kfree(oa_bo);
+ return ERR_PTR(err);
+ }
+ return oa_bo;
}
static struct i915_vma *
@@ -3521,6 +3538,18 @@ static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
2ULL << exponent);
}
+static __always_inline bool
+oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
+{
+ return test_bit(format, perf->format_mask);
+}
+
+static __always_inline void
+oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
+{
+ __set_bit(format, perf->format_mask);
+}
+
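oa_format_valid()/oa_format_add() replace the per-generation format tables with a single bitmask, sized like FORMAT_MASK_SIZE in i915_perf_types.h further down. A small standalone model of the same bookkeeping, using open-coded equivalents of __set_bit()/test_bit(); the enum values and macro names below are illustrative.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

enum oa_format { FMT_A12, FMT_A12_B8_C8, FMT_C4_B8, FMT_MAX };

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MASK_WORDS DIV_ROUND_UP(FMT_MAX, BITS_PER_LONG)

static unsigned long format_mask[MASK_WORDS];

static void format_add(enum oa_format f)	/* models __set_bit() */
{
	format_mask[f / BITS_PER_LONG] |= 1UL << (f % BITS_PER_LONG);
}

static bool format_valid(enum oa_format f)	/* models test_bit() */
{
	return format_mask[f / BITS_PER_LONG] & (1UL << (f % BITS_PER_LONG));
}

int main(void)
{
	format_add(FMT_A12);
	printf("A12 supported: %d\n", format_valid(FMT_A12));
	printf("C4_B8 supported: %d\n", format_valid(FMT_C4_B8));
	return 0;
}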
/**
* read_properties_unlocked - validate + copy userspace stream open properties
* @perf: i915 perf instance
@@ -3612,7 +3641,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
value);
return -EINVAL;
}
- if (!perf->oa_formats[value].size) {
+ if (!oa_format_valid(perf, value)) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -4256,6 +4285,50 @@ static struct ctl_table dev_root[] = {
{}
};
+static void oa_init_supported_formats(struct i915_perf *perf)
+{
+ struct drm_i915_private *i915 = perf->i915;
+ enum intel_platform platform = INTEL_INFO(i915)->platform;
+
+ switch (platform) {
+ case INTEL_HASWELL:
+ oa_format_add(perf, I915_OA_FORMAT_A13);
+ oa_format_add(perf, I915_OA_FORMAT_A29);
+ oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_B4_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
+ oa_format_add(perf, I915_OA_FORMAT_C4_B8);
+ break;
+
+ case INTEL_BROADWELL:
+ case INTEL_CHERRYVIEW:
+ case INTEL_SKYLAKE:
+ case INTEL_BROXTON:
+ case INTEL_KABYLAKE:
+ case INTEL_GEMINILAKE:
+ case INTEL_COFFEELAKE:
+ case INTEL_COMETLAKE:
+ case INTEL_CANNONLAKE:
+ case INTEL_ICELAKE:
+ case INTEL_ELKHARTLAKE:
+ case INTEL_JASPERLAKE:
+ case INTEL_TIGERLAKE:
+ case INTEL_ROCKETLAKE:
+ case INTEL_DG1:
+ case INTEL_ALDERLAKE_S:
+ oa_format_add(perf, I915_OA_FORMAT_A12);
+ oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_C4_B8);
+ break;
+
+ default:
+ MISSING_CASE(platform);
+ }
+}
+
/**
* i915_perf_init - initialize i915-perf state on module bind
* @i915: i915 device instance
@@ -4271,6 +4344,7 @@ void i915_perf_init(struct drm_i915_private *i915)
/* XXX const struct i915_perf_ops! */
+ perf->oa_formats = oa_formats;
if (IS_HASWELL(i915)) {
perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
@@ -4281,8 +4355,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.oa_disable = gen7_oa_disable;
perf->ops.read = gen7_oa_read;
perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
-
- perf->oa_formats = hsw_oa_formats;
} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
/* Note that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
@@ -4293,8 +4365,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.read = gen8_oa_read;
if (IS_GEN_RANGE(i915, 8, 9)) {
- perf->oa_formats = gen8_plus_oa_formats;
-
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4325,8 +4395,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
}
} else if (IS_GEN_RANGE(i915, 10, 11)) {
- perf->oa_formats = gen8_plus_oa_formats;
-
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4349,8 +4417,6 @@ void i915_perf_init(struct drm_i915_private *i915)
}
perf->gen8_valid_ctx_bit = BIT(16);
} else if (IS_GEN(i915, 12)) {
- perf->oa_formats = gen12_oa_formats;
-
perf->ops.is_valid_b_counter_reg =
gen12_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
@@ -4405,6 +4471,8 @@ void i915_perf_init(struct drm_i915_private *i915)
500 * 1000 /* 500us */);
perf->i915 = i915;
+
+ oa_init_supported_formats(perf);
}
}
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a36a455ae336..aa14354a5120 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/wait.h>
+#include <uapi/drm/i915_drm.h>
#include "gt/intel_sseu.h"
#include "i915_reg.h"
@@ -441,6 +442,13 @@ struct i915_perf {
struct i915_oa_ops ops;
const struct i915_oa_format *oa_formats;
+ /**
+ * Bitmask of the OA report formats supported on this
+ * platform.
+ */
+#define FORMAT_MASK_SIZE DIV_ROUND_UP(I915_OA_FORMAT_MAX - 1, BITS_PER_LONG)
+ unsigned long format_mask[FORMAT_MASK_SIZE];
+
atomic64_t noa_programming_delay;
};
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2b88c0baa1bf..41651ac255fa 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1124,7 +1124,7 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
static bool is_igp(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
/* IGP is 0000:00:02.0 */
return pci_domain_nr(pdev->bus) == 0 &&
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 8aa7866ec6b6..bc2fa84f98a8 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -24,14 +24,8 @@ enum {
I915_PRIORITY_DISPLAY,
};
-#define I915_USER_PRIORITY_SHIFT 0
-#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
-
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
/* Smallest priority value that cannot be bumped. */
-#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
+#define I915_PRIORITY_INVALID (INT_MIN)
/*
* Requests containing performance queries must not be preempted by
@@ -45,9 +39,8 @@ enum {
#define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
struct i915_priolist {
- struct list_head requests[I915_PRIORITY_COUNT];
+ struct list_head requests;
struct rb_node node;
- unsigned long used;
int priority;
};
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index aaf1f0045b16..cbf7a60afe54 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1874,10 +1874,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _ICL_COMBOPHY_B 0x6C000
#define _EHL_COMBOPHY_C 0x160000
#define _RKL_COMBOPHY_D 0x161000
+#define _ADL_COMBOPHY_E 0x16B000
+
#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
_ICL_COMBOPHY_B, \
_EHL_COMBOPHY_C, \
- _RKL_COMBOPHY_D)
+ _RKL_COMBOPHY_D, \
+ _ADL_COMBOPHY_E)
/* CNL/ICL Port CL_DW registers */
#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
@@ -2927,7 +2930,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
#define HDPORT_STATE _MMIO(0x45050)
-#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
+#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
@@ -10378,7 +10381,7 @@ enum skl_power_gate {
/* ICL Clocks */
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
(tc_port) + 12 : \
@@ -10413,14 +10416,38 @@ enum skl_power_gate {
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \
(((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy))
+/* ADLS Clocks */
+#define _ADLS_DPCLKA_CFGCR0 0x164280
+#define _ADLS_DPCLKA_CFGCR1 0x1642BC
+#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
+ _ADLS_DPCLKA_CFGCR0, \
+ _ADLS_DPCLKA_CFGCR1)
+#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
+/* ADLS DPCLKA_CFGCR0 DDI mask */
+#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
+#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
+/* ADLS DPCLKA_CFGCR1 DDI mask */
+#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
+#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
+#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
+ ADLS_DPCLKA_DDIA_SEL_MASK, \
+ ADLS_DPCLKA_DDIB_SEL_MASK, \
+ ADLS_DPCLKA_DDII_SEL_MASK, \
+ ADLS_DPCLKA_DDIJ_SEL_MASK, \
+ ADLS_DPCLKA_DDIK_SEL_MASK)
+
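ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK() above leans on i915's _PICK(), which indexes a compound-literal array of per-phy values. A userspace rendition of that trick, with made-up mask values standing in for the register fields:

#include <stdint.h>
#include <stdio.h>

/* Userspace rendition of i915's _PICK(): index a compound-literal array. */
#define PICK(idx, ...) (((const uint32_t []){ __VA_ARGS__ })[(idx)])

#define DDIA_SEL_MASK 0x03u	/* illustrative values only */
#define DDIB_SEL_MASK 0x0cu
#define DDII_SEL_MASK 0x30u

#define DDI_CLK_SEL_MASK(phy) PICK((phy), DDIA_SEL_MASK, \
					  DDIB_SEL_MASK, \
					  DDII_SEL_MASK)

int main(void)
{
	for (int phy = 0; phy < 3; phy++)
		printf("phy %d mask 0x%02x\n", phy, DDI_CLK_SEL_MASK(phy));
	return 0;
}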
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
#define DPLL1_ENABLE 0x46014
+#define _ADLS_DPLL2_ENABLE 0x46018
+#define _ADLS_DPLL3_ENABLE 0x46030
#define PLL_ENABLE (1 << 31)
#define PLL_LOCK (1 << 30)
#define PLL_POWER_ENABLE (1 << 27)
#define PLL_POWER_STATE (1 << 26)
-#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
+#define CNL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
+ _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
#define TBT_PLL_ENABLE _MMIO(0x46020)
@@ -10666,6 +10693,21 @@ enum skl_power_gate {
_DG1_DPLL2_CFGCR1, \
_DG1_DPLL3_CFGCR1)
+/* On ADL-S, DPLL4_CFGCR0/1 are used to control DPLL2 */
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define _ADLS_DPLL4_CFGCR0 0x164294
+#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, \
+ _ADLS_DPLL3_CFGCR0)
+
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define _ADLS_DPLL4_CFGCR1 0x164298
+#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, \
+ _ADLS_DPLL3_CFGCR1)
+
#define _DKL_PHY1_BASE 0x168000
#define _DKL_PHY2_BASE 0x169000
#define _DKL_PHY3_BASE 0x16A000
@@ -11427,6 +11469,9 @@ enum skl_power_gate {
#define BIG_JOINER_ENABLE (1 << 29)
#define MASTER_BIG_JOINER_ENABLE (1 << 28)
#define VGA_CENTERING_ENABLE (1 << 27)
+#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
+#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
+#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
#define _ICL_PIPE_DSS_CTL2_PB 0x78204
#define _ICL_PIPE_DSS_CTL2_PC 0x78404
@@ -12142,6 +12187,8 @@ enum skl_power_gate {
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+#define GEN12_GSMBASE _MMIO(0x108100)
+
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 22e39d938f17..bec9c3652188 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -33,7 +33,10 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gpu_commands.h"
+#include "gt/intel_reset.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
@@ -244,6 +247,50 @@ static void __i915_request_fill(struct i915_request *rq, u8 val)
memset(vaddr + head, val, rq->postfix - head);
}
+/**
+ * i915_request_active_engine - return the engine a request is executing on
+ * @rq: request to inspect
+ * @active: pointer in which to return the active engine
+ *
+ * Fills @active with the engine on which @rq is currently executing,
+ * provided the request is active and not yet completed.
+ *
+ * Returns true if the request was active, false otherwise.
+ */
+bool
+i915_request_active_engine(struct i915_request *rq,
+ struct intel_engine_cs **active)
+{
+ struct intel_engine_cs *engine, *locked;
+ bool ret = false;
+
+ /*
+ * Serialise with __i915_request_submit() so that it sees
+ * is-banned?, or we know the request is already inflight.
+ *
+ * Note that rq->engine is unstable, and so we double
+ * check that we have acquired the lock on the final engine.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ locked = engine;
+ spin_lock(&locked->active.lock);
+ }
+
+ if (i915_request_is_active(rq)) {
+ if (!__i915_request_is_complete(rq))
+ *active = locked;
+ ret = true;
+ }
+
+ spin_unlock_irq(&locked->active.lock);
+
+ return ret;
+}
+
static void remove_from_engine(struct i915_request *rq)
{
struct intel_engine_cs *engine, *locked;
@@ -274,6 +321,54 @@ static void remove_from_engine(struct i915_request *rq)
__notify_execute_cb_imm(rq);
}
+static void __rq_init_watchdog(struct i915_request *rq)
+{
+ rq->watchdog.timer.function = NULL;
+}
+
+static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
+{
+ struct i915_request *rq =
+ container_of(hrtimer, struct i915_request, watchdog.timer);
+ struct intel_gt *gt = rq->engine->gt;
+
+ if (!i915_request_completed(rq)) {
+ if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
+ schedule_work(&gt->watchdog.work);
+ } else {
+ i915_request_put(rq);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void __rq_arm_watchdog(struct i915_request *rq)
+{
+ struct i915_request_watchdog *wdg = &rq->watchdog;
+ struct intel_context *ce = rq->context;
+
+ if (!ce->watchdog.timeout_us)
+ return;
+
+ i915_request_get(rq);
+
+ hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ wdg->timer.function = __rq_watchdog_expired;
+ hrtimer_start_range_ns(&wdg->timer,
+ ns_to_ktime(ce->watchdog.timeout_us *
+ NSEC_PER_USEC),
+ NSEC_PER_MSEC,
+ HRTIMER_MODE_REL);
+}
+
+static void __rq_cancel_watchdog(struct i915_request *rq)
+{
+ struct i915_request_watchdog *wdg = &rq->watchdog;
+
+ if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
+ i915_request_put(rq);
+}
+
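The watchdog added above arms a per-request hrtimer that holds a reference for as long as it is pending; on expiry it either queues cancellation work or, if the request completed first, simply drops the reference. A loose pthread-based model of that lifetime rule (struct request and the helpers are stand-ins, not the kernel types); build with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Toy request: a completion flag plus a refcount, since the watchdog
 * must hold a reference while its timer is armed. */
struct request {
	atomic_bool completed;
	atomic_int refcount;
	unsigned int timeout_us;
};

static void request_put(struct request *rq)
{
	atomic_fetch_sub(&rq->refcount, 1);
}

/* Fires once after timeout_us, like the hrtimer callback. */
static void *watchdog_fn(void *arg)
{
	struct request *rq = arg;

	usleep(rq->timeout_us);
	if (!atomic_load(&rq->completed))
		printf("watchdog: request timed out, queueing cancel\n");
	else
		printf("watchdog: request already completed\n");
	request_put(rq);	/* drop the reference taken when arming */
	return NULL;
}

int main(void)
{
	struct request rq = { .refcount = 1, .timeout_us = 10000 };
	pthread_t wdg;

	atomic_fetch_add(&rq.refcount, 1);	/* arm: take a reference */
	pthread_create(&wdg, NULL, watchdog_fn, &rq);

	usleep(20000);	/* never complete, so the watchdog expires */
	pthread_join(wdg, NULL);
	printf("refcount back to %d\n", atomic_load(&rq.refcount));
	return 0;
}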
bool i915_request_retire(struct i915_request *rq)
{
if (!__i915_request_is_complete(rq))
@@ -285,6 +380,8 @@ bool i915_request_retire(struct i915_request *rq)
trace_i915_request_retire(rq);
i915_request_mark_complete(rq);
+ __rq_cancel_watchdog(rq);
+
/*
* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -498,31 +595,38 @@ void __i915_request_skip(struct i915_request *rq)
rq->infix = rq->postfix;
}
-void i915_request_set_error_once(struct i915_request *rq, int error)
+bool i915_request_set_error_once(struct i915_request *rq, int error)
{
int old;
GEM_BUG_ON(!IS_ERR_VALUE((long)error));
if (i915_request_signaled(rq))
- return;
+ return false;
old = READ_ONCE(rq->fence.error);
do {
if (fatal_error(old))
- return;
+ return false;
} while (!try_cmpxchg(&rq->fence.error, &old, error));
+
+ return true;
}
-void i915_request_mark_eio(struct i915_request *rq)
+struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{
if (__i915_request_is_complete(rq))
- return;
+ return NULL;
GEM_BUG_ON(i915_request_signaled(rq));
+ /* As soon as the request is completed, it may be retired */
+ rq = i915_request_get(rq);
+
i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
+
+ return rq;
}
bool __i915_request_submit(struct i915_request *request)
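i915_request_set_error_once() now reports whether this caller actually installed the error, which i915_request_cancel() further down uses to run the cancellation path exactly once. A compilable sketch of the same compare-and-swap loop using C11 atomics; the error values and the fatal_error() policy are simplified for the example.

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* A fatal error (here just -EIO) must never be overwritten. */
static bool fatal_error(int error)
{
	return error == -EIO;
}

static atomic_int fence_error;

/* Returns true only if this caller installed the error, mirroring the
 * new bool return of i915_request_set_error_once(). */
static bool set_error_once(int error)
{
	int old = atomic_load(&fence_error);

	do {
		if (fatal_error(old))
			return false;
	} while (!atomic_compare_exchange_weak(&fence_error, &old, error));

	return true;
}

int main(void)
{
	printf("set -ECANCELED: %d\n", set_error_once(-ECANCELED));
	printf("set -EIO:       %d\n", set_error_once(-EIO));
	printf("set -ECANCELED: %d\n", set_error_once(-ECANCELED));
	printf("final error: %d\n", atomic_load(&fence_error));
	return 0;
}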
@@ -678,6 +782,28 @@ void i915_request_unsubmit(struct i915_request *request)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void __cancel_request(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = NULL;
+
+ i915_request_active_engine(rq, &engine);
+
+ if (engine && intel_engine_pulse(engine))
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "request cancellation by %s",
+ current->comm);
+}
+
+void i915_request_cancel(struct i915_request *rq, int error)
+{
+ if (!i915_request_set_error_once(rq, error))
+ return;
+
+ set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+
+ __cancel_request(rq);
+}
+
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
@@ -690,6 +816,8 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
if (unlikely(fence->error))
i915_request_set_error_once(request, fence->error);
+ else
+ __rq_arm_watchdog(request);
/*
* We need to serialize use of the submit_request() callback
@@ -863,7 +991,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->fence.seqno = seqno;
RCU_INIT_POINTER(rq->timeline, tl);
- RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
GEM_BUG_ON(__i915_request_is_complete(rq));
@@ -877,6 +1004,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
/* No zalloc, everything must be cleared after use */
rq->batch = NULL;
+ __rq_init_watchdog(rq);
GEM_BUG_ON(rq->capture_list);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
@@ -1108,9 +1236,6 @@ emit_semaphore_wait(struct i915_request *to,
if (i915_request_has_initial_breadcrumb(to))
goto await_fence;
- if (!rcu_access_pointer(from->hwsp_cacheline))
- goto await_fence;
-
/*
* If this or its dependents are waiting on an external fence
* that may fail catastrophically, then we want to avoid using
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 1bfe214a47e9..270f6cd37650 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,7 +26,9 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
#include <linux/irq_work.h>
+#include <linux/llist.h>
#include <linux/lockdep.h>
#include "gem/i915_gem_context_types.h"
@@ -237,16 +239,6 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
- /*
- * If we need to access the timeline's seqno for this request in
- * another request, we need to keep a read reference to this associated
- * cacheline, so that we do not free and recycle it before the foreign
- * observers have completed. Hence, we keep a pointer to the cacheline
- * inside the timeline's HWSP vma, but it is only valid while this
- * request has not completed and guarded by the timeline mutex.
- */
- struct intel_timeline_cacheline __rcu *hwsp_cacheline;
-
/** Position in the ring of the start of the request */
u32 head;
@@ -287,6 +279,12 @@ struct i915_request {
/** timeline->request entry for this request */
struct list_head link;
+ /** Watchdog support fields. */
+ struct i915_request_watchdog {
+ struct llist_node link;
+ struct hrtimer timer;
+ } watchdog;
+
I915_SELFTEST_DECLARE(struct {
struct list_head link;
unsigned long delay;
@@ -310,8 +308,8 @@ struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
void __i915_request_skip(struct i915_request *rq);
-void i915_request_set_error_once(struct i915_request *rq, int error);
-void i915_request_mark_eio(struct i915_request *rq);
+bool i915_request_set_error_once(struct i915_request *rq, int error);
+struct i915_request *i915_request_mark_eio(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
@@ -366,6 +364,8 @@ void i915_request_submit(struct i915_request *request);
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
+void i915_request_cancel(struct i915_request *rq, int error);
+
long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
@@ -616,4 +616,29 @@ i915_request_active_timeline(const struct i915_request *rq)
lockdep_is_held(&rq->engine->active.lock));
}
+static inline u32
+i915_request_active_seqno(const struct i915_request *rq)
+{
+ u32 hwsp_phys_base =
+ page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
+ u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
+
+ /*
+ * Because of wraparound, we cannot simply take tl->hwsp_offset,
+ * but instead use the fact that the in-page offset of the vaddr
+ * equals the in-page offset encoded in hwsp_offset. Take the top bits
+ * from tl->hwsp_offset and combine them with the in-page offset of
+ * rq->hwsp_seqno.
+ *
+ * As rq->hwsp_seqno is rewritten when signaled, this only works
+ * when the request isn't signaled yet, but at that point you
+ * no longer need the offset.
+ */
+
+ return hwsp_phys_base + hwsp_relative_offset;
+}
+
+bool
+i915_request_active_engine(struct i915_request *rq,
+ struct intel_engine_cs **active);
+
#endif /* I915_REQUEST_H */
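i915_request_active_seqno() above combines the page-aligned bits of tl->hwsp_offset with the in-page offset of rq->hwsp_seqno. A tiny standalone check of that arithmetic, with invented sample values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

static uint32_t page_mask_bits(uint32_t v) { return v & PAGE_MASK; }
static uint32_t offset_in_page(uint32_t v) { return v & (PAGE_SIZE - 1); }

int main(void)
{
	/* Illustrative values: a GGTT offset of the HWSP page, and a CPU
	 * pointer to a seqno slot 0x40 bytes into that same page. */
	uint32_t hwsp_offset = 0x12340;		/* may carry low bits */
	uint32_t seqno_vaddr = 0xdeadb040;	/* same in-page offset */

	uint32_t active = page_mask_bits(hwsp_offset) +
			  offset_in_page(seqno_vaddr);

	printf("active seqno address: 0x%x\n", active);	/* 0x12040 */
	return 0;
}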
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 7144239f08df..efa638c3acc7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -43,7 +43,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
struct rb_node *rb;
- long last_prio, i;
+ long last_prio;
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
@@ -57,14 +57,6 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
GEM_BUG_ON(p->priority > last_prio);
last_prio = p->priority;
-
- GEM_BUG_ON(!p->used);
- for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
- if (list_empty(&p->requests[i]))
- continue;
-
- GEM_BUG_ON(!(p->used & BIT(i)));
- }
}
}
@@ -75,14 +67,10 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
- int idx, i;
lockdep_assert_held(&engine->active.lock);
assert_priolists(execlists);
- /* buckets sorted from highest [in slot 0] to lowest priority */
- idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
- prio >>= I915_USER_PRIORITY_SHIFT;
if (unlikely(execlists->no_priolist))
prio = I915_PRIORITY_NORMAL;
@@ -99,7 +87,7 @@ find_priolist:
parent = &rb->rb_right;
first = false;
} else {
- goto out;
+ return &p->requests;
}
}
@@ -125,15 +113,12 @@ find_priolist:
}
p->priority = prio;
- for (i = 0; i < ARRAY_SIZE(p->requests); i++)
- INIT_LIST_HEAD(&p->requests[i]);
+ INIT_LIST_HEAD(&p->requests);
+
rb_link_node(&p->node, rb, parent);
rb_insert_color_cached(&p->node, &execlists->queue, first);
- p->used = 0;
-out:
- p->used |= BIT(idx);
- return &p->requests[idx];
+ return &p->requests;
}
void __i915_priolist_free(struct i915_priolist *p)
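With the user-priority bits gone, each i915_priolist node now carries a single request list and the tree is keyed purely by priority. A compact userspace model of the lookup-or-insert step, with a sorted singly linked list standing in for the rbtree; all types and names are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* One node per distinct priority, each owning one request list — the
 * shape i915_priolist takes after dropping the per-bit sub-lists. */
struct request { int id; struct request *next; };
struct priolist {
	int priority;
	struct request *requests;
	struct priolist *next;	/* stand-in for the rbtree ordering */
};

/* Find or create the node for prio, kept sorted highest-first, as the
 * rbtree walk in i915_sched_lookup_priolist() does. */
static struct request **lookup_priolist(struct priolist **head, int prio)
{
	struct priolist **pp = head, *p;

	while (*pp && (*pp)->priority > prio)
		pp = &(*pp)->next;
	if (*pp && (*pp)->priority == prio)
		return &(*pp)->requests;

	p = calloc(1, sizeof(*p));
	if (!p)
		exit(1);
	p->priority = prio;
	p->next = *pp;
	*pp = p;
	return &p->requests;
}

int main(void)
{
	struct priolist *queue = NULL;
	struct request rq = { .id = 1 };

	*lookup_priolist(&queue, 0) = &rq;	/* queue at normal priority */
	lookup_priolist(&queue, 2);		/* higher-priority bucket */

	for (struct priolist *p = queue; p; p = p->next)
		printf("prio %d: %s\n", p->priority,
		       p->requests ? "has requests" : "empty");
	return 0;
}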
@@ -363,30 +348,6 @@ void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
spin_unlock_irq(&schedule_lock);
}
-static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
-{
- struct i915_sched_attr attr = node->attr;
-
- if (attr.priority & bump)
- return;
-
- attr.priority |= bump;
- __i915_schedule(node, &attr);
-}
-
-void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
-{
- unsigned long flags;
-
- GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
- if (READ_ONCE(rq->sched.attr.priority) & bump)
- return;
-
- spin_lock_irqsave(&schedule_lock, flags);
- __bump_priority(&rq->sched, bump);
- spin_unlock_irqrestore(&schedule_lock, flags);
-}
-
void i915_sched_node_init(struct i915_sched_node *node)
{
INIT_LIST_HEAD(&node->signalers_list);
@@ -553,8 +514,7 @@ int __init i915_global_scheduler_init(void)
if (!global.slab_dependencies)
return -ENOMEM;
- global.slab_priorities = KMEM_CACHE(i915_priolist,
- SLAB_HWCACHE_ALIGN);
+ global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
if (!global.slab_priorities)
goto err_priorities;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 4501e5ac2637..858a0938f47a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -15,17 +15,11 @@
struct drm_printer;
-#define priolist_for_each_request(it, plist, idx) \
- for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
- list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+#define priolist_for_each_request(it, plist) \
+ list_for_each_entry(it, &(plist)->requests, sched.link)
-#define priolist_for_each_request_consume(it, n, plist, idx) \
- for (; \
- (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
- (plist)->used &= ~BIT(idx)) \
- list_for_each_entry_safe(it, n, \
- &(plist)->requests[idx], \
- sched.link)
+#define priolist_for_each_request_consume(it, n, plist) \
+ list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
void i915_sched_node_init(struct i915_sched_node *node);
void i915_sched_node_reinit(struct i915_sched_node *node);
@@ -44,8 +38,6 @@ void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
-void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
-
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index d53d207ab6eb..f54de0499be7 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -107,6 +107,7 @@ int __i915_subtests(const char *caller,
#define I915_SELFTEST_DECLARE(x) x
#define I915_SELFTEST_ONLY(x) unlikely(x)
+#define I915_SELFTEST_EXPORT
#else /* !IS_ENABLED(CONFIG_DRM_I915_SELFTEST) */
@@ -116,6 +117,7 @@ static inline int i915_perf_selftests(struct pci_dev *pdev) { return 0; }
#define I915_SELFTEST_DECLARE(x)
#define I915_SELFTEST_ONLY(x) 0
+#define I915_SELFTEST_EXPORT static
#endif
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63212df33c9e..0bc7b49f843c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -85,7 +85,7 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
void i915_save_display(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
@@ -100,7 +100,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
void i915_restore_display(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
intel_restore_swf(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index b3a24eac21f1..de0e224b56ce 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -54,14 +54,14 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
int i915_switcheroo_register(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
}
void i915_switcheroo_unregister(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
vga_switcheroo_unregister_client(pdev);
}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 70fca72f5162..172799277dd5 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -61,7 +61,7 @@
*/
void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u64 magic;
u16 version_major;
void __iomem *shared_area;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index caa9b041616b..07490db51cdc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -230,7 +230,7 @@ err_vma:
}
static struct i915_vma *
-vma_lookup(struct drm_i915_gem_object *obj,
+i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
@@ -278,7 +278,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
- vma = vma_lookup(obj, vm, view);
+ vma = i915_vma_lookup(obj, vm, view);
spin_unlock(&obj->vma.lock);
/* vma_create() will resolve the race if another creates the vma */
@@ -863,8 +863,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
int err;
#ifdef CONFIG_PROVE_LOCKING
- if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
- WARN_ON(!ww);
+ if (debug_locks && !WARN_ON(!ww) && vma->resv)
+ assert_vma_held(vma);
#endif
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
@@ -884,6 +884,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
+ /* lock VM */
+ err = i915_vm_lock_objects(vma->vm, ww);
+ if (err)
+ goto err_rpm;
+
work = i915_vma_work();
if (!work) {
err = -ENOMEM;
@@ -1020,8 +1025,15 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
+#endif
+
do {
- err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
+ if (ww)
+ err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
+ else
+ err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
if (err != -ENOSPC) {
if (!err) {
err = i915_vma_wait_for_bind(vma);
@@ -1238,9 +1250,11 @@ int i915_vma_move_to_active(struct i915_vma *vma,
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
} else {
- err = dma_resv_reserve_shared(vma->resv, 1);
- if (unlikely(err))
- return err;
+ if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+ err = dma_resv_reserve_shared(vma->resv, 1);
+ if (unlikely(err))
+ return err;
+ }
dma_resv_add_shared_fence(vma->resv, &rq->fence);
obj->write_domain = 0;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index a64adc8c883b..8df784a026d2 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -52,6 +52,9 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
return !i915_active_is_idle(&vma->active);
}
+/* skip reserving a shared-fence slot, to avoid deadlocks */
+#define __EXEC_OBJECT_NO_RESERVE BIT(31)
+
int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq);
int __must_check i915_vma_move_to_active(struct i915_vma *vma,
@@ -243,7 +246,22 @@ i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- return i915_vma_pin_ww(vma, NULL, size, alignment, flags);
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return err;
}
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index f5cb848b7a7e..6b1bfa230b82 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -97,12 +97,16 @@ enum i915_cache_level;
struct intel_remapped_plane_info {
/* in gtt pages */
- unsigned int width, height, stride, offset;
+ u32 offset;
+ u16 width;
+ u16 height;
+ u16 src_stride;
+ u16 dst_stride;
} __packed;
struct intel_remapped_info {
struct intel_remapped_plane_info plane[2];
- unsigned int unused_mbz;
+ u32 unused_mbz;
} __packed;
struct intel_rotation_info {
@@ -123,9 +127,9 @@ enum i915_ggtt_view_type {
static inline void assert_i915_gem_gtt_types(void)
{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 3 * sizeof(u32) + 8 * sizeof(u16));
/* Check that rotation/remapped shares offsets for simplicity */
BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index f2d5ae59081e..de02207f6ec6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -66,6 +66,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(TIGERLAKE),
PLATFORM_NAME(ROCKETLAKE),
PLATFORM_NAME(DG1),
+ PLATFORM_NAME(ALDERLAKE_S),
};
#undef PLATFORM_NAME
@@ -204,7 +205,7 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
}
if (IS_TIGERLAKE(i915)) {
- struct pci_dev *root, *pdev = i915->drm.pdev;
+ struct pci_dev *root, *pdev = to_pci_dev(i915->drm.dev);
root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list);
@@ -222,7 +223,7 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
}
}
- GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);
+ GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
@@ -249,7 +250,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 10) {
+ /* Wa_14011765242: adl-s A0 */
+ if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+ for_each_pipe(dev_priv, pipe)
+ runtime->num_scalers[pipe] = 0;
+ else if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
runtime->num_scalers[pipe] = 2;
} else if (IS_GEN(dev_priv, 9)) {
@@ -260,7 +265,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (IS_ROCKETLAKE(dev_priv))
+ if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 4;
else if (INTEL_GEN(dev_priv) >= 11)
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index cf2d528c6e9b..2f442d418a15 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,6 +27,8 @@
#include <uapi/drm/i915_drm.h>
+#include "intel_step.h"
+
#include "display/intel_display.h"
#include "gt/intel_engine_types.h"
@@ -84,6 +86,7 @@ enum intel_platform {
INTEL_TIGERLAKE,
INTEL_ROCKETLAKE,
INTEL_DG1,
+ INTEL_ALDERLAKE_S,
INTEL_MAX_PLATFORMS
};
@@ -92,7 +95,8 @@ enum intel_platform {
* it is fine for the same bit to be used on multiple parent platforms.
*/
-#define INTEL_SUBPLATFORM_BITS (3)
+#define INTEL_SUBPLATFORM_BITS (2)
+#define INTEL_SUBPLATFORM_MASK (BIT(INTEL_SUBPLATFORM_BITS) - 1)
/* HSW/BDW/SKL/KBL/CFL */
#define INTEL_SUBPLATFORM_ULT (0)
@@ -116,7 +120,6 @@ enum intel_ppgtt_type {
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
- func(has_fpga_dbg); \
func(has_global_mocs); \
func(has_gt_uc); \
func(has_l3_dpf); \
@@ -143,6 +146,7 @@ enum intel_ppgtt_type {
func(has_dsb); \
func(has_dsc); \
func(has_fbc); \
+ func(has_fpga_dbg); \
func(has_gmch); \
func(has_hdcp); \
func(has_hotplug); \
@@ -185,6 +189,8 @@ struct intel_device_info {
#undef DEFINE_FLAG
struct {
+ u8 version;
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
@@ -223,6 +229,8 @@ struct intel_runtime_info {
u8 num_scalers[I915_MAX_PIPES];
u32 rawclk_freq;
+
+ struct intel_step_info step;
};
struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 73d256fc6830..1e53c017c30d 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -427,6 +427,12 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
case 0:
dram_info->type = INTEL_DRAM_DDR4;
break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
case 3:
dram_info->type = INTEL_DRAM_LPDDR4;
break;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 1bfcdd89b241..bf837b6bb185 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -6,14 +6,22 @@
#include "intel_memory_region.h"
#include "i915_drv.h"
-/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
-#define REGION_MAP(type, inst) \
- BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
-
-static const u32 intel_region_map[] = {
- [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
- [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
- [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
+static const struct {
+ u16 class;
+ u16 instance;
+} intel_region_map[] = {
+ [INTEL_REGION_SMEM] = {
+ .class = INTEL_MEMORY_SYSTEM,
+ .instance = 0,
+ },
+ [INTEL_REGION_LMEM] = {
+ .class = INTEL_MEMORY_LOCAL,
+ .instance = 0,
+ },
+ [INTEL_REGION_STOLEN_SMEM] = {
+ .class = INTEL_MEMORY_STOLEN_SYSTEM,
+ .instance = 0,
+ },
};
struct intel_memory_region *
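The region table above now stores explicit (class, instance) pairs instead of packing both into a u32 with BIT() tricks. A standalone rendering of that table shape, with the enum names shortened for the example:

#include <stdio.h>

enum memory_type { MEM_SYSTEM, MEM_LOCAL, MEM_STOLEN_SYSTEM };
enum region_id { REGION_SMEM, REGION_LMEM, REGION_STOLEN_SMEM, REGION_COUNT };

/* Plain (class, instance) pairs: no shifting or ilog2() needed. */
static const struct {
	unsigned short class;
	unsigned short instance;
} region_map[REGION_COUNT] = {
	[REGION_SMEM]        = { MEM_SYSTEM, 0 },
	[REGION_LMEM]        = { MEM_LOCAL, 0 },
	[REGION_STOLEN_SMEM] = { MEM_STOLEN_SYSTEM, 0 },
};

int main(void)
{
	for (int i = 0; i < REGION_COUNT; i++)
		printf("region %d -> class %u instance %u\n",
		       i, region_map[i].class, region_map[i].instance);
	return 0;
}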
@@ -156,9 +164,22 @@ int intel_memory_region_init_buddy(struct intel_memory_region *mem)
void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
+ i915_buddy_free_list(&mem->mm, &mem->reserved);
i915_buddy_fini(&mem->mm);
}
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+ u64 offset, u64 size)
+{
+ int ret;
+
+ mutex_lock(&mem->mm_lock);
+ ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+ mutex_unlock(&mem->mm_lock);
+
+ return ret;
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -185,6 +206,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
INIT_LIST_HEAD(&mem->objects.purgeable);
+ INIT_LIST_HEAD(&mem->reserved);
mutex_init(&mem->mm_lock);
@@ -245,22 +267,22 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
struct intel_memory_region *mem = ERR_PTR(-ENODEV);
- u32 type;
+ u16 type, instance;
if (!HAS_REGION(i915, BIT(i)))
continue;
- type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+ type = intel_region_map[i].class;
+ instance = intel_region_map[i].instance;
switch (type) {
case INTEL_MEMORY_SYSTEM:
mem = i915_gem_shmem_setup(i915);
break;
- case INTEL_MEMORY_STOLEN:
+ case INTEL_MEMORY_STOLEN_SYSTEM:
mem = i915_gem_stolen_setup(i915);
break;
- case INTEL_MEMORY_LOCAL:
- mem = intel_setup_fake_lmem(i915);
- break;
+ default:
+ continue;
}
if (IS_ERR(mem)) {
@@ -271,9 +293,9 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
goto out_cleanup;
}
- mem->id = intel_region_map[i];
+ mem->id = i;
mem->type = type;
- mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+ mem->instance = instance;
i915->mm.regions[i] = mem;
}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 6ffc0673f005..edd49067c8ca 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -25,24 +25,19 @@ struct sg_table;
enum intel_memory_type {
INTEL_MEMORY_SYSTEM = 0,
INTEL_MEMORY_LOCAL,
- INTEL_MEMORY_STOLEN,
+ INTEL_MEMORY_STOLEN_SYSTEM,
};
enum intel_region_id {
INTEL_REGION_SMEM = 0,
INTEL_REGION_LMEM,
- INTEL_REGION_STOLEN,
+ INTEL_REGION_STOLEN_SMEM,
INTEL_REGION_UNKNOWN, /* Should be last */
};
#define REGION_SMEM BIT(INTEL_REGION_SMEM)
#define REGION_LMEM BIT(INTEL_REGION_LMEM)
-#define REGION_STOLEN BIT(INTEL_REGION_STOLEN)
-
-#define INTEL_MEMORY_TYPE_SHIFT 16
-
-#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
-#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
+#define REGION_STOLEN_SMEM BIT(INTEL_REGION_STOLEN_SMEM)
#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
#define I915_ALLOC_CONTIGUOUS BIT(1)
@@ -84,11 +79,13 @@ struct intel_memory_region {
resource_size_t total;
resource_size_t avail;
- unsigned int type;
- unsigned int instance;
- unsigned int id;
+ u16 type;
+ u16 instance;
+ enum intel_region_id id;
char name[8];
+ struct list_head reserved;
+
dma_addr_t remap_addr;
struct {
@@ -113,6 +110,9 @@ void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks);
void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+ u64 offset, u64 size);
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index ecaf314d60b6..7476f0e063c6 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -121,13 +121,18 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv));
+ !IS_ROCKETLAKE(dev_priv) &&
+ !IS_GEN9_BC(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
return PCH_JSP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv));
+ return PCH_ADP;
default:
return PCH_NONE;
}
@@ -156,7 +161,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ if (IS_ALDERLAKE_S(dev_priv))
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
else if (IS_JSL_EHL(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 06d2cd50af0b..7318377503b0 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -26,6 +26,7 @@ enum intel_pch {
PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
+ PCH_ADP, /* Alder Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
@@ -53,12 +54,14 @@ enum intel_pch {
#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 97b57acc02e2..0e2501b7fc27 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -38,6 +38,7 @@
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
+#include "display/skl_universal_plane.h"
#include "gt/intel_llc.h"
@@ -2338,7 +2339,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN(dev_priv, 2))
+ else if (!IS_DISPLAY_VER(dev_priv, 2))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
@@ -2352,7 +2353,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2367,7 +2368,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
wm_info = &i830_bc_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2379,7 +2380,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN(dev_priv, 2))
+ if (IS_DISPLAY_VER(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2651,9 +2652,9 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
return 3072;
- else if (INTEL_GEN(dev_priv) >= 7)
+ else if (DISPLAY_VER(dev_priv) >= 7)
return 768;
else
return 512;
@@ -2663,10 +2664,10 @@ static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
int level, bool is_sprite)
{
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (INTEL_GEN(dev_priv) >= 7)
+ else if (DISPLAY_VER(dev_priv) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2680,7 +2681,7 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_GEN(dev_priv) >= 7)
+ if (DISPLAY_VER(dev_priv) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
@@ -2688,7 +2689,7 @@ ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
return 31;
else
return 15;
@@ -2716,7 +2717,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (INTEL_GEN(dev_priv) <= 6)
+ if (DISPLAY_VER(dev_priv) <= 6)
fifo_size /= 2;
}
@@ -2851,7 +2852,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
{
struct intel_uncore *uncore = &dev_priv->uncore;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2943,14 +2944,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[2] = (sskpd >> 12) & 0xFF;
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ } else if (DISPLAY_VER(dev_priv) >= 6) {
u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
- } else if (INTEL_GEN(dev_priv) >= 5) {
+ } else if (DISPLAY_VER(dev_priv) >= 5) {
u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
@@ -2966,7 +2967,7 @@ static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN(dev_priv, 5))
+ if (IS_DISPLAY_VER(dev_priv, 5))
wm[0] = 13;
}
@@ -2974,18 +2975,18 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN(dev_priv, 5))
+ if (IS_DISPLAY_VER(dev_priv, 5))
wm[0] = 13;
}
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
/* how many WM levels are we expecting */
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
return 7;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 4;
- else if (INTEL_GEN(dev_priv) >= 6)
+ else if (DISPLAY_VER(dev_priv) >= 6)
return 3;
else
return 2;
@@ -2993,7 +2994,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
- const u16 wm[8])
+ const u16 wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -3011,7 +3012,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (INTEL_GEN(dev_priv) >= 9)
+ if (DISPLAY_VER(dev_priv) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
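/*
 * Worked example (illustrative, not part of the patch): on display
 * version 9+ a raw WM1 value of 3 is already in microseconds and
 * scales to 30; before that, a raw WM1 value of 4 is in 0.5 us units
 * and scales to 20. Either way the result ends up in 0.1 us units,
 * presumably so the printout below can show one decimal place.
 */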
@@ -3104,7 +3105,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_DISPLAY_VER(dev_priv, 6)) {
snb_wm_latency_quirk(dev_priv);
snb_wm_lp3_irq_quirk(dev_priv);
}
@@ -3175,7 +3176,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -3317,12 +3318,12 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
int last_enabled_level = max_level;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
/* merge each WM1+ level */
for (level = 1; level <= max_level; level++) {
@@ -3353,7 +3354,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
+ if (IS_DISPLAY_VER(dev_priv, 5) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3410,7 +3411,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
- if (INTEL_GEN(dev_priv) >= 8)
+ if (DISPLAY_VER(dev_priv) >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
else
@@ -3421,7 +3422,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
+ if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
@@ -3611,7 +3612,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (DISPLAY_VER(dev_priv) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
@@ -3659,14 +3660,14 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+ return (IS_GEN9_BC(dev_priv) || DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
static void
skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12) {
u32 val = 0;
int ret;
@@ -3679,17 +3680,17 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
}
drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n");
- } else if (IS_GEN(dev_priv, 11)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 11)) {
dev_priv->sagv_block_time_us = 10;
return;
- } else if (IS_GEN(dev_priv, 10)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 10)) {
dev_priv->sagv_block_time_us = 20;
return;
- } else if (IS_GEN(dev_priv, 9)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 9)) {
dev_priv->sagv_block_time_us = 30;
return;
} else {
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(DISPLAY_VER(dev_priv));
}
/* Default to an unusable block time */
@@ -3796,7 +3797,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (INTEL_GEN(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
intel_disable_sagv(dev_priv);
return;
}
@@ -3847,7 +3848,7 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
if (!new_bw_state)
return;
- if (INTEL_GEN(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
intel_enable_sagv(dev_priv);
return;
}
@@ -3875,6 +3876,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum plane_id plane_id;
+ int max_level = INT_MAX;
if (!intel_has_sagv(dev_priv))
return false;
@@ -3891,20 +3893,31 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
int level;
/* Skip this plane if it's not enabled */
- if (!wm->wm[0].plane_en)
+ if (!wm->wm[0].enable)
continue;
/* Find the highest enabled wm level for this plane */
for (level = ilk_wm_max_level(dev_priv);
- !wm->wm[level].plane_en; --level)
+ !wm->wm[level].enable; --level)
{ }
+ /* Highest common enabled wm level for all planes */
+ max_level = min(level, max_level);
+ }
+
+ /* No enabled planes? */
+ if (max_level == INT_MAX)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
/*
- * If any of the planes on this pipe don't enable wm levels that
- * incur memory latencies higher than sagv_block_time_us we
- * can't enable SAGV.
+ * All enabled planes must have enabled a common wm level that
+ * can tolerate memory latencies higher than sagv_block_time_us
*/
- if (!wm->wm[level].can_sagv)
+ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
return false;
}
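/*
 * Illustrative walk-through (numbers invented for this example): with
 * three enabled planes whose highest enabled watermark levels are 3,
 * 5 and 2, the common max_level is 2, and SAGV can only be enabled if
 * each of those planes has wm[2].can_sagv set.
 */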
@@ -3920,12 +3933,10 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *plane_alloc =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc)
+ if (wm->wm[0].enable && !wm->sagv.wm0.enable)
return false;
}
@@ -3937,7 +3948,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (INTEL_GEN(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
return skl_crtc_can_enable_sagv(crtc_state);
@@ -3946,7 +3957,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state)
{
- if (INTEL_GEN(dev_priv) < 11 &&
+ if (DISPLAY_VER(dev_priv) < 11 &&
bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
return false;
@@ -3999,7 +4010,7 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
* latter from the plane commit hooks (especially in the legacy
* cursor case)
*/
- pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv) >= 12 &&
+ pipe_wm->use_sagv_wm = DISPLAY_VER(dev_priv) >= 12 &&
intel_can_enable_sagv(dev_priv, new_bw_state);
}
@@ -4023,7 +4034,7 @@ static int intel_dbuf_size(struct drm_i915_private *dev_priv)
drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
- if (INTEL_GEN(dev_priv) < 11)
+ if (DISPLAY_VER(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
return ddb_size;
@@ -4278,7 +4289,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
} else {
@@ -4602,9 +4613,9 @@ static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (IS_GEN(dev_priv, 12))
+ if (IS_DISPLAY_VER(dev_priv, 12))
return tgl_compute_dbuf_slices(pipe, active_pipes);
- else if (IS_GEN(dev_priv, 11))
+ else if (IS_DISPLAY_VER(dev_priv, 11))
return icl_compute_dbuf_slices(pipe, active_pipes);
/*
* For anything else just return one slice for now.
@@ -4746,20 +4757,61 @@ icl_get_total_relative_data_rate(struct intel_atomic_state *state,
return total_data_rate;
}
-static const struct skl_wm_level *
-skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
+const struct skl_wm_level *
+skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
int level)
{
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
if (level == 0 && pipe_wm->use_sagv_wm)
- return &wm->sagv_wm0;
+ return &wm->sagv.wm0;
return &wm->wm[level];
}
+const struct skl_wm_level *
+skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (pipe_wm->use_sagv_wm)
+ return &wm->sagv.trans_wm;
+
+ return &wm->trans_wm;
+}
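/*
 * A minimal usage sketch (assumed call-site shape, not taken from this
 * patch): callers no longer need to know about the SAGV variants, e.g.
 *
 *	const struct skl_wm_level *l0 =
 *		skl_plane_wm_level(pipe_wm, plane_id, 0);
 *
 * resolves to &wm->sagv.wm0 when pipe_wm->use_sagv_wm is set and to
 * &wm->wm[0] otherwise; skl_plane_trans_wm() behaves analogously for
 * the transition watermark.
 */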
+
+/*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+static void
+skl_check_wm_level(struct skl_wm_level *wm, u64 total)
+{
+ if (wm->min_ddb_alloc > total)
+ memset(wm, 0, sizeof(*wm));
+}
+
+static void
+skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
+ u64 total, u64 uv_total)
+{
+ if (wm->min_ddb_alloc > total ||
+ uv_wm->min_ddb_alloc > uv_total) {
+ memset(wm, 0, sizeof(*wm));
+ memset(uv_wm, 0, sizeof(*uv_wm));
+ }
+}
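/*
 * Illustrative use of the helpers above (the actual call sites appear
 * later in this patch), clamping each level against the plane's DDB
 * share:
 *
 *	for (level = 0; level <= max_level; level++)
 *		skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
 *					total[plane_id], uv_total[plane_id]);
 */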
+
static int
skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@@ -4786,7 +4838,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
if (!crtc_state->hw.active)
return 0;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
total_data_rate =
icl_get_total_relative_data_rate(state, crtc);
else
@@ -4900,7 +4952,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
/* Gen11+ uses a separate plane for UV watermarks */
drm_WARN_ON(&dev_priv->drm,
- INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+ DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
if (total[plane_id]) {
@@ -4927,45 +4979,33 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- /*
- * We only disable the watermarks for each plane if
- * they exceed the ddb allocation of said plane. This
- * is done so that we don't end up touching cursor
- * watermarks needlessly when some other plane reduces
- * our max possible watermark level.
- *
- * Bspec has this to say about the PLANE_WM enable bit:
- * "All the watermarks at this level for all enabled
- * planes must be enabled before the level will be used."
- * So this is actually safe to do.
- */
- if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
- wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
- memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
+ total[plane_id], uv_total[plane_id]);
/*
* Wa_1408961008:icl, ehl
* Underruns with WM1+ disabled
*/
- if (IS_GEN(dev_priv, 11) &&
- level == 1 && wm->wm[0].plane_en) {
- wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
- wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
+ if (IS_DISPLAY_VER(dev_priv, 11) &&
+ level == 1 && wm->wm[0].enable) {
+ wm->wm[level].blocks = wm->wm[0].blocks;
+ wm->wm[level].lines = wm->wm[0].lines;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
}
}
}
/*
- * Go back and disable the transition watermark if it turns out we
- * don't have enough DDB blocks for it.
+ * Go back and disable the transition and SAGV watermarks
+ * if it turns out we don't have enough DDB blocks for them.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (wm->trans_wm.plane_res_b >= total[plane_id])
- memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
+ skl_check_wm_level(&wm->trans_wm, total[plane_id]);
+ skl_check_wm_level(&wm->sagv.wm0, total[plane_id]);
+ skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]);
}
return 0;
@@ -4990,7 +5030,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
ret = add_fixed16_u32(ret, 1);
return ret;
@@ -5070,7 +5110,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
- if (INTEL_GEN(dev_priv) >= 11 &&
+ if (DISPLAY_VER(dev_priv) >= 11 &&
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
@@ -5104,7 +5144,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -5113,8 +5153,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
- if (!wp->x_tiled ||
- INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -5153,7 +5192,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 10)
return true;
/* The number of lines are ignored for the level 0 watermark. */
@@ -5170,7 +5209,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
- u32 res_blocks, res_lines, min_ddb_alloc = 0;
+ u32 blocks, lines, min_ddb_alloc = 0;
if (latency == 0) {
/* reject it */
@@ -5206,8 +5245,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (IS_GEN(dev_priv, 9) &&
- !IS_GEMINILAKE(dev_priv))
+ if (IS_DISPLAY_VER(dev_priv, 9))
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -5216,24 +5254,22 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
- res_lines = div_round_up_fixed16(selected_result,
- wp->plane_blocks_per_line);
+ blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ lines = div_round_up_fixed16(selected_result,
+ wp->plane_blocks_per_line);
if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
- res_blocks +=
- fixed16_to_u32_round_up(wp->y_tile_minimum);
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
/* Display WA #1126: skl,bxt,kbl */
if (level >= 1 && level <= 7) {
if (wp->y_tiled) {
- res_blocks +=
- fixed16_to_u32_round_up(wp->y_tile_minimum);
- res_lines += wp->y_min_scanlines;
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ lines += wp->y_min_scanlines;
} else {
- res_blocks++;
+ blocks++;
}
/*
@@ -5242,51 +5278,50 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* Assumption in DDB algorithm optimization for special
* cases. Also covers Display WA #1125 for RC.
*/
- if (result_prev->plane_res_b > res_blocks)
- res_blocks = result_prev->plane_res_b;
+ if (result_prev->blocks > blocks)
+ blocks = result_prev->blocks;
}
}
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
if (wp->y_tiled) {
int extra_lines;
- if (res_lines % wp->y_min_scanlines == 0)
+ if (lines % wp->y_min_scanlines == 0)
extra_lines = wp->y_min_scanlines;
else
extra_lines = wp->y_min_scanlines * 2 -
- res_lines % wp->y_min_scanlines;
+ lines % wp->y_min_scanlines;
- min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
+ min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
wp->plane_blocks_per_line);
} else {
- min_ddb_alloc = res_blocks +
- DIV_ROUND_UP(res_blocks, 10);
+ min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
}
}
if (!skl_wm_has_lines(dev_priv, level))
- res_lines = 0;
+ lines = 0;
- if (res_lines > 31) {
+ if (lines > 31) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
}
/*
- * If res_lines is valid, assume we can use this watermark level
+ * If lines is valid, assume we can use this watermark level
* for now. We'll come back and disable it after we calculate the
* DDB allocation if it turns out we don't actually have enough
* blocks to satisfy it.
*/
- result->plane_res_b = res_blocks;
- result->plane_res_l = res_lines;
+ result->blocks = blocks;
+ result->lines = lines;
/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
- result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
- result->plane_en = true;
+ result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
+ result->enable = true;
- if (INTEL_GEN(dev_priv) < 12)
+ if (DISPLAY_VER(dev_priv) < 12)
result->can_sagv = latency >= dev_priv->sagv_block_time_us;
}
@@ -5315,7 +5350,7 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
struct skl_plane_wm *plane_wm)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0;
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us;
@@ -5324,14 +5359,13 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
sagv_wm);
}
-static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
- const struct skl_wm_params *wp,
- struct skl_plane_wm *wm)
+static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
+ struct skl_wm_level *trans_wm,
+ const struct skl_wm_level *wm0,
+ const struct skl_wm_params *wp)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_amount, trans_y_tile_min;
- u16 wm0_sel_res_b, trans_offset_b, res_blocks;
+ u16 wm0_blocks, trans_offset, blocks;
/* Transition WM doesn't make any sense if IPC is disabled */
if (!dev_priv->ipc_enabled)
@@ -5344,47 +5378,48 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
return;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
trans_min = 4;
else
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (IS_DISPLAY_VER(dev_priv, 10))
trans_amount = 0;
else
trans_amount = 10; /* This is configurable amount */
- trans_offset_b = trans_min + trans_amount;
+ trans_offset = trans_min + trans_amount;
/*
* The spec asks for Selected Result Blocks for wm0 (the real value),
* not Result Blocks (the integer value). Pay attention to the capital
- * letters. The value wm_l0->plane_res_b is actually Result Blocks, but
+ * letters. The value wm_l0->blocks is actually Result Blocks, but
* since Result Blocks is the ceiling of Selected Result Blocks plus 1,
* and since we later will have to get the ceiling of the sum in the
* transition watermarks calculation, we can just pretend Selected
* Result Blocks is Result Blocks minus 1 and it should work for the
* current platforms.
*/
- wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
+ wm0_blocks = wm0->blocks - 1;
if (wp->y_tiled) {
trans_y_tile_min =
(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
- res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
- trans_offset_b;
+ blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
} else {
- res_blocks = wm0_sel_res_b + trans_offset_b;
+ blocks = wm0_blocks + trans_offset;
}
+ blocks++;
/*
* Just assume we can enable the transition watermark. After
* computing the DDB we'll come back and disable it if that
* assumption turns out to be false.
*/
- wm->trans_wm.plane_res_b = res_blocks + 1;
- wm->trans_wm.plane_en = true;
+ trans_wm->blocks = blocks;
+ trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
+ trans_wm->enable = true;
}
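/*
 * Worked example (illustrative numbers): on display version 11+ with
 * IPC enabled, trans_min = 4 and trans_amount = 10, so trans_offset =
 * 14. For a linear (non Y-tiled) plane with wm0->blocks = 31, we get
 * wm0_blocks = 30 and blocks = 30 + 14 + 1 = 45, hence
 * trans_wm->min_ddb_alloc = max(wm0->min_ddb_alloc, 46).
 */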
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -5404,10 +5439,15 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
- if (INTEL_GEN(dev_priv) >= 12)
+ skl_compute_transition_wm(dev_priv, &wm->trans_wm,
+ &wm->wm[0], &wm_params);
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
tgl_compute_sagv_wm(crtc_state, &wm_params, wm);
- skl_compute_transition_wm(crtc_state, &wm_params, wm);
+ skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
+ &wm->sagv.wm0, &wm_params);
+ }
return 0;
}
@@ -5471,12 +5511,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
- memset(wm, 0, sizeof(*wm));
-
/* Watermarks calculated in master */
if (plane_state->planar_slave)
return 0;
+ memset(wm, 0, sizeof(*wm));
+
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
@@ -5524,7 +5564,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
if (plane->pipe != crtc->pipe)
continue;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
else
ret = skl_build_plane_wm(crtc_state, plane_state);
@@ -5554,12 +5594,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
{
u32 val = 0;
- if (level->plane_en)
+ if (level->enable)
val |= PLANE_WM_EN;
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
- val |= level->plane_res_b;
- val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
+ val |= level->blocks;
+ val |= level->lines << PLANE_WM_LINES_SHIFT;
intel_de_write_fw(dev_priv, reg, val);
}
@@ -5571,25 +5611,21 @@ void skl_write_plane_wm(struct intel_plane *plane,
int level, max_level = ilk_wm_max_level(dev_priv);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_ddb_entry *ddb_uv =
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
- for (level = 0; level <= max_level; level++) {
- const struct skl_wm_level *wm_level;
-
- wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
-
+ for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- wm_level);
- }
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
- &wm->trans_wm);
+ skl_plane_trans_wm(pipe_wm, plane_id));
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, plane_id), ddb_y);
return;
@@ -5611,20 +5647,16 @@ void skl_write_cursor_wm(struct intel_plane *plane,
int level, max_level = ilk_wm_max_level(dev_priv);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
- for (level = 0; level <= max_level; level++) {
- const struct skl_wm_level *wm_level;
-
- wm_level = skl_plane_wm_level(crtc_state, plane_id, level);
-
+ for (level = 0; level <= max_level; level++)
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- wm_level);
- }
- skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
+ skl_plane_trans_wm(pipe_wm, plane_id));
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
@@ -5632,10 +5664,10 @@ void skl_write_cursor_wm(struct intel_plane *plane,
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
- return l1->plane_en == l2->plane_en &&
+ return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
- l1->plane_res_l == l2->plane_res_l &&
- l1->plane_res_b == l2->plane_res_b;
+ l1->lines == l2->lines &&
+ l1->blocks == l2->blocks;
}
static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
@@ -5654,7 +5686,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
return false;
}
- return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
+ skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
+ skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
}
static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5884,85 +5918,114 @@ skl_print_wm_changes(struct intel_atomic_state *state)
continue;
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n",
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
- enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
- enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
- enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
- enast(old_wm->trans_wm.plane_en),
- enast(old_wm->sagv_wm0.plane_en),
- enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
- enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
- enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
- enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
- enast(new_wm->trans_wm.plane_en),
- enast(new_wm->sagv_wm0.plane_en));
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
- enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,
-
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,
- enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l);
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
- old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
- old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
- old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
- old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
- old_wm->trans_wm.plane_res_b,
- old_wm->sagv_wm0.plane_res_b,
- new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
- new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
- new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
- new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
- new_wm->trans_wm.plane_res_b,
- new_wm->sagv_wm0.plane_res_b);
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv_wm0.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv_wm0.min_ddb_alloc);
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
}
}
}
+static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
+ const struct skl_pipe_wm *old_pipe_wm,
+ const struct skl_pipe_wm *new_pipe_wm)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
+ skl_plane_wm_level(new_pipe_wm, plane->id, level)))
+ return false;
+ }
+
+ return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
+ skl_plane_trans_wm(new_pipe_wm, plane->id));
+}
+
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
@@ -6008,9 +6071,9 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* with the software state.
*/
if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
- skl_plane_wm_equals(dev_priv,
- &old_crtc_state->wm.skl.optimal.planes[plane_id],
- &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ skl_plane_selected_wm_equals(plane,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
continue;
plane_state = intel_atomic_get_plane_state(state, plane);
@@ -6092,7 +6155,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (INTEL_GEN(dev_priv) >= 7 &&
+ if (DISPLAY_VER(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
@@ -6141,10 +6204,10 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
{
- level->plane_en = val & PLANE_WM_EN;
+ level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
- level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
- level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
+ level->blocks = val & PLANE_WM_BLOCKS_MASK;
+ level->lines = (val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
}
@@ -6171,19 +6234,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- if (INTEL_GEN(dev_priv) >= 12)
- wm->sagv_wm0 = wm->wm[0];
-
if (plane_id != PLANE_CURSOR)
val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
else
val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
- }
- if (!crtc->active)
- return;
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ wm->sagv.wm0 = wm->wm[0];
+ wm->sagv.trans_wm = wm->trans_wm;
+ }
+ }
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
@@ -6706,7 +6768,7 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (DISPLAY_VER(dev_priv) >= 7) {
hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
}
@@ -7072,7 +7134,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1))
intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
@@ -7171,12 +7233,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_STEP(dev_priv, 0, STEP_B0))
intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
- if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_STEP(dev_priv, 0, STEP_B0))
intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
@@ -7621,15 +7683,15 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
skl_setup_sagv_block_time(dev_priv);
/* For FIFO watermark updates */
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_DISPLAY_VER(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
+ (!IS_DISPLAY_VER(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -7672,12 +7734,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pnv_update_wm;
- } else if (IS_GEN(dev_priv, 4)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN(dev_priv, 3)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 3)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN(dev_priv, 2)) {
+ } else if (IS_DISPLAY_VER(dev_priv, 2)) {
if (INTEL_NUM_PIPES(dev_priv) == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 97550cf0b6df..669c8d505677 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -52,6 +52,11 @@ bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
+const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id,
+ int level);
+const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8b725efb2254..eaf7688f517d 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -420,7 +420,7 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
* already active and ensures that it is powered up. It is illegal to try
* to access the HW should intel_runtime_pm_get_if_active() report failure.
*
- * If @ignore_usecount=true, a reference will be acquired even if there is no
+ * If @ignore_usecount is true, a reference will be acquired even if there is no
* user requiring the device to be powered up (dev->power.usage_count == 0).
* If the function returns false in this case then it's guaranteed that the
* device's runtime suspend hook has been called already or that it will be
@@ -644,7 +644,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
struct drm_i915_private *i915 =
container_of(rpm, struct drm_i915_private, runtime_pm);
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct device *kdev = &pdev->dev;
rpm->kdev = kdev;
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
new file mode 100644
index 000000000000..4d71547a5b83
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020,2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_step.h"
+
+/*
+ * KBL revision ID ordering is bizarre; higher revision IDs map to lower
+ * steppings in some cases. So rather than test against the revision ID
+ * directly, let's map that into our own range of increasing IDs that we
+ * can test against in a regular manner.
+ */
+
+/* FIXME: what about REVID_E0? */
+static const struct intel_step_info kbl_revids[] = {
+ [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
+ [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
+ [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
+ [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 },
+ [4] = { .gt_step = STEP_F0, .display_step = STEP_C0 },
+ [5] = { .gt_step = STEP_C0, .display_step = STEP_B1 },
+ [6] = { .gt_step = STEP_D1, .display_step = STEP_B1 },
+ [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 },
+};
+
+static const struct intel_step_info tgl_uy_revid_step_tbl[] = {
+ [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
+ [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
+ [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
+ [3] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
+};
+
+/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */
+static const struct intel_step_info tgl_revid_step_tbl[] = {
+ [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
+ [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 },
+};
+
+static const struct intel_step_info adls_revid_step_tbl[] = {
+ [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
+ [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 },
+ [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
+ [0x8] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
+ [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 },
+};
+
+void intel_step_init(struct drm_i915_private *i915)
+{
+ const struct intel_step_info *revids = NULL;
+ int size = 0;
+ int revid = INTEL_REVID(i915);
+ struct intel_step_info step = {};
+
+ if (IS_ALDERLAKE_S(i915)) {
+ revids = adls_revid_step_tbl;
+ size = ARRAY_SIZE(adls_revid_step_tbl);
+ } else if (IS_TGL_U(i915) || IS_TGL_Y(i915)) {
+ revids = tgl_uy_revid_step_tbl;
+ size = ARRAY_SIZE(tgl_uy_revid_step_tbl);
+ } else if (IS_TIGERLAKE(i915)) {
+ revids = tgl_revid_step_tbl;
+ size = ARRAY_SIZE(tgl_revid_step_tbl);
+ } else if (IS_KABYLAKE(i915)) {
+ revids = kbl_revids;
+ size = ARRAY_SIZE(kbl_revids);
+ }
+
+ /* Not using the stepping scheme for this platform yet. */
+ if (!revids)
+ return;
+
+ if (revid < size && revids[revid].gt_step != STEP_NONE) {
+ step = revids[revid];
+ } else {
+ drm_warn(&i915->drm, "Unknown revid 0x%02x\n", revid);
+
+ /*
+ * If we hit a gap in the revid array, use the information for
+ * the next revid.
+ *
+ * This may be wrong in all sorts of ways, especially if the
+ * steppings in the array are not monotonically increasing, but
+ * it's better than defaulting to 0.
+ */
+ while (revid < size && revids[revid].gt_step == STEP_NONE)
+ revid++;
+
+ if (revid < size) {
+ drm_dbg(&i915->drm, "Using steppings for revid 0x%02x\n",
+ revid);
+ step = revids[revid];
+ } else {
+ drm_dbg(&i915->drm, "Using future steppings\n");
+ step.gt_step = STEP_FUTURE;
+ step.display_step = STEP_FUTURE;
+ }
+ }
+
+ if (drm_WARN_ON(&i915->drm, step.gt_step == STEP_NONE))
+ return;
+
+ RUNTIME_INFO(i915)->step = step;
+}
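/*
 * A minimal sketch of how the stepping info set up above is meant to be
 * consumed (hypothetical helper; the real IS_*_STEP() macros live
 * elsewhere and their range semantics may differ):
 */
static inline bool
intel_gt_step_in_range(struct drm_i915_private *i915, u8 since, u8 until)
{
	u8 step = RUNTIME_INFO(i915)->step.gt_step;

	/* STEP_NONE means intel_step_init() found no table for this platform */
	return step != STEP_NONE && step >= since && step <= until;
}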
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
new file mode 100644
index 000000000000..958a8bb5d677
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020,2021 Intel Corporation
+ */
+
+#ifndef __INTEL_STEP_H__
+#define __INTEL_STEP_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+struct intel_step_info {
+ u8 gt_step;
+ u8 display_step;
+};
+
+/*
+ * Symbolic steppings that do not match the hardware. These are valid as both
+ * GT and display stepping names.
+ */
+enum intel_step {
+ STEP_NONE = 0,
+ STEP_A0,
+ STEP_A2,
+ STEP_B0,
+ STEP_B1,
+ STEP_C0,
+ STEP_D0,
+ STEP_D1,
+ STEP_E0,
+ STEP_F0,
+ STEP_G0,
+ STEP_FUTURE,
+ STEP_FOREVER,
+};
+
+void intel_step_init(struct drm_i915_private *i915);
+
+#endif /* __INTEL_STEP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9ac501bcfdad..661b50191f2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -465,6 +465,22 @@ fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
+ /*
+ * Bugs in PCI programming (or failing hardware) can occasionally cause
+ * us to lose access to the MMIO BAR. When this happens, register
+ * reads will come back with 0xFFFFFFFF for every register and things
+ * go bad very quickly. Let's try to detect that special case and at
+ * least try to print a more informative message about what has
+ * happened.
+ *
+ * During normal operation the FPGA_DBG register has several unused
+ * bits that will always read back as 0s, so we can use them as canaries
+ * to recognize when MMIO accesses are just busted.
+ */
+ if (unlikely(dbg == ~0))
+ drm_err(&uncore->i915->drm,
+ "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
+
__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
return true;
@@ -1780,7 +1796,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int mmio_bar;
int mmio_size;
@@ -1812,7 +1828,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
- struct pci_dev *pdev = uncore->i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
pci_iounmap(pdev, uncore->regs);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 632b912b0bc9..f0f5c4df8dbc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -727,6 +727,53 @@ err_fini:
return err;
}
+static int igt_buddy_alloc_limit(void *arg)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ const u64 size = U64_MAX;
+ int err;
+
+ err = i915_buddy_init(&mm, size, PAGE_SIZE);
+ if (err)
+ return err;
+
+ if (mm.max_order != I915_BUDDY_MAX_ORDER) {
+ pr_err("mm.max_order(%d) != %d\n",
+ mm.max_order, I915_BUDDY_MAX_ORDER);
+ err = -EINVAL;
+ goto out_fini;
+ }
+
+ block = i915_buddy_alloc(&mm, mm.max_order);
+ if (IS_ERR(block)) {
+ err = PTR_ERR(block);
+ goto out_fini;
+ }
+
+ if (i915_buddy_block_order(block) != mm.max_order) {
+ pr_err("block order(%d) != %d\n",
+ i915_buddy_block_order(block), mm.max_order);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ if (i915_buddy_block_size(&mm, block) !=
+ BIT_ULL(mm.max_order) * PAGE_SIZE) {
+ pr_err("block size(%llu) != %llu\n",
+ i915_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+out_free:
+ i915_buddy_free(&mm, block);
+out_fini:
+ i915_buddy_fini(&mm);
+ return err;
+}
+
int i915_buddy_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -735,6 +782,7 @@ int i915_buddy_mock_selftests(void)
SUBTEST(igt_buddy_alloc_pathological),
SUBTEST(igt_buddy_alloc_smoke),
SUBTEST(igt_buddy_alloc_range),
+ SUBTEST(igt_buddy_alloc_limit),
};
return i915_subtests(tests, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c1adea8765a9..45c6c0107c7c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -121,7 +121,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &fake_ops, &lock_class);
+ i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
i915_gem_object_set_volatile(obj);
@@ -130,7 +130,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
obj->cache_level = I915_CACHE_NONE;
/* Preallocate the "backing storage" */
- if (i915_gem_object_pin_pages(obj))
+ if (i915_gem_object_pin_pages_unlocked(obj))
goto err_obj;
i915_gem_object_unpin_pages(obj);
@@ -146,6 +146,7 @@ static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
struct i915_ppgtt *ppgtt;
+ struct i915_gem_ww_ctx ww;
u64 size, last, limit;
int err = 0;
@@ -171,6 +172,12 @@ static int igt_ppgtt_alloc(void *arg)
limit = totalram_pages() << PAGE_SHIFT;
limit = min(ppgtt->vm.total, limit);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_vm_lock_objects(&ppgtt->vm, &ww);
+ if (err)
+ goto err_ppgtt_cleanup;
+
/* Check we can allocate the entire range */
for (size = 4096; size <= limit; size <<= 2) {
struct i915_vm_pt_stash stash = {};
@@ -215,6 +222,13 @@ static int igt_ppgtt_alloc(void *arg)
}
err_ppgtt_cleanup:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
i915_vm_put(&ppgtt->vm);
return err;
}
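/*
 * The -EDEADLK handling above is the standard ww-mutex backoff dance;
 * a stripped-down sketch of the pattern (assumed shape, not verbatim
 * from any one call site):
 *
 *	i915_gem_ww_ctx_init(&ww, false);
 * retry:
 *	err = i915_vm_lock_objects(vm, &ww);
 *	if (!err)
 *		err = do_the_work(vm);	/* hypothetical work under the lock */
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */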
@@ -276,7 +290,7 @@ static int lowlevel_hole(struct i915_address_space *vm,
GEM_BUG_ON(obj->base.size != BIT_ULL(size));
- if (i915_gem_object_pin_pages(obj)) {
+ if (i915_gem_object_pin_pages_unlocked(obj)) {
i915_gem_object_put(obj);
kfree(order);
break;
@@ -297,20 +311,36 @@ static int lowlevel_hole(struct i915_address_space *vm,
if (vm->allocate_va_range) {
struct i915_vm_pt_stash stash = {};
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_vm_lock_objects(vm, &ww);
+ if (err)
+ goto alloc_vm_end;
+ err = -ENOMEM;
if (i915_vm_alloc_pt_stash(vm, &stash,
BIT_ULL(size)))
- break;
-
- if (i915_vm_pin_pt_stash(vm, &stash)) {
- i915_vm_free_pt_stash(vm, &stash);
- break;
- }
+ goto alloc_vm_end;
- vm->allocate_va_range(vm, &stash,
- addr, BIT_ULL(size));
+ err = i915_vm_pin_pt_stash(vm, &stash);
+ if (!err)
+ vm->allocate_va_range(vm, &stash,
+ addr, BIT_ULL(size));
i915_vm_free_pt_stash(vm, &stash);
+alloc_vm_end:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err)
+ break;
}
mock_vma->pages = obj->mm.pages;
@@ -1076,7 +1106,8 @@ static int igt_ppgtt_shrink_boom(void *arg)
return exercise_ppgtt(arg, shrink_boom);
}
-static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
+static int sort_holes(void *priv, const struct list_head *A,
+ const struct list_head *B)
{
struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
@@ -1166,7 +1197,7 @@ static int igt_ggtt_page(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto out_free;
@@ -1333,7 +1364,7 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
goto out;
@@ -1385,7 +1416,7 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
goto out;
@@ -1549,7 +1580,7 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
goto out;
@@ -1658,7 +1689,7 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
goto out;
@@ -1829,7 +1860,7 @@ static int igt_cs_tlb(void *arg)
goto out_vm;
}
- batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+ batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_put_bbe;
@@ -1845,7 +1876,7 @@ static int igt_cs_tlb(void *arg)
}
/* Track the execution of each request by writing into different slot */
- batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+ batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_put_act;
@@ -1892,7 +1923,7 @@ static int igt_cs_tlb(void *arg)
goto out_put_out;
GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
- result = i915_gem_object_pin_map(out, I915_MAP_WB);
+ result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
if (IS_ERR(result)) {
err = PTR_ERR(result);
goto out_put_out;
@@ -1908,6 +1939,7 @@ static int igt_cs_tlb(void *arg)
while (!__igt_timeout(end_time, NULL)) {
struct i915_vm_pt_stash stash = {};
struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
u64 offset;
offset = igt_random_offset(&prng,
@@ -1926,19 +1958,30 @@ static int igt_cs_tlb(void *arg)
if (err)
goto end;
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_vm_lock_objects(vm, &ww);
+ if (err)
+ goto end_ww;
+
err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
if (err)
- goto end;
+ goto end_ww;
err = i915_vm_pin_pt_stash(vm, &stash);
- if (err) {
- i915_vm_free_pt_stash(vm, &stash);
- goto end;
- }
-
- vm->allocate_va_range(vm, &stash, offset, chunk_size);
+ if (!err)
+ vm->allocate_va_range(vm, &stash, offset, chunk_size);
i915_vm_free_pt_stash(vm, &stash);
+end_ww:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (err)
+ goto end;
/* Prime the TLB with the dummy pages */
for (i = 0; i < count; i++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index d2a678a2497e..ee8e753d98ce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -609,6 +609,206 @@ static int live_nop_request(void *arg)
return err;
}
+static int __cancel_inactive(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling inactive request\n", engine->name);
+ i915_request_cancel(rq, -EINTR);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel inactive request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int __cancel_active(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling active request\n", engine->name);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+ i915_request_cancel(rq, -EINTR);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel active request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int __cancel_completed(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+ igt_spinner_end(&spin);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ pr_debug("%s: Cancelling completed request\n", engine->name);
+ i915_request_cancel(rq, -EINTR);
+ if (rq->fence.error) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int live_cancel_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+
+ /*
+ * Check cancellation of requests. We expect to be able to immediately
+ * cancel a request, even one already executing on the GPU (a condensed
+ * usage sketch follows this function).
+ */
+
+ for_each_uabi_engine(engine, i915) {
+ struct igt_live_test t;
+ int err, err2;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ return err;
+
+ err = __cancel_inactive(engine);
+ if (err == 0)
+ err = __cancel_active(engine);
+ if (err == 0)
+ err = __cancel_completed(engine);
+
+ err2 = igt_live_test_end(&t);
+ if (err)
+ return err;
+ if (err2)
+ return err2;
+ }
+
+ return 0;
+}
+
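
The three helpers above exercise i915_request_cancel() against a request that has not started, one actively spinning on the GPU, and one already completed: the first two must complete their fence with the injected -EINTR, while cancelling a completed request must be a no-op. A condensed sketch of the caller-side pattern, assuming rq is a request the caller holds a reference on:

	i915_request_get(rq);
	i915_request_add(rq);

	i915_request_cancel(rq, -EINTR);	/* inject the chosen errno */

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;		/* cancellation must retire the request */
	else if (rq->fence.error != -EINTR)
		err = -EINVAL;		/* the fence carries the errno */

	i915_request_put(rq);
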
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -620,7 +820,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -782,7 +982,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -817,7 +1017,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
{
u32 *cmd;
- cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
@@ -1070,8 +1270,8 @@ out_request:
if (!request[idx])
break;
- cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
- I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj,
+ I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
@@ -1486,6 +1686,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sequential_engines),
SUBTEST(live_parallel_engines),
SUBTEST(live_empty_request),
+ SUBTEST(live_cancel_request),
SUBTEST(live_breadcrumbs_smoketest),
};
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 1b6125e4c1ac..5fe7b80ca0bd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -361,7 +361,7 @@ static unsigned long rotated_index(const struct intel_rotation_info *r,
unsigned int x,
unsigned int y)
{
- return (r->plane[n].stride * (r->plane[n].height - y - 1) +
+ return (r->plane[n].src_stride * (r->plane[n].height - y - 1) +
r->plane[n].offset + x);
}
@@ -373,6 +373,8 @@ assert_rotated(struct drm_i915_gem_object *obj,
unsigned int x, y;
for (x = 0; x < r->plane[n].width; x++) {
+ unsigned int left;
+
for (y = 0; y < r->plane[n].height; y++) {
unsigned long src_idx;
dma_addr_t src;
@@ -401,6 +403,31 @@ assert_rotated(struct drm_i915_gem_object *obj,
sg = sg_next(sg);
}
+
+ left = (r->plane[n].dst_stride - y) * PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_len(sg) != left) {
+ pr_err("Invalid sg.length, found %d, expected %u for rotated page (%d, %d)\n",
+ sg_dma_len(sg), left, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != 0) {
+ pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n",
+ &sg_dma_address(sg), x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
}
return sg;
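
The tail check added above covers the padding that dst_stride introduces in a rotated view: after the y loop walks the height real pages of a column, left = (dst_stride - height) * PAGE_SIZE bytes of padding must be backed by a single scatterlist entry at DMA address 0. A worked example, assuming height = 3 and dst_stride = 4:

	/* after walking the 3 real pages of the column, y == 3 */
	left = (4 - 3) * PAGE_SIZE;	/* one 4 KiB padding page expected */

	/* and the padding entry must satisfy:
	 *	sg_dma_len(sg) == left && sg_dma_address(sg) == 0
	 */
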
@@ -411,7 +438,7 @@ static unsigned long remapped_index(const struct intel_remapped_info *r,
unsigned int x,
unsigned int y)
{
- return (r->plane[n].stride * y +
+ return (r->plane[n].src_stride * y +
r->plane[n].offset + x);
}
@@ -462,15 +489,55 @@ assert_remapped(struct drm_i915_gem_object *obj,
if (!left)
sg = sg_next(sg);
}
+
+ if (left) {
+ pr_err("Unexpected sg tail with %d size for remapped page (%d, %d)\n",
+ left,
+ x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ left = (r->plane[n].dst_stride - r->plane[n].width) * PAGE_SIZE;
+
+ if (!left)
+ continue;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_len(sg) != left) {
+ pr_err("Invalid sg.length, found %u, expected %u for remapped page (%d, %d)\n",
+ sg_dma_len(sg), left,
+ x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != 0) {
+ pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n",
+ &sg_dma_address(sg),
+ x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
+ left = 0;
}
return sg;
}
-static unsigned int rotated_size(const struct intel_remapped_plane_info *a,
- const struct intel_remapped_plane_info *b)
+static unsigned int remapped_size(enum i915_ggtt_view_type view_type,
+ const struct intel_remapped_plane_info *a,
+ const struct intel_remapped_plane_info *b)
{
- return a->width * a->height + b->width * b->height;
+ if (view_type == I915_GGTT_VIEW_ROTATED)
+ return a->dst_stride * a->width + b->dst_stride * b->width;
+
+ return a->dst_stride * a->height + b->dst_stride * b->height;
}
static int igt_vma_rotate_remap(void *arg)
@@ -479,21 +546,26 @@ static int igt_vma_rotate_remap(void *arg)
struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_gem_object *obj;
const struct intel_remapped_plane_info planes[] = {
- { .width = 1, .height = 1, .stride = 1 },
- { .width = 2, .height = 2, .stride = 2 },
- { .width = 4, .height = 4, .stride = 4 },
- { .width = 8, .height = 8, .stride = 8 },
+ { .width = 1, .height = 1, .src_stride = 1 },
+ { .width = 2, .height = 2, .src_stride = 2 },
+ { .width = 4, .height = 4, .src_stride = 4 },
+ { .width = 8, .height = 8, .src_stride = 8 },
+
+ { .width = 3, .height = 5, .src_stride = 3 },
+ { .width = 3, .height = 5, .src_stride = 4 },
+ { .width = 3, .height = 5, .src_stride = 5 },
- { .width = 3, .height = 5, .stride = 3 },
- { .width = 3, .height = 5, .stride = 4 },
- { .width = 3, .height = 5, .stride = 5 },
+ { .width = 5, .height = 3, .src_stride = 5 },
+ { .width = 5, .height = 3, .src_stride = 7 },
+ { .width = 5, .height = 3, .src_stride = 9 },
- { .width = 5, .height = 3, .stride = 5 },
- { .width = 5, .height = 3, .stride = 7 },
- { .width = 5, .height = 3, .stride = 9 },
+ { .width = 4, .height = 6, .src_stride = 6 },
+ { .width = 6, .height = 4, .src_stride = 6 },
+
+ { .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 },
+ { .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 },
+ { .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 },
- { .width = 4, .height = 6, .stride = 6 },
- { .width = 6, .height = 4, .stride = 6 },
{ }
}, *a, *b;
enum i915_ggtt_view_type types[] = {
@@ -515,22 +587,33 @@ static int igt_vma_rotate_remap(void *arg)
for (t = types; *t; t++) {
for (a = planes; a->width; a++) {
for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
- struct i915_ggtt_view view;
+ struct i915_ggtt_view view = {
+ .type = *t,
+ .remapped.plane[0] = *a,
+ .remapped.plane[1] = *b,
+ };
+ struct intel_remapped_plane_info *plane_info = view.remapped.plane;
unsigned int n, max_offset;
- max_offset = max(a->stride * a->height,
- b->stride * b->height);
+ max_offset = max(plane_info[0].src_stride * plane_info[0].height,
+ plane_info[1].src_stride * plane_info[1].height);
GEM_BUG_ON(max_offset > max_pages);
max_offset = max_pages - max_offset;
- view.type = *t;
- view.rotated.plane[0] = *a;
- view.rotated.plane[1] = *b;
-
- for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
- for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
+ if (!plane_info[0].dst_stride)
+ plane_info[0].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].height :
+ plane_info[0].width;
+ if (!plane_info[1].dst_stride)
+ plane_info[1].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[1].height :
+ plane_info[1].width;
+
+ for_each_prime_number_from(plane_info[0].offset, 0, max_offset) {
+ for_each_prime_number_from(plane_info[1].offset, 0, max_offset) {
struct scatterlist *sg;
struct i915_vma *vma;
+ unsigned int expected_pages;
vma = checked_vma_instance(obj, vm, &view);
if (IS_ERR(vma)) {
@@ -544,25 +627,27 @@ static int igt_vma_rotate_remap(void *arg)
goto out_object;
}
+ expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]);
+
if (view.type == I915_GGTT_VIEW_ROTATED &&
- vma->size != rotated_size(a, b) * PAGE_SIZE) {
+ vma->size != expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
- PAGE_SIZE * rotated_size(a, b), vma->size);
+ PAGE_SIZE * expected_pages, vma->size);
err = -EINVAL;
goto out_object;
}
if (view.type == I915_GGTT_VIEW_REMAPPED &&
- vma->size > rotated_size(a, b) * PAGE_SIZE) {
+ vma->size > expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
- PAGE_SIZE * rotated_size(a, b), vma->size);
+ PAGE_SIZE * expected_pages, vma->size);
err = -EINVAL;
goto out_object;
}
- if (vma->pages->nents > rotated_size(a, b)) {
+ if (vma->pages->nents > expected_pages) {
pr_err("sg table is wrong sizeo, expected %u, found %u nents\n",
- rotated_size(a, b), vma->pages->nents);
+ expected_pages, vma->pages->nents);
err = -EINVAL;
goto out_object;
}
@@ -587,17 +672,19 @@ static int igt_vma_rotate_remap(void *arg)
else
sg = assert_remapped(obj, &view.remapped, n, sg);
if (IS_ERR(sg)) {
- pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n",
+ pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n",
view.type == I915_GGTT_VIEW_ROTATED ?
"rotated" : "remapped", n,
- view.rotated.plane[0].width,
- view.rotated.plane[0].height,
- view.rotated.plane[0].stride,
- view.rotated.plane[0].offset,
- view.rotated.plane[1].width,
- view.rotated.plane[1].height,
- view.rotated.plane[1].stride,
- view.rotated.plane[1].offset);
+ plane_info[0].width,
+ plane_info[0].height,
+ plane_info[0].src_stride,
+ plane_info[0].dst_stride,
+ plane_info[0].offset,
+ plane_info[1].width,
+ plane_info[1].height,
+ plane_info[1].src_stride,
+ plane_info[1].dst_stride,
+ plane_info[1].offset);
err = -EINVAL;
goto out_object;
}
@@ -849,21 +936,26 @@ static int igt_vma_remapped_gtt(void *arg)
{
struct drm_i915_private *i915 = arg;
const struct intel_remapped_plane_info planes[] = {
- { .width = 1, .height = 1, .stride = 1 },
- { .width = 2, .height = 2, .stride = 2 },
- { .width = 4, .height = 4, .stride = 4 },
- { .width = 8, .height = 8, .stride = 8 },
+ { .width = 1, .height = 1, .src_stride = 1 },
+ { .width = 2, .height = 2, .src_stride = 2 },
+ { .width = 4, .height = 4, .src_stride = 4 },
+ { .width = 8, .height = 8, .src_stride = 8 },
- { .width = 3, .height = 5, .stride = 3 },
- { .width = 3, .height = 5, .stride = 4 },
- { .width = 3, .height = 5, .stride = 5 },
+ { .width = 3, .height = 5, .src_stride = 3 },
+ { .width = 3, .height = 5, .src_stride = 4 },
+ { .width = 3, .height = 5, .src_stride = 5 },
- { .width = 5, .height = 3, .stride = 5 },
- { .width = 5, .height = 3, .stride = 7 },
- { .width = 5, .height = 3, .stride = 9 },
+ { .width = 5, .height = 3, .src_stride = 5 },
+ { .width = 5, .height = 3, .src_stride = 7 },
+ { .width = 5, .height = 3, .src_stride = 9 },
+
+ { .width = 4, .height = 6, .src_stride = 6 },
+ { .width = 6, .height = 4, .src_stride = 6 },
+
+ { .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 },
+ { .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 },
+ { .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 },
- { .width = 4, .height = 6, .stride = 6 },
- { .width = 6, .height = 4, .stride = 6 },
{ }
}, *p;
enum i915_ggtt_view_type types[] = {
@@ -887,10 +979,10 @@ static int igt_vma_remapped_gtt(void *arg)
.type = *t,
.rotated.plane[0] = *p,
};
+ struct intel_remapped_plane_info *plane_info = view.rotated.plane;
struct i915_vma *vma;
u32 __iomem *map;
unsigned int x, y;
- int err;
i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -898,6 +990,10 @@ static int igt_vma_remapped_gtt(void *arg)
if (err)
goto out;
+ if (!plane_info[0].dst_stride)
+ plane_info[0].dst_stride = *t == I915_GGTT_VIEW_ROTATED ?
+ p->height : p->width;
+
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -913,15 +1009,15 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- for (y = 0 ; y < p->height; y++) {
- for (x = 0 ; x < p->width; x++) {
+ for (y = 0 ; y < plane_info[0].height; y++) {
+ for (x = 0 ; x < plane_info[0].width; x++) {
unsigned int offset;
u32 val = y << 16 | x;
if (*t == I915_GGTT_VIEW_ROTATED)
- offset = (x * p->height + y) * PAGE_SIZE;
+ offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
else
- offset = (y * p->width + x) * PAGE_SIZE;
+ offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
iowrite32(val, &map[offset / sizeof(*map)]);
}
@@ -944,8 +1040,8 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- for (y = 0 ; y < p->height; y++) {
- for (x = 0 ; x < p->width; x++) {
+ for (y = 0 ; y < plane_info[0].height; y++) {
+ for (x = 0 ; x < plane_info[0].width; x++) {
unsigned int offset, src_idx;
u32 exp = y << 16 | x;
u32 val;
@@ -960,8 +1056,9 @@ static int igt_vma_remapped_gtt(void *arg)
if (val != exp) {
pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
*t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
- val, exp);
+ exp, val);
i915_vma_unpin_iomap(vma);
+ err = -EINVAL;
goto out;
}
}
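
Throughout these i915_vma.c changes the old single stride splits in two: src_stride is the pages per row of the source object, while dst_stride is the pages per column of a rotated view, or per row of a remapped view. The GTT-side offset arithmetic the write/readback test above relies on reduces to the following condensed helper (hypothetical name, single plane at offset 0):

	static unsigned long view_page_offset(enum i915_ggtt_view_type type,
					      const struct intel_remapped_plane_info *p,
					      unsigned int x, unsigned int y)
	{
		if (type == I915_GGTT_VIEW_ROTATED)
			return x * p->dst_stride + y;	/* column-major */

		return y * p->dst_stride + x;		/* row-major */
	}
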
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 83f6e5f31fb3..cfbbe415b57c 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -12,8 +12,6 @@
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
- unsigned int mode;
- void *vaddr;
int err;
memset(spin, 0, sizeof(*spin));
@@ -24,6 +22,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
err = PTR_ERR(spin->hws);
goto err;
}
+ i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->obj)) {
@@ -31,34 +30,83 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
goto err_hws;
}
- i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
- spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
- mode = i915_coherent_map_type(gt->i915);
- vaddr = i915_gem_object_pin_map(spin->obj, mode);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_unpin_hws;
- }
- spin->batch = vaddr;
-
return 0;
-err_unpin_hws:
- i915_gem_object_unpin_map(spin->hws);
-err_obj:
- i915_gem_object_put(spin->obj);
err_hws:
i915_gem_object_put(spin->hws);
err:
return err;
}
+static void *igt_spinner_pin_obj(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object *obj,
+ unsigned int mode, struct i915_vma **vma)
+{
+ void *vaddr;
+ int ret;
+
+ *vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(*vma))
+ return ERR_CAST(*vma);
+
+ ret = i915_gem_object_lock(obj, ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vaddr = i915_gem_object_pin_map(obj, mode);
+
+ if (!ww)
+ i915_gem_object_unlock(obj);
+
+ if (IS_ERR(vaddr))
+ return vaddr;
+
+ if (ww)
+ ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
+ else
+ ret = i915_vma_pin(*vma, 0, 0, PIN_USER);
+
+ if (ret) {
+ i915_gem_object_unpin_map(obj);
+ return ERR_PTR(ret);
+ }
+
+ return vaddr;
+}
+
+int igt_spinner_pin(struct igt_spinner *spin,
+ struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ void *vaddr;
+
+ if (spin->ce && WARN_ON(spin->ce != ce))
+ return -ENODEV;
+ spin->ce = ce;
+
+ if (!spin->seqno) {
+ vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+ }
+
+ if (!spin->batch) {
+ unsigned int mode =
+ i915_coherent_map_type(spin->gt->i915);
+
+ vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ spin->batch = vaddr;
+ }
+
+ return 0;
+}
+
static unsigned int seqno_offset(u64 fence)
{
return offset_in_page(sizeof(u32) * fence);
@@ -103,27 +151,18 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (!intel_engine_can_store_dword(ce->engine))
return ERR_PTR(-ENODEV);
- vma = i915_vma_instance(spin->obj, ce->vm, NULL);
- if (IS_ERR(vma))
- return ERR_CAST(vma);
-
- hws = i915_vma_instance(spin->hws, ce->vm, NULL);
- if (IS_ERR(hws))
- return ERR_CAST(hws);
+ if (!spin->batch) {
+ err = igt_spinner_pin(spin, ce, NULL);
+ if (err)
+ return ERR_PTR(err);
+ }
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return ERR_PTR(err);
-
- err = i915_vma_pin(hws, 0, 0, PIN_USER);
- if (err)
- goto unpin_vma;
+ hws = spin->hws_vma;
+ vma = spin->batch_vma;
rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto unpin_hws;
- }
+ if (IS_ERR(rq))
+ return ERR_CAST(rq);
err = move_to_active(vma, rq, 0);
if (err)
@@ -186,10 +225,6 @@ cancel_rq:
i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
-unpin_hws:
- i915_vma_unpin(hws);
-unpin_vma:
- i915_vma_unpin(vma);
return err ? ERR_PTR(err) : rq;
}
@@ -203,6 +238,9 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
void igt_spinner_end(struct igt_spinner *spin)
{
+ if (!spin->batch)
+ return;
+
*spin->batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(spin->gt);
}
@@ -211,10 +249,16 @@ void igt_spinner_fini(struct igt_spinner *spin)
{
igt_spinner_end(spin);
- i915_gem_object_unpin_map(spin->obj);
+ if (spin->batch) {
+ i915_vma_unpin(spin->batch_vma);
+ i915_gem_object_unpin_map(spin->obj);
+ }
i915_gem_object_put(spin->obj);
- i915_gem_object_unpin_map(spin->hws);
+ if (spin->seqno) {
+ i915_vma_unpin(spin->hws_vma);
+ i915_gem_object_unpin_map(spin->hws);
+ }
i915_gem_object_put(spin->hws);
}
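
After this refactor igt_spinner_init() only creates the two backing objects; mapping and pinning move into igt_spinner_pin(), which supports two modes. A condensed sketch of both call sites, assuming spin, ce and the ww context are already set up:

	/* standalone: the helper takes and drops the object locks itself */
	err = igt_spinner_pin(&spin, ce, NULL);

	/* under a caller's transaction: the locks join the ww context and
	 * are released by the caller's backoff/fini, not by the helper
	 */
	err = igt_spinner_pin(&spin, ce, &ww);
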
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index ec62c9ef320b..fbe5b1625b05 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -20,11 +20,16 @@ struct igt_spinner {
struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_vma *hws_vma, *batch_vma;
u32 *batch;
void *seqno;
};
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
+int igt_spinner_pin(struct igt_spinner *spin,
+ struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww);
void igt_spinner_fini(struct igt_spinner *spin);
struct i915_request *
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ce7adfa3bca0..a5fc0bf3feb9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -31,10 +31,12 @@ static void close_objects(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, objects, st_link) {
+ i915_gem_object_lock(obj, NULL);
if (i915_gem_object_has_pinned_pages(obj))
i915_gem_object_unpin_pages(obj);
/* No polluting the memory region between tests */
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
@@ -69,7 +71,7 @@ static int igt_mock_fill(void *arg)
break;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
i915_gem_object_put(obj);
break;
@@ -109,7 +111,7 @@ igt_object_create(struct intel_memory_region *mem,
if (IS_ERR(obj))
return obj;
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto put;
@@ -123,8 +125,10 @@ put:
static void igt_object_release(struct drm_i915_gem_object *obj)
{
+ i915_gem_object_lock(obj, NULL);
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj);
+ i915_gem_object_unlock(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
@@ -144,6 +148,82 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
return true;
}
+static int igt_mock_reserve(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t avail = resource_size(&mem->region);
+ struct drm_i915_gem_object *obj;
+ const u32 chunk_size = SZ_32M;
+ u32 i, offset, count, *order;
+ u64 allocated, cur_avail;
+ I915_RND_STATE(prng);
+ LIST_HEAD(objects);
+ int err = 0;
+
+ if (!list_empty(&mem->reserved)) {
+ pr_err("%s region reserved list is not empty\n", __func__);
+ return -EINVAL;
+ }
+
+ count = avail / chunk_size;
+ order = i915_random_order(count, &prng);
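+ /* Treat allocation failure of the random order as a subtest skip */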
+ if (!order)
+ return 0;
+
+ /* Reserve a bunch of ranges within the region */
+ for (i = 0; i < count; ++i) {
+ u64 start = order[i] * chunk_size;
+ u64 size = i915_prandom_u32_max_state(chunk_size, &prng);
+
+ /* Allow for some really big holes */
+ if (!size)
+ continue;
+
+ size = round_up(size, PAGE_SIZE);
+ offset = igt_random_offset(&prng, 0, chunk_size, size,
+ PAGE_SIZE);
+
+ err = intel_memory_region_reserve(mem, start + offset, size);
+ if (err) {
+ pr_err("%s failed to reserve range", __func__);
+ goto out_close;
+ }
+
+ /* XXX: maybe sanity check the block range here? */
+ avail -= size;
+ }
+
+ /* Try to see if we can allocate from the remaining space */
+ allocated = 0;
+ cur_avail = avail;
+ do {
+ u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+
+ size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ if (PTR_ERR(obj) == -ENXIO)
+ break;
+
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+ cur_avail -= size;
+ allocated += size;
+ } while (1);
+
+ if (allocated != avail) {
+ pr_err("%s mismatch between allocation and free space", __func__);
+ err = -EINVAL;
+ }
+
+out_close:
+ kfree(order);
+ close_objects(mem, &objects);
+ i915_buddy_free_list(&mem->mm, &mem->reserved);
+ return err;
+}
+
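
igt_mock_reserve() above carves random page-aligned sub-ranges out of the mock region, then drains what remains with allocations until -ENXIO, checking that exactly the unreserved space was allocatable. A condensed restatement of the invariant, in the test's own bookkeeping (avail shrinks per reservation, allocated grows per object):

	u64 reserved = resource_size(&mem->region) - avail;

	/* once allocation fails with -ENXIO, every byte must be accounted:
	 * either reserved up front or handed out as an object
	 */
	GEM_BUG_ON(reserved + allocated != resource_size(&mem->region));
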
static int igt_mock_contiguous(void *arg)
{
struct intel_memory_region *mem = arg;
@@ -433,7 +513,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (err)
return err;
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -538,7 +618,7 @@ static int igt_lmem_create(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto out_put;
@@ -577,7 +657,7 @@ static int igt_lmem_write_gpu(void *arg)
goto out_file;
}
- err = i915_gem_object_pin_pages(obj);
+ err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto out_put;
@@ -649,7 +729,7 @@ static int igt_lmem_write_cpu(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_put;
@@ -753,7 +833,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
return obj;
}
- addr = i915_gem_object_pin_map(obj, type);
+ addr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(addr)) {
i915_gem_object_put(obj);
if (PTR_ERR(addr) == -ENXIO)
@@ -930,6 +1010,7 @@ static int perf_memcpy(void *arg)
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_reserve),
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 7270fc8ca801..5c7ae40bba63 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -74,7 +74,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
- ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.dma = i915->drm.dev;
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 3c6021415274..5d2d010a1e22 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -27,13 +27,13 @@ static int mock_object_init(struct intel_memory_region *mem,
return -E2BIG;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+ i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- i915_gem_object_init_memory_region(obj, mem, flags);
+ i915_gem_object_init_memory_region(obj, mem);
return 0;
}