author     Olof Johansson <olof@lixom.net>  2020-03-04 01:40:54 +0100
committer  Olof Johansson <olof@lixom.net>  2020-03-04 01:40:56 +0100
commit     820d15632ec10bc0cf79595c5a635b795d149520 (patch)
tree       9deb7defafc350a3c74365ebe370634f0406d821 /drivers/gpu
parent     Merge tag 'arm-soc/for-5.6/defconfig-fixes' of https://github.com/Broadcom/st... (diff)
parent     arm64: dts: socfpga: agilex: Fix gmac compatible (diff)
download   linux-820d15632ec10bc0cf79595c5a635b795d149520.tar.xz
           linux-820d15632ec10bc0cf79595c5a635b795d149520.zip
Merge tag 'socfpga_dts_fix_for_v5.6_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux into arm/fixes
arm64: dts: agilex: fix gmac compatible

- The compatible for Agilex GMAC should be "altr,socfpga-stmmac-a10-s10"

* tag 'socfpga_dts_fix_for_v5.6_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux: (578 commits)
  arm64: dts: socfpga: agilex: Fix gmac compatible
  Linux 5.6-rc4
  KVM: VMX: check descriptor table exits on instruction emulation
  ext4: potential crash on allocation error in ext4_alloc_flex_bg_array()
  macintosh: therm_windtunnel: fix regression when instantiating devices
  jbd2: fix data races at struct journal_head
  kvm: x86: Limit the number of "kvm: disabled by bios" messages
  KVM: x86: avoid useless copy of cpufreq policy
  KVM: allow disabling -Werror
  KVM: x86: allow compiling as non-module with W=1
  KVM: Pre-allocate 1 cpumask variable per cpu for both pv tlb and pv ipis
  KVM: Introduce pv check helpers
  KVM: let declaration of kvm_get_running_vcpus match implementation
  KVM: SVM: allocate AVIC data structures based on kvm_amd module parameter
  MAINTAINERS: Correct Cadence PCI driver path
  io_uring: fix 32-bit compatability with sendmsg/recvmsg
  net: dsa: mv88e6xxx: Fix masking of egress port
  mlxsw: pci: Wait longer before accessing the device after reset
  sfc: fix timestamp reconstruction at 16-bit rollover points
  vsock: fix potential deadlock in transport->release()
  ...

Link: https://lore.kernel.org/r/20200303153509.28248-1-dinguyen@kernel.org
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 12
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/ti-tfp410.c | 3
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c | 3
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 7
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 98
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 61
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.h | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_types.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 59
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 21
-rw-r--r-- drivers/gpu/drm/i915/i915_scheduler.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c | 5
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 37
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 65
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 85
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 58
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 7
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4
-rw-r--r-- drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 6
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 7
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 43
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 6
-rw-r--r-- drivers/gpu/drm/selftests/drm_cmdline_selftests.h | 1
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_cmdline_parser.c | 15
61 files changed, 618 insertions, 284 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 94e2fd758e01..42f4febe24c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1389,7 +1389,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_ATOMIC |
+ DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index d3c27a3c43f6..7546da0cc70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -195,6 +195,7 @@ struct amdgpu_gmc {
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
+ uint32_t sdpif_register;
/* apertures */
u64 shared_aperture_start;
u64 shared_aperture_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3a1570dafe34..146f96661b6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1013,6 +1013,30 @@ static int psp_dtm_initialize(struct psp_context *psp)
return 0;
}
+static int psp_dtm_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
/*
@@ -1037,7 +1061,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (!psp->dtm_context.dtm_initialized)
return 0;
- ret = psp_hdcp_unload(psp);
+ ret = psp_dtm_unload(psp);
if (ret)
return ret;
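
The new psp_dtm_unload() mirrors the existing HDCP teardown: build a TA-unload command for the saved session, submit it to the PSP, and free the command buffer. A minimal sketch of that shared pattern (psp_ta_unload_generic() and its session_id parameter are illustrative names, not driver API; the real bug fixed above was that psp_dtm_terminate() unloaded the HDCP session instead of the DTM one):

static int psp_ta_unload_generic(struct psp_context *psp, uint32_t session_id)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* fill the command buffer with a TA-unload request */
	psp_prep_ta_unload_cmd_buf(cmd, session_id);
	/* hand it to the PSP and wait on the fence */
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	kfree(cmd);

	return ret;
}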
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1785fdad6ecb..22bbb36c768e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3923,11 +3923,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
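
This change, and the matching gfx_v9_0 hunk below, bracket the RLC clock-counter read with amdgpu_gfx_off_ctrl(), keeping the GFX block from entering GFXOFF while its registers are accessed; reading GC registers while the block is power-gated can hang or return garbage. The resulting shape of the helper, as a sketch consolidating both hunks:

static uint64_t read_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	amdgpu_gfx_off_ctrl(adev, false);	/* hold GFX power on */
	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	amdgpu_gfx_off_ctrl(adev, true);	/* drop the reference */

	return clock;
}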
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b33a4eb39193..3afdbbd6aaad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1193,6 +1193,14 @@ static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
return false;
}
+static bool is_raven_kicker(struct amdgpu_device *adev)
+{
+ if (adev->pm.fw_version >= 0x41e2b)
+ return true;
+ else
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -1205,9 +1213,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
break;
case CHIP_RAVEN:
if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
- ((adev->gfx.rlc_fw_version != 106 &&
+ ((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -3959,6 +3966,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
uint32_t tmp, lsb, msb, i = 0;
@@ -3977,6 +3985,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 90216abf14a4..cc0c273a86f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1272,6 +1272,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
}
/**
+ * gmc_v9_0_restore_registers - restores regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This restores register values, saved at suspend.
+ */
+static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+}
+
+/**
* gmc_v9_0_gart_enable - gart enable
*
* @adev: amdgpu_device pointer
@@ -1377,6 +1390,20 @@ static int gmc_v9_0_hw_init(void *handle)
}
/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves potential register values that should be
+ * restored upon resume
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
+}
+
+/**
* gmc_v9_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
@@ -1412,9 +1439,16 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return gmc_v9_0_hw_fini(adev);
+ r = gmc_v9_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ gmc_v9_0_save_registers(adev);
+
+ return 0;
}
static int gmc_v9_0_resume(void *handle)
@@ -1422,6 +1456,7 @@ static int gmc_v9_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev);
if (r)
return r;
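
The gmc_v9_0 change pairs a register save after hw_fini at suspend with a restore before hw_init at resume, evidently because DCHUBBUB_SDPIF_MMIO_CNTRL_0 would otherwise come back reset on Raven. A generic sketch of the pairing (the xxx_ names, mmSOME_VOLATILE_REG, and reg_cache are placeholders):

static int xxx_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = xxx_hw_fini(adev);
	if (r)
		return r;

	adev->reg_cache = RREG32(mmSOME_VOLATILE_REG);	/* save last */
	return 0;
}

static int xxx_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(mmSOME_VOLATILE_REG, adev->reg_cache);	/* restore first */
	return xxx_hw_init(adev);
}

Ordering matters: the save happens only after a successful hw_fini, and the restore runs before hw_init so the block comes up with the right value.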
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 15f3424a1ff7..2b488dfb2f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -272,7 +272,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
- return adev->clock.spll.reference_freq;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
+ if (adev->asic_type == CHIP_RAVEN)
+ return reference_clock / 4;
+
+ return reference_clock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 63e8a12a74bc..e8f66fbf399e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1911,7 +1911,7 @@ static void handle_hpd_irq(void *param)
mutex_lock(&aconnector->hpd_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN)
+ if (adev->dm.hdcp_workqueue)
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
if (aconnector->fake_enable)
@@ -2088,8 +2088,10 @@ static void handle_hpd_rx_irq(void *param)
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
- hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
@@ -5702,7 +5704,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN)
+ if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index f730b94ac3c0..55246711700b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -46,8 +46,8 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
- status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) &&
- HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ?
+ status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
+ HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[2]) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index b6f74bf4af02..27bb8c1ab858 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -7376,6 +7376,8 @@
#define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
#define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
// addressBlock: dce_dc_fmt4_dispdec
// base address: 0x2000
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 0dc49479a7eb..c9e5ce135fd4 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -898,6 +898,9 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
if (ret)
return ret;
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+
if (en) {
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
@@ -907,9 +910,6 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
feature->feature_num);
bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
feature->feature_num);
- } else {
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
}
return ret;
@@ -978,8 +978,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
- max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+ if (!smu->smu_table.max_sustainable_clocks)
+ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
GFP_KERNEL);
+ else
+ max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
+
smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
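
The smu_v11_0 hunks fix two re-entry problems in one function: the enabled/supported feature bitmaps are now cleared unconditionally before being repopulated, and max_sustainable_clocks is allocated only on first use, so re-running init (e.g. across suspend/resume) no longer leaks the previous buffer. The allocate-once idiom, sketched (the NULL re-check is our addition; the patch itself relies on the first allocation having succeeded):

struct smu_11_0_max_sustainable_clocks *clocks;

if (!smu->smu_table.max_sustainable_clocks)
	clocks = kzalloc(sizeof(*clocks), GFP_KERNEL);	/* first call */
else
	clocks = smu->smu_table.max_sustainable_clocks;	/* reuse */

if (!clocks)
	return -ENOMEM;

smu->smu_table.max_sustainable_clocks = clocks;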
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 3709e5ace724..fbdb42d4e772 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -297,7 +297,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
static int tc_aux_wait_busy(struct tc_data *tc)
{
- return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
+ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
}
static int tc_aux_write_data(struct tc_data *tc, const void *data,
@@ -640,7 +640,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
if (ret)
goto err;
- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
if (ret == -ETIMEDOUT) {
dev_err(tc->dev, "Timeout waiting for PHY to become ready");
return ret;
@@ -876,7 +876,7 @@ static int tc_wait_link_training(struct tc_data *tc)
int ret;
ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
- LT_LOOPDONE, 1, 1000);
+ LT_LOOPDONE, 500, 100000);
if (ret) {
dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
return ret;
@@ -949,7 +949,7 @@ static int tc_main_link_enable(struct tc_data *tc)
dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
if (ret) {
dev_err(dev, "timeout waiting for phy become ready");
return ret;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 6f6d6d1e60ae..f195a4732e0b 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -140,7 +140,8 @@ static int tfp410_attach(struct drm_bridge *bridge)
dvi->connector_type,
dvi->ddc);
if (ret) {
- dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+ dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 6d4a29e99ae2..3035584f6dc7 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
* depending on the hardware this may require the framebuffer
* to be in a specific tiling format.
*/
- if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
+ if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
+ (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
!plane->rotation_property)
return false;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 10336b144c72..d4d64518e11b 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1698,6 +1698,13 @@ static int drm_mode_parse_cmdline_options(const char *str,
if (rotation && freestanding)
return -EINVAL;
+ if (!(rotation & DRM_MODE_ROTATE_MASK))
+ rotation |= DRM_MODE_ROTATE_0;
+
+ /* Make sure there is exactly one rotation defined */
+ if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
+ return -EINVAL;
+
mode->rotation_reflection = rotation;
return 0;
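
With these two additions the parser canonicalizes an absent rotation to DRM_MODE_ROTATE_0 and then insists that exactly one rotation angle remain set; since each DRM_MODE_ROTATE_* value is a single bit, is_power_of_2() on the masked value is a compact exactly-one-bit test. A sketch:

u32 rot = rotation & DRM_MODE_ROTATE_MASK;

if (!rot)				/* nothing specified: default */
	rotation |= DRM_MODE_ROTATE_0;
else if (!is_power_of_2(rot))		/* e.g. ROTATE_90 | ROTATE_180 */
	return -EINVAL;			/* ambiguous rotation */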
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ba9595960bbe..907c4471f591 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
help
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
- Please report any hang to
- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
- for triaging.
+ Please report any hang for triaging according to:
+ https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8c5f8934dbd..a1f2411aa21b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -294,7 +294,7 @@ extra-$(CONFIG_DRM_I915_WERROR) += \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
- cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 33f1dc3d7c1a..d9a61f341070 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -4251,7 +4251,9 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+ if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 3;
+ else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 064dd99bbc49..aa453953908b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11087,7 +11087,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
u32 base;
if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
- base = obj->phys_handle->busaddr;
+ base = sg_dma_address(obj->mm.pages->sgl);
else
base = intel_plane_ggtt_offset(plane_state);
@@ -17433,6 +17433,24 @@ retry:
* have readout for pipe gamma enable.
*/
crtc_state->uapi.color_mgmt_changed = true;
+
+ /*
+ * FIXME hack to force full modeset when DSC is being
+ * used.
+ *
+ * As long as we do not have full state readout and
+ * config comparison of crtc_state->dsc, we have no way
+ * to ensure reliable fastset. Remove once we have
+ * readout for DSC.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ ret = drm_atomic_add_affected_connectors(state,
+ &crtc->base);
+ if (ret)
+ goto out;
+ crtc_state->uapi.mode_changed = true;
+ drm_dbg_kms(dev, "Force full modeset for DSC\n");
+ }
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index a2e57e62af30..151a1e8ae36a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -565,6 +565,22 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
return -ENODEV;
+ /*
+ * If the cancel fails, we then need to reset, cleanly!
+ *
+ * If the per-engine reset fails, all hope is lost! We resort
+ * to a full GPU reset in that unlikely case, but realistically
+ * if the engine could not reset, the full reset does not fare
+ * much better. The damage has been done.
+ *
+ * However, if we cannot reset an engine by itself, we cannot
+ * cleanup a hanging persistent context without causing
+ * colateral damage, and we should not pretend we can by
+ * exposing the interface.
+ */
+ if (!intel_has_reset_engine(&ctx->i915->gt))
+ return -ENODEV;
+
i915_gem_context_clear_persistence(ctx);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index f64ad77e6b1e..c2174da35bb0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -285,9 +285,6 @@ struct drm_i915_gem_object {
void *gvt_info;
};
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
};
static inline struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b1b7c1b3038a..b07bb40edd5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -22,88 +22,87 @@
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- struct drm_dma_handle *phys;
- struct sg_table *st;
struct scatterlist *sg;
- char *vaddr;
+ struct sg_table *st;
+ dma_addr_t dma;
+ void *vaddr;
+ void *dst;
int i;
- int err;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
- /* Always aligning to the object size, allows a single allocation
+ /*
+ * Always aligning to the object size, allows a single allocation
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
- phys = drm_pci_alloc(obj->base.dev,
- roundup_pow_of_two(obj->base.size),
- roundup_pow_of_two(obj->base.size));
- if (!phys)
+ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
+ roundup_pow_of_two(obj->base.size),
+ &dma, GFP_KERNEL);
+ if (!vaddr)
return -ENOMEM;
- vaddr = phys->vaddr;
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_pci;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL))
+ goto err_st;
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
+ sg_assign_page(sg, (struct page *)vaddr);
+ sg_dma_address(sg) = dma;
+ sg_dma_len(sg) = obj->base.size;
+
+ dst = vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- char *src;
+ void *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto err_phys;
- }
+ if (IS_ERR(page))
+ goto err_st;
src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
+ drm_clflush_virt_range(dst, PAGE_SIZE);
kunmap_atomic(src);
put_page(page);
- vaddr += PAGE_SIZE;
+ dst += PAGE_SIZE;
}
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st) {
- err = -ENOMEM;
- goto err_phys;
- }
-
- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
- kfree(st);
- err = -ENOMEM;
- goto err_phys;
- }
-
- sg = st->sgl;
- sg->offset = 0;
- sg->length = obj->base.size;
-
- sg_dma_address(sg) = phys->busaddr;
- sg_dma_len(sg) = obj->base.size;
-
- obj->phys_handle = phys;
-
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
-err_phys:
- drm_pci_free(obj->base.dev, phys);
-
- return err;
+err_st:
+ kfree(st);
+err_pci:
+ dma_free_coherent(&obj->base.dev->pdev->dev,
+ roundup_pow_of_two(obj->base.size),
+ vaddr, dma);
+ return -ENOMEM;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ dma_addr_t dma = sg_dma_address(pages->sgl);
+ void *vaddr = sg_page(pages->sgl);
+
__i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ void *src = vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
continue;
dst = kmap_atomic(page);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
- memcpy(dst, vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(src, PAGE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(dst);
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
- vaddr += PAGE_SIZE;
+
+ src += PAGE_SIZE;
}
obj->mm.dirty = false;
}
@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
- drm_pci_free(obj->base.dev, obj->phys_handle);
+ dma_free_coherent(&obj->base.dev->pdev->dev,
+ roundup_pow_of_two(obj->base.size),
+ vaddr, dma);
}
static void phys_release(struct drm_i915_gem_object *obj)
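
This rewrite drops the deprecated drm_pci_alloc()/drm_pci_free() wrappers and the obj->phys_handle field: the backing store is now a plain dma_alloc_coherent() buffer, with the CPU pointer stashed in the single sg entry's page slot and the bus address in sg_dma_address(), so later code recovers both from the sg_table alone. Condensed into one hypothetical helper (error paths trimmed relative to the patch):

static struct sg_table *phys_pages_alloc(struct device *dev, size_t size,
					 void **vaddr_out)
{
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;

	vaddr = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!vaddr)
		return NULL;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st || sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		dma_free_coherent(dev, size, vaddr, dma);
		return NULL;
	}

	sg_assign_page(st->sgl, (struct page *)vaddr);	/* CPU address */
	sg_dma_address(st->sgl) = dma;			/* bus address */
	sg_dma_len(st->sgl) = size;

	*vaddr_out = vaddr;
	return st;
}

Teardown is symmetric: dma_free_coherent(dev, size, sg_page(sgl), sg_dma_address(sgl)).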
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f7e4b39c734f..59b387ade49c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -256,8 +256,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
+ I915_SHRINK_UNBOUND);
}
return freed;
@@ -336,7 +335,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 0ba524a414c6..cbad7fe722ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -136,6 +136,9 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
+ if (unlikely(intel_engine_is_virtual(engine)))
+ engine = intel_virtual_engine_get_sibling(engine, 0);
+
intel_engine_add_retire(engine, tl);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 7ef1d37970f6..8a5054f21bf8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -99,6 +99,9 @@ static bool add_retire(struct intel_engine_cs *engine,
void intel_engine_add_retire(struct intel_engine_cs *engine,
struct intel_timeline *tl)
{
+ /* We don't deal well with the engine disappearing beneath us */
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
+
if (add_retire(engine, tl))
schedule_work(&engine->retire_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a13a8c4b65ab..fe8a59aaa629 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -237,7 +237,8 @@ static void execlists_init_reg_state(u32 *reg_state,
bool close);
static void
__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine);
+ const struct intel_engine_cs *engine,
+ u32 head);
static void mark_eio(struct i915_request *rq)
{
@@ -1186,12 +1187,11 @@ static void reset_active(struct i915_request *rq,
head = rq->tail;
else
head = active_request(ce->timeline, rq)->head;
- ce->ring->head = intel_ring_wrap(ce->ring, head);
- intel_ring_update_space(ce->ring);
+ head = intel_ring_wrap(ce->ring, head);
/* Scrub the context image to prevent replaying the previous batch */
restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, head);
/* We've switched away, so this should be a no-op, but intent matters */
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
@@ -1321,7 +1321,7 @@ static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
u64 desc = ce->lrc_desc;
- u32 tail;
+ u32 tail, prev;
/*
* WaIdleLiteRestore:bdw,skl
@@ -1334,9 +1334,15 @@ static u64 execlists_update_context(struct i915_request *rq)
* subsequent resubmissions (for lite restore). Should that fail us,
* and we try and submit the same tail again, force the context
* reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
*/
tail = intel_ring_set_tail(rq->ring, rq->tail);
- if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+ prev = ce->lrc_reg_state[CTX_RING_TAIL];
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
desc |= CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state[CTX_RING_TAIL] = tail;
rq->tail = rq->wa_tail;
@@ -1605,6 +1611,11 @@ last_active(const struct intel_engine_execlists *execlists)
return *last;
}
+#define for_each_waiter(p__, rq__) \
+ list_for_each_entry_lockless(p__, \
+ &(rq__)->sched.waiters_list, \
+ wait_link)
+
static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
LIST_HEAD(list);
@@ -1622,7 +1633,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
GEM_BUG_ON(i915_request_is_active(rq));
list_move_tail(&rq->sched.link, pl);
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
@@ -1834,14 +1845,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
__unwind_incomplete_requests(engine);
- /*
- * If we need to return to the preempted context, we
- * need to skip the lite-restore and force it to
- * reload the RING_TAIL. Otherwise, the HW has a
- * tendency to ignore us rewinding the TAIL to the
- * end of an earlier request.
- */
- last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
timer_expired(&engine->execlists.timer)) {
@@ -2860,16 +2863,17 @@ static void execlists_context_unpin(struct intel_context *ce)
static void
__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine,
+ u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
- GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
- regs[CTX_RING_HEAD] = ring->head;
+ regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
/* RPCS */
@@ -2898,7 +2902,7 @@ __execlists_context_pin(struct intel_context *ce,
ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, ce->ring->tail);
return 0;
}
@@ -2939,7 +2943,7 @@ static void execlists_context_reset(struct intel_context *ce)
/* Scrub away the garbage */
execlists_init_reg_state(ce->lrc_reg_state,
ce, ce->engine, ce->ring, true);
- __execlists_update_reg_state(ce, ce->engine);
+ __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
@@ -3494,6 +3498,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
struct i915_request *rq;
+ u32 head;
mb(); /* paranoia: read the CSB pointers from after the reset */
clflush(execlists->csb_write);
@@ -3521,15 +3526,15 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (i915_request_completed(rq)) {
/* Idle context; tidy up the ring so we can restart afresh */
- ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
+ head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
}
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
rq = active_request(ce->timeline, rq);
- ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
- GEM_BUG_ON(ce->ring->head == ce->ring->tail);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -3574,10 +3579,9 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
out_replay:
ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
- ce->ring->head, ce->ring->tail);
- intel_ring_update_space(ce->ring);
+ head, ce->ring->tail);
__execlists_reset_reg_state(ce, engine);
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, head);
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
@@ -5220,10 +5224,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
- ce->ring->head = head;
- intel_ring_update_space(ce->ring);
-
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, head);
}
bool
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 374b28f13ca0..6ff803f397c4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -145,6 +145,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
kref_init(&ring->ref);
ring->size = size;
+ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
/*
* Workaround an erratum on the i830 which causes a hang if
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index ea2839d9e044..5bdce24994aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
return pos & (ring->size - 1);
}
+static inline int intel_ring_direction(const struct intel_ring *ring,
+ u32 next, u32 prev)
+{
+ typecheck(typeof(ring->size), next);
+ typecheck(typeof(ring->size), prev);
+ return (next - prev) << ring->wrap;
+}
+
static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
unsigned int pos)
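
intel_ring_direction() compares two offsets modulo the ring size. Because the size is a power of two, ring->wrap (set in intel_ring.c above to BITS_PER_TYPE(ring->size) - ilog2(size)) shifts the meaningful low bits of the u32 difference into the top of the word, so the sign of the result says whether next is ahead of or behind prev in ring order. A worked example with hypothetical offsets in a 4096-byte ring (wrap = 32 - 12 = 20):

u32 next = 0x010;	/* just past the wrap point */
u32 prev = 0xff0;	/* just before the wrap point */

/* next - prev = 0xfffff020 as u32; << 20 yields 0x02000000, positive
 * as an int: next is 0x20 bytes ahead of prev despite the wrap. */
int dir = (int)((next - prev) << 20);

execlists_update_context() uses exactly this sign test to decide whether a rewound RING_TAIL must force a context reload via CTX_DESC_FORCE_RESTORE.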
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
index d9f17f38e0cc..1a189ea00fd8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -39,12 +39,13 @@ struct intel_ring {
*/
atomic_t pin_count;
- u32 head;
- u32 tail;
- u32 emit;
+ u32 head; /* updated during retire, loosely tracks RING_HEAD */
+ u32 tail; /* updated on submission, used for RING_TAIL */
+ u32 emit; /* updated during request construction */
u32 space;
u32 size;
+ u32 wrap;
u32 effective_size;
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 65718ca2326e..b292f8cbd0bf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -186,7 +186,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
}
GEM_BUG_ON(!ce[1]->ring->size);
intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine);
+ __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
if (IS_ERR(rq[0])) {
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2477a1e5a166..ae139f0877ae 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = container_of(pos,
struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
+ list_del(pos);
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
kfree(dmabuf_obj);
- list_del(pos);
break;
}
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 85bd9bf4f6ee..487af6ea9972 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -560,9 +560,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
- intel_vgpu_reset_display(vgpu);
if (dmlr) {
+ intel_vgpu_reset_display(vgpu);
intel_vgpu_reset_cfg_space(vgpu);
/* only reset the failsafe mode when dmlr reset */
vgpu->failsafe = false;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c2de2f45b459..5f6e63952821 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,7 +180,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- void *vaddr = obj->phys_handle->vaddr + args->offset;
+ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
/*
@@ -844,10 +844,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
- if (obj->phys_handle)
- ret = i915_gem_phys_pwrite(obj, args, file);
- else
+ if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(obj, args);
+ else
+ ret = i915_gem_phys_pwrite(obj, args, file);
}
i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 594341e27a47..9e401a5fcae8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1852,7 +1852,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
if (!xchg(&warned, true) &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
+ pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 83f01401b8b5..f631f6d21127 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -437,7 +437,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -494,7 +494,7 @@ static const struct intel_device_info vlv_info = {
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index ec0299490dd4..aa729d04abe2 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -822,11 +822,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static struct attribute_group i915_pmu_events_attr_group = {
- .name = "events",
- /* Patch in attrs at runtime. */
-};
-
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
struct device_attribute *attr,
@@ -846,13 +841,6 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
.attrs = i915_cpumask_attrs,
};
-static const struct attribute_group *i915_pmu_attr_groups[] = {
- &i915_pmu_format_attr_group,
- &i915_pmu_events_attr_group,
- &i915_pmu_cpumask_attr_group,
- NULL
-};
-
#define __event(__config, __name, __unit) \
{ \
.config = (__config), \
@@ -1026,23 +1014,23 @@ err_alloc:
static void free_event_attributes(struct i915_pmu *pmu)
{
- struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
+ struct attribute **attr_iter = pmu->events_attr_group.attrs;
for (; *attr_iter; attr_iter++)
kfree((*attr_iter)->name);
- kfree(i915_pmu_events_attr_group.attrs);
+ kfree(pmu->events_attr_group.attrs);
kfree(pmu->i915_attr);
kfree(pmu->pmu_attr);
- i915_pmu_events_attr_group.attrs = NULL;
+ pmu->events_attr_group.attrs = NULL;
pmu->i915_attr = NULL;
pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
GEM_BUG_ON(!pmu->base.event_init);
@@ -1055,7 +1043,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target;
GEM_BUG_ON(!pmu->base.event_init);
@@ -1072,8 +1060,6 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
@@ -1087,21 +1073,22 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &pmu->node);
+ ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
}
- cpuhp_slot = slot;
+ pmu->cpuhp.slot = slot;
return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
- cpuhp_remove_multi_state(cpuhp_slot);
+ WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
+ cpuhp_remove_multi_state(pmu->cpuhp.slot);
+ pmu->cpuhp.slot = CPUHP_INVALID;
}
static bool is_igp(struct drm_i915_private *i915)
@@ -1118,6 +1105,13 @@ static bool is_igp(struct drm_i915_private *i915)
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
+ const struct attribute_group *attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &pmu->events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+ };
+
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
@@ -1128,6 +1122,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
+ pmu->cpuhp.slot = CPUHP_INVALID;
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
@@ -1143,11 +1138,16 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (!pmu->name)
goto err;
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs)
+ pmu->events_attr_group.name = "events";
+ pmu->events_attr_group.attrs = create_event_attributes(pmu);
+ if (!pmu->events_attr_group.attrs)
goto err_name;
- pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+ if (!pmu->base.attr_groups)
+ goto err_attr;
+
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
@@ -1159,7 +1159,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err_attr;
+ goto err_groups;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1169,6 +1169,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
+err_groups:
+ kfree(pmu->base.attr_groups);
err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
@@ -1194,6 +1196,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ kfree(pmu->base.attr_groups);
if (!is_igp(i915))
kfree(pmu->name);
free_event_attributes(pmu);
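
The PMU rework replaces the file-scope events attribute group and the global cpuhp_slot with per-device state in struct i915_pmu (see the i915_pmu.h hunk below), so that two i915 devices can register independent PMUs without trampling shared globals. The hotplug side follows the standard cpuhp multi-instance pattern; a sketch of registration under that assumption (the state name string is illustrative):

int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				  "perf/x86/intel/i915:online",
				  i915_pmu_cpu_online,
				  i915_pmu_cpu_offline);
if (ret < 0)
	return ret;

pmu->cpuhp.slot = ret;	/* dynamically assigned state */
ret = cpuhp_state_add_instance(pmu->cpuhp.slot, &pmu->cpuhp.node);
if (ret) {
	cpuhp_remove_multi_state(pmu->cpuhp.slot);
	pmu->cpuhp.slot = CPUHP_INVALID;
	return ret;
}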
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 6c1647c5daf2..f1d6cad0d7d5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -39,9 +39,12 @@ struct i915_pmu_sample {
struct i915_pmu {
/**
- * @node: List node for CPU hotplug handling.
+ * @cpuhp: Struct used for CPU hotplug handling.
*/
- struct hlist_node node;
+ struct {
+ struct hlist_node node;
+ enum cpuhp_state slot;
+ } cpuhp;
/**
* @base: PMU base.
*/
@@ -105,6 +108,10 @@ struct i915_pmu {
*/
ktime_t sleep_last;
/**
+ * @events_attr_group: Device events attribute group.
+ */
+ struct attribute_group events_attr_group;
+ /**
* @i915_attr: Memory block holding device attributes.
*/
void *i915_attr;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 78a5f5d3c070..f56b046a32de 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -595,6 +595,8 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
+
rq->file_priv = NULL;
rq->capture_list = NULL;
@@ -653,25 +655,30 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- ret = intel_timeline_get_seqno(tl, rq, &seqno);
- if (ret)
- goto err_free;
-
rq->i915 = ce->engine->i915;
rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
+ kref_init(&rq->fence.refcount);
+ rq->fence.flags = 0;
+ rq->fence.error = 0;
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
+ rq->fence.context = tl->fence_context;
+ rq->fence.seqno = seqno;
+
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
- tl->fence_context, seqno);
-
/* We bump the ref for the fence chain */
i915_sw_fence_reinit(&i915_request_get(rq)->submit);
i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
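
Moving dma_fence_init() into the slab constructor makes the immutable parts of the fence (its ops and lock) one-time work, while each request allocation re-arms only the mutable fields by hand, before the real context/seqno are published. The invariant: everything dma_fence_init() would have set must be either ctor-initialized or reset per use. A condensed sketch of the split (request_reinit() is our name for the inlined per-allocation code):

static void request_ctor(void *arg)		/* runs once per slab object */
{
	struct i915_request *rq = arg;

	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
}

static void request_reinit(struct i915_request *rq, u64 context, u64 seqno)
{						/* runs on every allocation */
	kref_init(&rq->fence.refcount);
	rq->fence.flags = 0;
	rq->fence.error = 0;
	INIT_LIST_HEAD(&rq->fence.cb_list);
	rq->fence.context = context;
	rq->fence.seqno = seqno;
}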
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 5d96cfba40f8..34b654b4e58a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -423,8 +423,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->waiter = node;
dep->flags = flags;
@@ -434,6 +432,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
!node_started(signal))
node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ /* All set, now publish. Beware the lockless walkers. */
+ list_add(&dep->signal_link, &node->signalers_list);
+ list_add_rcu(&dep->wait_link, &signal->waiters_list);
+
/*
* As we do not allow WAIT to preempt inflight requests,
* once we have executed a request, along with triggering
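
Paired with for_each_waiter()/list_for_each_entry_lockless() in intel_lrc.c above, this hunk reorders dependency setup into the classic init-then-publish shape: fill in every field of the dep first, then make it visible, using list_add_rcu() on the list that lockless readers walk so they can never observe a half-initialized entry. Schematically (alloc_dep() stands in for the existing allocation):

struct i915_dependency *dep = alloc_dep();

dep->signaler = signal;		/* 1. fully initialize ... */
dep->waiter = node;
dep->flags = flags;

/* 2. ... then publish; list_add_rcu() contains the write barrier
 * that orders the initializing stores before the list linkage. */
list_add(&dep->signal_link, &node->signalers_list);
list_add_rcu(&dep->wait_link, &signal->waiters_list);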
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index c47261ae86ea..632d6953c78d 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,9 +8,8 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
- "providing the dmesg log by booting with drm.debug=0xf"
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 983afeaee737..748cd379065f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- struct msm_gpu *gpu = &adreno_gpu->base;
u32 val;
/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
return;
}
- /* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
- == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
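
The GBIF/VBIF drain now runs inside GMU shutdown, where the GPU is still known to be powered, instead of in the a6xx_gpu.c suspend path (removed below). Each stage of the sequence is the same halt-and-ack handshake: request the halt, poll the ack bits, and finally clear the halt explicitly because the GMU still needs DDR access on its way into slumber. Reduced to a hypothetical helper:

static void gbif_halt_stage(struct msm_gpu *gpu, u32 mask)
{
	gpu_write(gpu, REG_A6XX_GBIF_HALT, mask);	/* request halt */
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		    mask) == mask);			/* wait for ack */
}

/* usage: halt client traffic, then arbitration, then release */
gbif_halt_stage(gpu, GBIF_CLIENT_HALT_MASK);
gbif_halt_stage(gpu, GBIF_ARB_HALT_MASK);
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);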
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index daf07800cde0..68af24150de5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- /*
- * During a previous slumber, GBIF halt is asserted to ensure
- * no further transaction can go through GPU before GPU
- * headswitch is turned off.
- *
- * This halt is deasserted once headswitch goes off but
- * incase headswitch doesn't goes off clear GBIF halt
- * here to ensure GPU wake-up doesn't fail because of
- * halted GPU transactions.
- */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+ if (adreno_is_a630(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+ }
/* Enable fault detection */
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if(!a6xx_has_gbif(adreno_gpu)){
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
- 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
- return;
- }
-
- /* Halt new client requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
- /* Halt all AXI requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
- /*
- * GMU needs DDR access in slumber path. Deassert GBIF halt now
- * to allow for GMU to access system memory.
- */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- a6xx_bus_clear_pending_transactions(adreno_gpu);
-
return a6xx_gmu_stop(a6xx_gpu);
}
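
The NC_MODE_CNTL/UCHE_MODE_CNTL writes in the hw_init hunk above are now gated on adreno_is_a630(), presumably because A618 needs different cache-mode settings. A sketch of the likely shape of such a predicate in adreno_gpu.h (that revn caches the numeric core revision is an assumption here):

	static inline bool adreno_is_a630(struct adreno_gpu *gpu)
	{
		/* assumption: revn holds the core number, e.g. 630 */
		return gpu->revn == 630;
	}
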
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index eda11abc5f01..e450e0b97211 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -7,6 +7,7 @@
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
#define HFI_MSG_ID(val) [val] = #val
@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
- struct a6xx_hfi_msg_bw_table msg = { 0 };
+ /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
/*
- * The sdm845 GMU doesn't do bus frequency scaling on its own but it
- * does need at least one entry in the list because it might be accessed
- * when the GMU is shutting down. Send a single "off" entry.
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
*/
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5007c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
- msg.bw_level_num = 1;
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
- msg.ddr_cmds_num = 3;
- msg.ddr_wait_bitmask = 0x07;
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
- msg.ddr_cmds_addrs[0] = 0x50000;
- msg.ddr_cmds_addrs[1] = 0x5005c;
- msg.ddr_cmds_addrs[2] = 0x5000c;
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5005c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
- msg.ddr_cmds_data[0][0] = 0x40000000;
- msg.ddr_cmds_data[0][1] = 0x40000000;
- msg.ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes. This is used but the values for the
* sdm845 GMU are known and fixed so we can hard code them.
*/
- msg.cnoc_cmds_num = 3;
- msg.cnoc_wait_bitmask = 0x05;
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x05;
- msg.cnoc_cmds_addrs[0] = 0x50034;
- msg.cnoc_cmds_addrs[1] = 0x5007c;
- msg.cnoc_cmds_addrs[2] = 0x5004c;
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
- msg.cnoc_cmds_data[0][0] = 0x40000000;
- msg.cnoc_cmds_data[0][1] = 0x00000000;
- msg.cnoc_cmds_data[0][2] = 0x40000000;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- msg.cnoc_cmds_data[1][0] = 0x60000001;
- msg.cnoc_cmds_data[1][1] = 0x20000001;
- msg.cnoc_cmds_data[1][2] = 0x60000001;
+ if (adreno_is_a618(adreno_gpu))
+ a618_build_bw_table(&msg);
+ else
+ a6xx_build_bw_table(&msg);
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
NULL, 0);
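
a6xx_hfi_send_bw_table() now has to know which GPU it is driving, which is why a6xx_gpu.h is included and container_of() is used to climb from the embedded gmu member back to the wrapping a6xx_gpu. The idiom in isolation, with illustrative types rather than the driver's own:

	#include <linux/kernel.h>

	struct inner {
		int dummy;
	};

	struct outer {
		int other;
		struct inner member;
	};

	/* Given a pointer to `member`, container_of() subtracts the member's
	 * offset within struct outer to recover the enclosing object. */
	static struct outer *outer_from_inner(struct inner *p)
	{
		return container_of(p, struct outer, member);
	}
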
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 528632690f1e..a05282dede91 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 29705e773a4b..80d3cfc14007 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -12,6 +12,7 @@
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+#define HW_REV 0x0
#define HW_INTR_STATUS 0x0010
/* Max BW defined in KBps */
@@ -22,6 +23,17 @@ struct dpu_irq_controller {
struct irq_domain *domain;
};
+struct dpu_hw_cfg {
+ u32 val;
+ u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+ u32 hw_rev;
+ u32 hw_reg_count;
+ struct dpu_hw_cfg* hw_cfg;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@@ -32,6 +44,44 @@ struct dpu_mdss {
u32 num_paths;
};
+static struct dpu_hw_cfg hw_cfg[] = {
+ {
+ /* UBWC global settings */
+ .val = 0x1E,
+ .offset = 0x144,
+ }
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_620,
+ .hw_reg_count = ARRAY_SIZE(hw_cfg),
+ .hw_cfg = hw_cfg
+ },
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+ int i;
+ u32 count = 0;
+ struct dpu_hw_cfg *hw_cfg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ hw_cfg = cfg_handler[i].hw_cfg;
+ count = cfg_handler[i].hw_reg_count;
+ break;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ writel_relaxed(hw_cfg->val,
+ dpu_mdss->mmio + hw_cfg->offset);
+ hw_cfg++;
+ }
+
+ return;
+}
+
static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
struct dpu_mdss *dpu_mdss)
{
@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
+ u32 mdss_rev;
dpu_mdss_icc_request_bw(mdss);
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
- if (ret)
+ if (ret) {
DPU_ERROR("clock enable failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+ dpu_mdss_hw_init(dpu_mdss, mdss_rev);
return ret;
}
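
Because dpu_mdss_hw_init() is table-driven, supporting another revision is a data change rather than a code change. A hypothetical example; DPU_HW_VER_630 and the register values are placeholders, not real target data:

	static struct dpu_hw_cfg next_hw_cfg[] = {
		{
			.val = 0x1E,	/* placeholder value */
			.offset = 0x144,
		}
	};

	static struct dpu_mdss_hw_init_handler cfg_handler[] = {
		{ .hw_rev = DPU_HW_VER_620,
		  .hw_reg_count = ARRAY_SIZE(hw_cfg),
		  .hw_cfg = hw_cfg
		},
		{ .hw_rev = DPU_HW_VER_630,	/* hypothetical new target */
		  .hw_reg_count = ARRAY_SIZE(next_hw_cfg),
		  .hw_cfg = next_hw_cfg
		},
	};
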
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 05cc04f729d6..e1cc541e0ef2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
- dev_warn(dev->dev, "pp done time out, lm=%d\n",
- mdp5_cstate->pipeline.mixer->lm);
+ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
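
The switch to dev_warn_ratelimited() matters because a stuck ping-pong completion would otherwise log once per frame. It is roughly equivalent to gating a plain dev_warn() on a static ratelimit state, which by default allows at most 10 messages per 5 seconds (lm as in the code above):

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(pp_done_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* prints only while the ratelimit budget allows it */
	if (__ratelimit(&pp_done_rs))
		dev_warn(dev->dev, "pp done time out, lm=%d\n", lm);
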
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 104115d112eb..4864b9558f65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return num;
}
-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int id = dsi_mgr_connector_get_id(connector);
@@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
+ struct msm_dsi_pll *src_pll;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
@@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
+ /* Save PLL status if it is a clock source */
+ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ msm_dsi_pll_save_state(src_pll);
+
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index b0cfa67d2a57..f509ebd77500 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
if (!phy || !phy->cfg->ops.disable)
return;
- /* Save PLL status if it is a clock source */
- if (phy->usecase != MSM_DSI_PHY_SLAVE)
- msm_dsi_pll_save_state(phy->pll);
-
phy->cfg->ops.disable(phy);
dsi_phy_regulator_disable(phy);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 1c894548dd72..6ac04fc303f5 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
if (pll_10nm->slave)
dsi_pll_enable_pll_bias(pll_10nm->slave);
+ rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+ if (rc) {
+ pr_err("vco_set_rate failed, rc=%d\n", rc);
+ return rc;
+ }
+
/* Start PLL */
pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
0x01);
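
The new call in dsi_pll_10nm_vco_prepare() replays the rate the driver last cached, since the clk framework may prepare the PLL (for instance after the PHY state restore added above) without issuing a fresh set_rate. The cache-then-replay pattern, sketched with hypothetical types and helpers:

	struct my_pll {				/* hypothetical type */
		unsigned long cached_rate;
	};

	static int program_hw(struct my_pll *pll, unsigned long rate); /* hypothetical */

	static int vco_set_rate(struct my_pll *pll, unsigned long rate)
	{
		pll->cached_rate = rate;	/* remember for later prepares */
		return program_hw(pll, rate);
	}

	static int vco_prepare(struct my_pll *pll)
	{
		/* hardware may have lost state; replay the cached rate */
		return program_hw(pll, pll->cached_rate);
	}
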
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 890315291b01..bb737f9281e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -458,6 +458,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
+ if (asyw->clr.xlut && asyw->visible)
+ asyw->set.xlut = asyw->xlut.handle != 0;
asyw->clr.csc = armw->csc.valid;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c7d700916eae..8ebbe1656008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2579,6 +2579,7 @@ nv166_chipset = {
static const struct nvkm_device_chip
nv167_chipset = {
.name = "TU117",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2607,6 +2608,7 @@ nv167_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
+ .gr = tu102_gr_new,
.nvdec[0] = gm107_nvdec_new,
.nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
@@ -2615,6 +2617,7 @@ nv167_chipset = {
static const struct nvkm_device_chip
nv168_chipset = {
.name = "TU116",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2643,6 +2646,7 @@ nv168_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
+ .gr = tu102_gr_new,
.nvdec[0] = gm107_nvdec_new,
.nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 454668b1cf54..a9efa4d78be9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -164,6 +164,32 @@ MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_method_init.bin");
+
static const struct gf100_gr_fwif
tu102_gr_fwif[] = {
{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index 7f4b89d82d32..d28d8f36ae24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -107,6 +107,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu117/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_unload.bin");
+
static const struct nvkm_acr_hsf_fwif
tu102_acr_unload_fwif[] = {
{ 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
@@ -130,6 +136,8 @@ tu102_acr_asb_0 = {
MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_asb.bin");
static const struct nvkm_acr_hsf_fwif
tu102_acr_asb_fwif[] = {
@@ -154,6 +162,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu117/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_ahesasc.bin");
+
static const struct nvkm_acr_hsf_fwif
tu102_acr_ahesasc_fwif[] = {
{ 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 389bad312bf2..10ff5d053f7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -51,3 +51,5 @@ MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 7157dfd7dea3..9a1a72a748e7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -280,12 +280,8 @@ static void panfrost_job_cleanup(struct kref *ref)
}
if (job->bos) {
- struct panfrost_gem_object *bo;
-
- for (i = 0; i < job->bo_count; i++) {
- bo = to_panfrost_bo(job->bos[i]);
+ for (i = 0; i < job->bo_count; i++)
drm_gem_object_put_unlocked(job->bos[i]);
- }
kvfree(job->bos);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 763cfca886a7..3107b0738e40 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
as = mmu->as;
if (as >= 0) {
int en = atomic_inc_return(&mmu->as_count);
- WARN_ON(en >= NUM_JOB_SLOTS);
+
+ /*
+ * AS can be retained by active jobs or a perfcnt context,
+ * hence the '+ 1' here.
+ */
+ WARN_ON(en >= (NUM_JOB_SLOTS + 1));
list_move(&mmu->list, &pfdev->as_lru_list);
goto out;
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 684820448be3..6913578d5aa7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
- u32 cfg;
+ u32 cfg, as;
int ret;
if (user == perfcnt->user)
@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
perfcnt->user = user;
- /*
- * Always use address space 0 for now.
- * FIXME: this needs to be updated when we start using different
- * address space.
- */
- cfg = GPU_PERFCNT_CFG_AS(0) |
+ as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ cfg = GPU_PERFCNT_CFG_AS(as) |
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
/*
@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index fd74e2611185..8696af1ee14d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,6 +37,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
+ struct drm_device *dev;
int ret;
if (!ent)
@@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- return drm_get_pci_dev(pdev, ent, &kms_driver);
+ dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free;
+
+ dev->pdev = pdev;
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ pci_set_drvdata(pdev, dev);
+
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ dev->agp = drm_agp_init(dev);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_agp;
+
+ return 0;
+
+err_agp:
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_put(dev);
+ return ret;
}
static void
@@ -575,7 +614,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
+ DRIVER_GEM | DRIVER_RENDER,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
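
Open-coding the probe lets radeon drop DRIVER_USE_AGP and decide about AGP at runtime via pci_find_capability(). The error handling follows the kernel's standard reverse-order goto unwind, sketched generically here with hypothetical acquire/release steps:

	int acquire_a(void);	/* hypothetical */
	int acquire_b(void);	/* hypothetical */
	void release_a(void);	/* hypothetical */

	static int example_probe(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_release_a;

		return 0;

	err_release_a:
		release_a();	/* undo in reverse order of acquisition */
		return ret;
	}
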
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d24f23a81656..dd2f19b8022b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ dev->agp = NULL;
+
done_free:
kfree(rdev);
dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index ceac7af9a172..29e367db6118 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0)
cmdline_test(drm_cmdline_test_rotate_90)
cmdline_test(drm_cmdline_test_rotate_180)
cmdline_test(drm_cmdline_test_rotate_270)
+cmdline_test(drm_cmdline_test_rotate_multiple)
cmdline_test(drm_cmdline_test_rotate_invalid_val)
cmdline_test(drm_cmdline_test_rotate_truncated)
cmdline_test(drm_cmdline_test_hmirror)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 520f3e66a384..d96cd890def6 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored)
return 0;
}
+static int drm_cmdline_test_rotate_multiple(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
static int drm_cmdline_test_rotate_invalid_val(void *ignored)
{
struct drm_cmdline_mode mode = { };
@@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored)
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X);
+ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
FAIL_ON(mode.refresh_specified);
@@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored)
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y);
+ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
FAIL_ON(mode.refresh_specified);
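
The updated expectations encode that mode.rotation_reflection always carries exactly one DRM_MODE_ROTATE_* bit (defaulting to ROTATE_0) plus optional reflect flags, which is also why the new rotate_multiple test must fail to parse. A sketch of the invariant using the mask from <drm/drm_blend.h>:

	#include <linux/log2.h>
	#include <drm/drm_blend.h>

	static bool rotation_is_wellformed(u32 rotation_reflection)
	{
		u32 rotate = rotation_reflection & DRM_MODE_ROTATE_MASK;

		/* exactly one rotation angle bit may be set */
		return is_power_of_2(rotate);
	}
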