Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 21
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 132
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 66
-rw-r--r--  drivers/gpu/drm/ast/ast_sil164.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_vga.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/aux-bridge.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/samsung-dsim.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/tc358775.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 2
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 10
-rw-r--r--  drivers/gpu/drm/display/drm_hdmi_state_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fbdev_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_ddp_comp.h | 10
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 74
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dp.c | 85
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_ethdr.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_ethdr.h | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_plane.c | 15
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_plane.h | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 38
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 14
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200eh.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200eh3.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200er.c | 10
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200ev.c | 10
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200ew3.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200se.c | 10
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_g200wb.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 77
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 68
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 19
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/os.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-himax-hx83102.c | 12
-rw-r--r--  drivers/gpu/drm/panthor/panthor_drv.c | 1
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.c | 4
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.c | 11
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 24
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.h | 1
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 1
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 14
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 21
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/gr3d.c | 46
-rw-r--r--  drivers/gpu/drm/tests/drm_connector_test.c | 24
-rw-r--r--  drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c | 8
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c | 42
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_perfmon.c | 9
-rw-r--r--  drivers/gpu/drm/vboxvideo/hgsmi_base.c | 10
-rw-r--r--  drivers/gpu/drm/vboxvideo/vboxvideo.h | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_perfmon.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 34
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 9
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c | 65
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.h | 8
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_force_wake.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_gpu_scheduler.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_gpu_scheduler.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_freq.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 39
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 29
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c | 62
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 129
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_types.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_tuning.c | 28
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 36
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_wait_user_fence.c | 3
148 files changed, 1278 insertions, 850 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f85ace0384d2..1f5a296f5ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
struct acpi_buffer *params)
{
acpi_status status;
+ union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
+ obj = (union acpi_object *)buffer.pointer;
- /* Fail only if calling the method fails and ATIF is supported */
+ /* Fail if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
- kfree(buffer.pointer);
+ kfree(obj);
return NULL;
}
- return buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+ obj->type);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ce5ca304dba9..fa572ba7f9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
list_add_tail(&vm->vm_list_node,
&(vm->process_info->vm_list_head));
vm->process_info->n_vms++;
-
- *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+ if (ef)
+ *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
mutex_unlock(&vm->process_info->lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1e475eb01417..d891ab779ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
/* Only a single BO list is allowed to simplify handling. */
if (p->bo_list)
- ret = -EINVAL;
+ goto free_partial_kdata;
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 83e54697f0ee..f1ffab5a1eae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1635,11 +1635,9 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_sriov_vf(adev)) {
- r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
- if (r)
- return r;
- }
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
if (r)
@@ -1650,8 +1648,7 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
- device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 10b61ff63802..7d4b540340e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1203,8 +1203,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r)
+ if (r) {
+ amdgpu_mes_unlock(&adev->mes);
goto clean_up_memory;
+ }
amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@@ -1237,7 +1239,6 @@ clean_up_ring:
amdgpu_ring_fini(ring);
clean_up_memory:
kfree(ring);
- amdgpu_mes_unlock(&adev->mes);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 09715b506468..81d195d366ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/string_helpers.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/drm_util.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 8d27421689c9..a37a6801c9ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -621,7 +621,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
}
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -1336,7 +1336,7 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+ adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index a8763496aed3..9288f37a3cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+/*define for compression field for sdma7*/
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)
+
static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
@@ -1724,7 +1730,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
+ SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 9044bdb38cf4..3e6b4736a7fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
}
mutex_unlock(&p->mutex);
@@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+ atomic64_sub(size, &pdd->vram_usage);
err_unlock:
err_pdd:
@@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
+ atomic64_add(bo_bucket->size, &pdd->vram_usage);
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d6530febabad..26e48fdc8728 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -775,7 +775,7 @@ struct kfd_process_device {
enum kfd_pdd_bound bound;
/* VRAM usage */
- uint64_t vram_usage;
+ atomic64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_SYSFS_FILENAME_LEN];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d07acf1b2f93..d4aa843aacfd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -332,7 +332,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
- return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
@@ -1625,7 +1625,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
- pdd->vram_usage = 0;
+ atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
@@ -1702,12 +1702,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
- &ef);
+ p->ef ? NULL : &ef);
if (ret) {
dev_err(dev->adev->dev, "Failed to create process VM object\n");
return ret;
}
- RCU_INIT_POINTER(p->ef, ef);
+
+ if (!p->ef)
+ RCU_INIT_POINTER(p->ef, ef);
+
pdd->drm_priv = drm_file->private_data;
ret = kfd_process_device_reserve_ib_mem(pdd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 04e746923697..1893c27746a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
+
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ struct mm_struct *mm;
+
+ mm = svm_bo->eviction_fence->mm;
+ /*
+ * The forked child process takes svm_bo device pages ref, svm_bo could be
+ * released after parent process is gone.
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (p) {
+ pdd = kfd_get_process_device_data(svm_bo->node, p);
+ if (pdd)
+ atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
+ kfd_unref_process(p);
+ }
+ mmput(mm);
+ }
+
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
@@ -532,6 +553,7 @@ int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
+ struct kfd_process_device *pdd;
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
@@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
+ pdd = svm_range_get_pdd_by_node(prange, node);
+ if (pdd)
+ atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
+
return 0;
reserve_bo_failed:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6e79028c5d78..13421a58210d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -770,6 +770,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
}
+ /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
+ if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
+ DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ return;
+ }
+
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
dev = adev->dm.ddev;
@@ -2026,7 +2032,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
- if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
+ if (adev->dm.dc->caps.ips_support &&
+ adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
adev->dm.idle_workqueue = idle_create_workqueue(adev);
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
@@ -2965,10 +2972,11 @@ static int dm_suspend(void *handle)
hpd_rx_irq_work_suspend(dm);
- if (adev->dm.dc->caps.ips_support)
- dc_allow_idle_optimizations(adev->dm.dc, true);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dm->dc->caps.ips_support && adev->in_s0ix)
+ dc_allow_idle_optimizations(dm->dc, true);
+
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -6735,12 +6743,21 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
stream->signal == SIGNAL_TYPE_EDP) {
+ const struct dc_edid_caps *edid_caps;
+ unsigned int disable_colorimetry = 0;
+
+ if (aconnector->dc_sink) {
+ edid_caps = &aconnector->dc_sink->edid_caps;
+ disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
+ }
+
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
//
stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
+ !disable_colorimetry;
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
@@ -8357,7 +8374,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
IP_VERSION(3, 5, 0) ||
acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED) {
+ DC_PSR_VERSION_UNSUPPORTED ||
+ !(adev->flags & AMD_IS_APU)) {
timing = &acrtc_state->stream->timing;
/* at least 2 frames */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 50109d13d967..eea317dcbe8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -44,6 +44,7 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
+#include "clk_mgr.h"
static u32 edid_extract_panel_id(struct edid *edid)
{
@@ -73,6 +74,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+ edid_caps->panel_patch.disable_colorimetry = true;
+ break;
default:
return;
}
@@ -1117,6 +1122,8 @@ bool dm_helpers_dp_handle_test_pattern_request(
struct pipe_ctx *pipe_ctx = NULL;
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_device *dev = aconnector->base.dev;
+ struct dc_state *dc_state = ctx->dc->current_state;
+ struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
int i;
for (i = 0; i < MAX_PIPES; i++) {
@@ -1217,6 +1224,16 @@ bool dm_helpers_dp_handle_test_pattern_request(
pipe_ctx->stream->test_pattern.type = test_pattern;
pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
+ /* Temp W/A for compliance test failure */
+ dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
+ dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ ctx->dc->clk_mgr->funcs->update_clocks(
+ ctx->dc->clk_mgr,
+ dc_state,
+ false);
+
dc_link_dp_set_test_pattern(
(struct dc_link *) link,
test_pattern,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 83a31b97e96b..a08e8a0b696c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1027,6 +1027,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int remaining_to_try = 0;
int ret;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ int var_pbn;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -1057,13 +1058,18 @@ static int try_disable_dsc(struct drm_atomic_state *state,
break;
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
+ var_pbn = vars[next_index].pbn;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
+ __func__, __LINE__, next_index, ret);
+ vars[next_index].pbn = var_pbn;
return ret;
+ }
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
@@ -1071,14 +1077,17 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index);
+ vars[next_index].pbn = var_pbn;
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
+ __func__, __LINE__, next_index, ret);
return ret;
+ }
}
tried[next_index] = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5c39390ecbd5..a88f1b6ea64c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -5065,11 +5065,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
return true;
}
+static void clear_update_flags(struct dc_surface_update *srf_updates,
+ int surface_count, struct dc_stream_state *stream)
+{
+ int i;
+
+ if (stream)
+ stream->update_flags.raw = 0;
+
+ for (i = 0; i < surface_count; i++)
+ if (srf_updates[i].surface)
+ srf_updates[i].surface->update_flags.raw = 0;
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/*
* update planes and stream version 3 separates FULL and FAST updates
@@ -5086,10 +5101,16 @@ bool dc_update_planes_and_stream(struct dc *dc,
* features as they are now transparent to the new sequence.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01)
- return update_planes_and_stream_v3(dc, srf_updates,
+ ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
- return update_planes_and_stream_v2(dc, srf_updates,
+ else
+ ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
+
+ return ret;
}
void dc_commit_updates_for_stream(struct dc *dc,
@@ -5099,6 +5120,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
@@ -5106,17 +5129,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
- update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ } else
+ ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index fd6dca735714..6d7989b751e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -178,6 +178,7 @@ struct dc_panel_patch {
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
+ unsigned int disable_colorimetry;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index da9101b83e8c..70abd32ce2ad 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -766,6 +766,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
.notify_dpia_hr_bw = true,
+ .min_disp_clk_khz = 50000,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3cd52e7a9c77..95838c7ab054 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 9118fcddbf11..227bf0e84a13 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -60,7 +60,7 @@ struct vi_dpm_level {
struct vi_dpm_table {
uint32_t count;
- struct vi_dpm_level dpm_level[] __counted_by(count);
+ struct vi_dpm_level dpm_level[];
};
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
@@ -91,7 +91,7 @@ struct phm_set_power_state_input {
struct phm_clock_array {
uint32_t count;
- uint32_t values[] __counted_by(count);
+ uint32_t values[];
};
struct phm_clock_voltage_dependency_record {
@@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record {
struct phm_clock_voltage_dependency_table {
uint32_t count;
- struct phm_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_clock_voltage_dependency_record entries[];
};
struct phm_phase_shedding_limits_record {
@@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record {
struct phm_uvd_clock_voltage_dependency_table {
uint8_t count;
- struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvd_clock_voltage_dependency_record entries[];
};
struct phm_acp_clock_voltage_dependency_record {
@@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record {
struct phm_acp_clock_voltage_dependency_table {
uint32_t count;
- struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acp_clock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_record {
@@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record {
struct phm_phase_shedding_limits_table {
uint32_t count;
- struct phm_phase_shedding_limits_record entries[] __counted_by(count);
+ struct phm_phase_shedding_limits_record entries[];
};
struct phm_vceclock_voltage_dependency_table {
uint8_t count;
- struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vceclock_voltage_dependency_record entries[];
};
struct phm_uvdclock_voltage_dependency_table {
uint8_t count;
- struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvdclock_voltage_dependency_record entries[];
};
struct phm_samuclock_voltage_dependency_table {
uint8_t count;
- struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samuclock_voltage_dependency_record entries[];
};
struct phm_acpclock_voltage_dependency_table {
uint32_t count;
- struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acpclock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_table {
uint8_t count;
- struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vce_clock_voltage_dependency_record entries[];
};
@@ -393,7 +393,7 @@ union phm_cac_leakage_record {
struct phm_cac_leakage_table {
uint32_t count;
- union phm_cac_leakage_record entries[] __counted_by(count);
+ union phm_cac_leakage_record entries[];
};
struct phm_samu_clock_voltage_dependency_record {
@@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record {
struct phm_samu_clock_voltage_dependency_table {
uint8_t count;
- struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samu_clock_voltage_dependency_record entries[];
};
struct phm_cac_tdp_table {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index bb3bc68dfc39..80e60ea2d11e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
+static bool smu_is_workload_profile_available(struct smu_context *smu,
+ u32 profile)
+{
+ if (profile >= PP_SMC_POWER_PROFILE_COUNT)
+ return false;
+ return smu->workload_map && smu->workload_map[profile].valid_mapping;
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1264,7 +1272,12 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+
+ if (smu->is_apu ||
+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ else
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2226,7 +2239,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
- bool force_update)
+ bool init)
{
int ret = 0;
int index = 0;
@@ -2255,7 +2268,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (force_update || smu_dpm_ctx->dpm_level != level) {
+ if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2272,7 +2285,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
- if (force_update || smu->power_profile_mode != workload[0])
+ if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
index ee457a6f0813..c2fd0a4a13e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -25,7 +25,7 @@
#define SMU14_DRIVER_IF_V14_0_H
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x18
+#define PPTABLE_VERSION 0x1B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -145,7 +145,7 @@ typedef enum {
} FEATURE_BTC_e;
// Debug Overrides Bitmask
-#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
@@ -161,6 +161,7 @@ typedef enum {
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -391,6 +392,21 @@ typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
+#define EPCS_HIGH_POWER 600
+#define EPCS_NORMAL_POWER 450
+#define EPCS_LOW_POWER 300
+#define EPCS_SHORTED_POWER 150
+#define EPCS_NO_BOOTUP 0
+
+typedef enum{
+ EPCS_SHORTED_LIMIT,
+ EPCS_LOW_POWER_LIMIT,
+ EPCS_NORMAL_POWER_LIMIT,
+ EPCS_HIGH_POWER_LIMIT,
+ EPCS_NOT_CONFIGURED,
+ EPCS_STATUS_COUNT,
+} EPCS_STATUS_e;
+
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
@@ -662,7 +678,7 @@ typedef enum {
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
-
+#define PP_NUM_PSM_DIDT_PWL_ZONES 3
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -746,10 +762,10 @@ typedef struct {
uint16_t Padding;
//Frequency changes
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
- uint16_t UclkFmin; // MHz
- uint16_t UclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding1;
+ uint16_t UclkFmin;
+ uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;
@@ -770,19 +786,23 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t AdvancedOdModeEnabled;
- uint8_t Padding1[3];
+ uint8_t Padding2[3];
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
- uint16_t Padding2;
+ uint16_t Padding3;
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- uint32_t Spare[10];
+ uint16_t GfxclkFmaxVmax;
+ uint8_t GfxclkFmaxVmaxTemperature;
+ uint8_t Padding4[1];
+
+ uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -802,8 +822,8 @@ typedef struct {
uint16_t VddSocVmax;
//gfxclk
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -828,7 +848,7 @@ typedef struct {
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
- uint8_t Padding[2];
+ uint8_t Padding1[2];
//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
@@ -839,7 +859,7 @@ typedef struct {
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- int16_t Padding1;
+ int16_t Padding2;
uint32_t Spare[5];
} OverDriveLimits_t;
@@ -987,8 +1007,9 @@ typedef struct {
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
-
- uint32_t Reserved[4];
+ uint16_t MaxReportedClock;
+ uint16_t Padding;
+ uint32_t Reserved[3];
} DriverReportedClocks_t;
typedef struct {
@@ -1132,7 +1153,7 @@ typedef struct {
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t GfxclkAibFmax;
- uint16_t GfxclkFreqCap;
+ uint16_t GfxDpmPadding;
//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz
@@ -1172,8 +1193,7 @@ typedef struct {
uint32_t DvoFmaxLowScaler; //Unitless float
// GFX DCS
- uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
- uint16_t PaddingDcs;
+ uint32_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
@@ -1205,8 +1225,7 @@ typedef struct {
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
- uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value
- uint16_t PaddingFclk;
+ uint32_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
@@ -1215,12 +1234,19 @@ typedef struct {
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
- uint8_t GfxAvfsPadding[3];
+ uint8_t GfxAvfsPadding[1];
+ uint16_t DroopGBStDev;
uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
- uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
+ uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t spare_HwRtAvfsFuses[19];
uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
@@ -1246,11 +1272,7 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
- //Unused: PMFW-9370
- uint32_t V2F_vmin_range_low;
- uint32_t V2F_vmin_range_high;
- uint32_t V2F_vmax_range_low;
- uint32_t V2F_vmax_range_high;
+ uint32_t PaddingV2F[4];
AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
@@ -1327,18 +1349,18 @@ typedef struct {
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
- uint32_t Leakage_C0; // in IEEE float
- uint32_t Leakage_C1; // in IEEE float
- uint32_t Leakage_C2; // in IEEE float
- uint32_t Leakage_C3; // in IEEE float
- uint32_t Leakage_C4; // in IEEE float
- uint32_t Leakage_C5; // in IEEE float
- uint32_t GFX_CLK_SCALAR; // in IEEE float
- uint32_t GFX_CLK_INTERCEPT; // in IEEE float
- uint32_t GFX_CAC_M; // in IEEE float
- uint32_t GFX_CAC_B; // in IEEE float
- uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
- uint32_t DynToTotalCacScalar; // in IEEE
+ uint32_t CacEdcCacLeakageC0;
+ uint32_t CacEdcCacLeakageC1;
+ uint32_t CacEdcCacLeakageC2;
+ uint32_t CacEdcCacLeakageC3;
+ uint32_t CacEdcCacLeakageC4;
+ uint32_t CacEdcCacLeakageC5;
+ uint32_t CacEdcGfxClkScalar;
+ uint32_t CacEdcGfxClkIntercept;
+ uint32_t CacEdcCac_m;
+ uint32_t CacEdcCac_b;
+ uint32_t CacEdcCurrLimitGuardband;
+ uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
@@ -1467,7 +1489,7 @@ typedef struct {
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
- uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
@@ -1530,7 +1552,7 @@ typedef struct {
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
- uint16_t FuzzyFan_Reserved;
+ uint16_t FanPadding2;
uint16_t FwCtfLimit[TEMP_COUNT];
@@ -1547,9 +1569,10 @@ typedef struct {
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
- uint32_t FanAmbientPerfBoostThreshold;
uint32_t FanSpare2[12];
+ uint32_t ODFeatureCtrlMask;
+
uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
@@ -1637,7 +1660,7 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
- uint16_t PCIeBusy ;
+ uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
@@ -1665,12 +1688,12 @@ typedef struct {
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
- uint16_t Vcn0ActivityPercentage ;
+ uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
- uint16_t MovingAverageTotalBoardPower;
+ uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
@@ -1684,7 +1707,8 @@ typedef struct {
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
- uint8_t padding1[3];
+ uint8_t VmaxThrottlingPercentage;
+ uint8_t padding1[2];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1693,7 +1717,7 @@ typedef struct {
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
- uint16_t MovingAvgApuSocketPower;
+ uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
@@ -1823,6 +1847,17 @@ typedef struct {
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
+#define TABLE_PPT_FAILED 0x100
+#define TABLE_TDC_FAILED 0x200
+#define TABLE_TEMP_FAILED 0x400
+#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
+#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
+#define TABLE_FAN_START_TEMP_FAILED 0x2000
+#define TABLE_FAN_PWM_MIN_FAILED 0x4000
+#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
+#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
+#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
+
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
@@ -1849,5 +1884,6 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 46b456590a08..727d5b405435 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 22737b11b1bf..1fe020f1f4db 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1d024b122b0c..d53e162dcd8d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2485,7 +2485,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
- u32 workload_mask;
+ u32 workload_mask, selected_workload_mask;
smu->power_profile_mode = input[size];
@@ -2552,21 +2552,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- workload_mask = 1 << workload_type;
+ selected_workload_mask = workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
- }
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500)) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
}
ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -2574,7 +2572,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask,
NULL);
if (!ret)
- smu->workload_mask = workload_mask;
+ smu->workload_mask = selected_workload_mask;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5899d01fa73d..e83ea2bc7f9c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1077,12 +1077,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
switch (od_feature_bit) {
case PP_OD_FEATURE_GFXCLK_FMIN:
- od_min_setting = overdrive_lowerlimits->GfxclkFmin;
- od_max_setting = overdrive_upperlimits->GfxclkFmin;
- break;
case PP_OD_FEATURE_GFXCLK_FMAX:
- od_min_setting = overdrive_lowerlimits->GfxclkFmax;
- od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ od_min_setting = overdrive_lowerlimits->GfxclkFoffset;
+ od_max_setting = overdrive_upperlimits->GfxclkFoffset;
break;
case PP_OD_FEATURE_UCLK_FMIN:
od_min_setting = overdrive_lowerlimits->UclkFmin;
@@ -1269,10 +1266,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
- size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
- od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
+ overdrive_lowerlimits->GfxclkFoffset,
+ overdrive_upperlimits->GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1414,7 +1417,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_FMAX,
NULL,
&max_value);
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
}
@@ -1796,7 +1799,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
-
+ uint32_t current_profile_mode = smu->power_profile_mode;
smu->power_profile_mode = input[size];
if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
@@ -1854,6 +1857,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
}
}
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, false);
+ else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
+ smu_v14_0_deep_sleep_control(smu, true);
+
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -2158,7 +2166,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -2217,8 +2225,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
od_table->OverDriveTable.UclkFmax);
}
@@ -2309,10 +2316,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
memcpy(user_od_table,
boot_od_table,
sizeof(OverDriveTableExternal_t));
- user_od_table->OverDriveTable.GfxclkFmin =
- user_od_table_bak.OverDriveTable.GfxclkFmin;
- user_od_table->OverDriveTable.GfxclkFmax =
- user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.GfxclkFoffset =
+ user_od_table_bak.OverDriveTable.GfxclkFoffset;
user_od_table->OverDriveTable.UclkFmin =
user_od_table_bak.OverDriveTable.UclkFmin;
user_od_table->OverDriveTable.UclkFmax =
@@ -2441,22 +2446,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
switch (input[i]) {
- case 0:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFmin = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
-
case 1:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
@@ -2469,7 +2458,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
@@ -2480,13 +2469,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
}
- if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
- dev_err(adev->dev,
- "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
- (uint32_t)od_table->OverDriveTable.GfxclkFmin,
- (uint32_t)od_table->OverDriveTable.GfxclkFmax);
- return -EINVAL;
- }
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c
index 496c7120e515..c231389936bd 100644
--- a/drivers/gpu/drm/ast/ast_sil164.c
+++ b/drivers/gpu/drm/ast/ast_sil164.c
@@ -29,6 +29,8 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
if (ast_connector->physical_status == connector_status_connected) {
count = drm_connector_helper_get_modes(connector);
} else {
+ drm_edid_connector_update(connector, NULL);
+
/*
* There's no EDID data without a connected monitor. Set BMC-
* compatible modes in this case. The XGA default resolution
diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c
index 3e815da43fbd..dd389a0a8f4a 100644
--- a/drivers/gpu/drm/ast/ast_vga.c
+++ b/drivers/gpu/drm/ast/ast_vga.c
@@ -29,6 +29,8 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
if (ast_connector->physical_status == connector_status_connected) {
count = drm_connector_helper_get_modes(connector);
} else {
+ drm_edid_connector_update(connector, NULL);
+
/*
* There's no EDID data without a connected monitor. Set BMC-
* compatible modes in this case. The XGA default resolution
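Aside: a minimal sketch of the connector-helper pattern both ast hunks use; the fallback helper is hypothetical. Passing NULL to drm_edid_connector_update() drops any cached EDID, so modes from a previously connected monitor are not reported after unplug.

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

static int example_add_bmc_modes(struct drm_connector *connector)
{
        return 0;       /* hypothetical: would add BMC-compatible modes */
}

static int example_get_modes(struct drm_connector *connector, bool connected)
{
        if (connected)
                return drm_connector_helper_get_modes(connector);

        drm_edid_connector_update(connector, NULL);     /* clear stale EDID */

        return example_add_bmc_modes(connector);
}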
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index b29980f95379..295e9d031e2d 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent)
adev->id = ret;
adev->name = "aux_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_bridge_release;
+ device_set_of_node_from_dev(&adev->dev, parent);
+
ret = auxiliary_device_init(adev);
if (ret) {
ida_free(&drm_aux_bridge_ida, adev->id);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index dee640ab1d3a..41f72d458487 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -47,7 +47,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "cdns-mhdp8546-core.h"
#include "cdns-mhdp8546-hdcp.h"
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 5e3b8edcf794..31832ba4017f 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/display/drm_hdcp_helper.h>
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index e7e53a9e42af..430f8adebf9c 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -10,7 +10,7 @@
* Tomasz Figa <t.figa@samsung.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 6bb755e9f0a5..26b8d137bce0 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -6,7 +6,7 @@
* Andrzej Hajda <a.hajda@samsung.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/bridge/mhl.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 290e2532fab1..f3afdab55c11 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2391,6 +2391,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 ||
tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) {
dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
+ of_node_put(node);
return -EINVAL;
}
}
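Aside: a minimal sketch of the reference-count rule the tc358767 fix enforces -- every early return taken while holding a device_node reference must drop it. The validation step is a hypothetical stand-in.

#include <linux/errno.h>
#include <linux/of.h>

static int example_validate(struct device_node *node)
{
        return node ? 0 : -EINVAL;      /* hypothetical stand-in check */
}

static int example_parse(struct device_node *parent)
{
        struct device_node *node = of_get_child_by_name(parent, "ports");
        int err;

        if (!node)
                return -ENODEV;

        err = example_validate(node);
        if (err) {
                of_node_put(node);      /* drop the ref on the error path too */
                return err;
        }

        of_node_put(node);
        return 0;
}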
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 3b7cc3be2ccd..0b4efaca6d68 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -19,7 +19,7 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 84698a0b27a8..582cf4f73a74 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -21,7 +21,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index a040d7dfced1..ac90118b9e7a 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -6083,6 +6083,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
struct drm_dp_aux *immediate_upstream_aux;
struct drm_dp_mst_port *fec_port;
struct drm_dp_desc desc = {};
+ u8 upstream_dsc;
u8 endpoint_fec;
u8 endpoint_dsc;
@@ -6109,8 +6110,6 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- u8 upstream_dsc;
-
if (drm_dp_dpcd_read(&port->aux,
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
return NULL;
@@ -6156,6 +6155,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+ if (drm_dp_dpcd_read(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ return NULL;
+
+ if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+ return NULL;
+
if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
return NULL;
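Aside: a minimal sketch of the capability check this hunk adds before trusting the DSC_WITHOUT_VIRTUAL_DPCD quirk; the function name is hypothetical, while the DPCD register and bit are the ones the patch reads.

#include <drm/display/drm_dp_helper.h>

static bool example_upstream_supports_dsc(struct drm_dp_aux *aux)
{
        u8 dsc_cap;

        if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, &dsc_cap, 1) != 1)
                return false;   /* treat a failed AUX read as no support */

        return dsc_cap & DP_DSC_DECOMPRESSION_IS_SUPPORTED;
}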
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index 7854820089ec..feb7a3a75981 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -521,8 +521,6 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
-#define HDMI_MAX_INFOFRAME_SIZE 29
-
static int clear_device_infoframe(struct drm_connector *connector,
enum hdmi_infoframe_type type)
{
@@ -563,7 +561,7 @@ static int write_device_infoframe(struct drm_connector *connector,
{
const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs;
struct drm_device *dev = connector->dev;
- u8 buffer[HDMI_MAX_INFOFRAME_SIZE];
+ u8 buffer[HDMI_INFOFRAME_SIZE(MAX)];
int ret;
int len;
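Aside: this hunk (and the drm_debugfs.c change below) replaces a local 29-byte define with HDMI_INFOFRAME_SIZE(MAX) from <linux/hdmi.h>, which already sizes for the largest packed infoframe. A small sketch of a buffer sized that way; the wrapper name is hypothetical.

#include <linux/hdmi.h>

static ssize_t example_pack_infoframe(union hdmi_infoframe *frame)
{
        u8 buf[HDMI_INFOFRAME_SIZE(MAX)];       /* header + largest payload */

        return hdmi_infoframe_pack(frame, buf, sizeof(buf));
}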
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 7936c2023955..370dc676e3aa 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -543,7 +543,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
&state->fb_damage_clips,
val,
-1,
- sizeof(struct drm_rect),
+ sizeof(struct drm_mode_rect),
&replaced);
return ret;
} else if (property == plane->scaling_filter_property) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6b239a24f1df..9d3e6dd68810 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -520,8 +520,6 @@ static const struct file_operations drm_connector_fops = {
.write = connector_write
};
-#define HDMI_MAX_INFOFRAME_SIZE 29
-
static ssize_t
audio_infoframe_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos)
{
@@ -579,7 +577,7 @@ static ssize_t _f##_read_infoframe(struct file *filp, \
struct drm_connector *connector; \
union hdmi_infoframe *frame; \
struct drm_device *dev; \
- u8 buf[HDMI_MAX_INFOFRAME_SIZE]; \
+ u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; \
ssize_t len = 0; \
\
connector = filp->private_data; \
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b0602c4f3628..51c2d742d199 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
if (!fb_helper->dev)
return;
- fb_deferred_io_cleanup(info);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index faa253b27664..14ac351fd76d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -123,9 +123,8 @@ config DRM_I915_USERPTR
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
- depends on X86
+ depends on KVM_X86
depends on 64BIT
- depends on KVM
depends on VFIO
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 15541932b809..eeaedd979354 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -89,25 +89,19 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
const struct intel_connector *connector,
- bool ssc, bool dsc, int bpp_x16)
+ bool ssc, int dsc_slice_count, int bpp_x16)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
- int dsc_slice_count = 0;
int overhead;
flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
- if (dsc) {
+ if (dsc_slice_count)
flags |= DRM_DP_BW_OVERHEAD_DSC;
- dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
- adjusted_mode->clock,
- adjusted_mode->hdisplay,
- crtc_state->joiner_pipes);
- }
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
adjusted_mode->hdisplay,
@@ -153,6 +147,19 @@ static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}
+static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int num_joined_pipes = crtc_state->joiner_pipes;
+
+ return intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->clock,
+ adjusted_mode->hdisplay,
+ num_joined_pipes);
+}
+
static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int max_bpp,
@@ -172,6 +179,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
+ int dsc_slice_count = 0;
int max_dpt_bpp;
int ret = 0;
@@ -203,6 +211,15 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
+ if (dsc) {
+ dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
+ if (!dsc_slice_count) {
+ drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n");
+
+ return -ENOSPC;
+ }
+ }
+
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
int local_bw_overhead;
int remote_bw_overhead;
@@ -216,9 +233,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
intel_dp_output_bpp(crtc_state->output_format, bpp));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
- false, dsc, link_bpp_x16);
+ false, dsc_slice_count, link_bpp_x16);
remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
- true, dsc, link_bpp_x16);
+ true, dsc_slice_count, link_bpp_x16);
intel_dp_mst_compute_m_n(crtc_state, connector,
local_bw_overhead,
@@ -449,6 +466,9 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
return false;
+ if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
+ return false;
+
return true;
}
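Aside: a sketch of the compute-once pattern these hunks introduce, where a slice count of zero doubles as "DSC unusable" and is passed down instead of a bool; all names are hypothetical stand-ins.

#include <linux/errno.h>

struct example_mode {
        int clock;
        int hdisplay;
};

static int example_get_slice_count(const struct example_mode *mode)
{
        return mode->hdisplay / 640;    /* made-up policy; 0 = unusable */
}

static int example_check_dsc(const struct example_mode *mode, bool dsc,
                             int *slice_count)
{
        *slice_count = 0;

        if (dsc) {
                *slice_count = example_get_slice_count(mode);
                if (!*slice_count)
                        return -ENOSPC; /* no valid DSC configuration */
        }

        return 0;       /* callers pass *slice_count down; 0 means no DSC */
}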
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index d8951464bd2b..f0e3be0fe420 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -32,7 +32,7 @@
#include <linux/slab.h>
#include <linux/string_helpers.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 5be7bb43e2e0..35557d98d7a7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -438,6 +438,19 @@ bool intel_fb_needs_64k_phys(u64 modifier)
INTEL_PLANE_CAP_NEED64K_PHYS);
}
+/**
+ * intel_fb_is_tile4_modifier: Check if a modifier is a tile4 modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a tile4 modifier.
+ */
+bool intel_fb_is_tile4_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_TILING_4);
+}
+
static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
u8 display_ver_from, u8 display_ver_until)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 10de437e8ef8..827be3f7934c 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -35,6 +35,7 @@ bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
bool intel_fb_is_mc_ccs_modifier(u64 modifier);
bool intel_fb_needs_64k_phys(u64 modifier);
+bool intel_fb_is_tile4_modifier(u64 modifier);
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6980b98792c2..377939de0ff4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1094,7 +1094,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- queue_work(i915->unordered_wq, &hdcp->prop_work);
+ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ drm_connector_put(&connector->base);
}
}
@@ -2524,7 +2525,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- queue_work(i915->unordered_wq, &hdcp->prop_work);
+ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ drm_connector_put(&connector->base);
mutex_unlock(&hdcp->mutex);
}
@@ -2541,7 +2543,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- queue_work(i915->unordered_wq, &hdcp->prop_work);
+ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ drm_connector_put(&connector->base);
+
}
}
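Aside: a minimal sketch of the refcount balance all three hdcp hunks apply -- queue_work() returns false when the work item is already pending, and in that case the reference taken for the worker must be dropped immediately to avoid a leak.

#include <linux/workqueue.h>
#include <drm/drm_connector.h>

static void example_schedule_prop_work(struct drm_connector *connector,
                                       struct workqueue_struct *wq,
                                       struct work_struct *work)
{
        drm_connector_get(connector);           /* ref for the worker */
        if (!queue_work(wq, work))
                drm_connector_put(connector);   /* already queued: drop ref */
}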
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 17d4c880ecc4..c8720d31d101 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -1591,6 +1591,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
return -EINVAL;
}
+ /*
+	 * From display version 20 onward, horizontal flip is not supported with tile4
+ */
+ if (rotation & DRM_MODE_REFLECT_X &&
+ intel_fb_is_tile4_modifier(fb->modifier) &&
+ DISPLAY_VER(dev_priv) >= 20) {
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with tile4 surface formats\n");
+ return -EINVAL;
+ }
+
if (drm_rotation_90_or_270(rotation)) {
if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) {
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 5c72462d1f57..b22e2019768f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1131,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
}
- if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
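Aside: a compact illustration of the operator fix above; the timeout constant is a stand-in for the Kconfig value. The wakeref cookie is opaque, so bitwise-ANDing it against a millisecond timeout tests arbitrary bits.

#define EXAMPLE_AUTOSUSPEND_MS 250      /* stand-in for the Kconfig value */

static bool example_should_arm_autosuspend(unsigned long wakeref_cookie)
{
        /* logical &&, not bitwise &: each operand is tested for nonzero */
        return wakeref_cookie && EXAMPLE_AUTOSUSPEND_MS != 0;
}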
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 175b00e5a253..eb0e1233ad04 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -127,9 +127,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
-
if (mtk_crtc->cmdq_client.chan) {
+ cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
}
@@ -913,6 +912,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
BIT(pipe),
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
+ mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp), i);
if (ret)
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index be66d94be361..edc6417639e6 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
.bgclr_in_off = mtk_ovl_bgclr_in_off,
+ .get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
};
@@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.disconnect = mtk_ovl_adaptor_disconnect,
.add = mtk_ovl_adaptor_add_comp,
.remove = mtk_ovl_adaptor_remove_comp,
+ .get_blend_modes = mtk_ovl_adaptor_get_blend_modes,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
.mode_valid = mtk_ovl_adaptor_mode_valid,
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index ecf6dc283cd7..39720b27f4e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs {
void (*ctm_set)(struct device *dev,
struct drm_crtc_state *state);
struct device * (*dma_dev_get)(struct device *dev);
+ u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
@@ -267,6 +268,15 @@ static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp)
}
static inline
+u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->get_blend_modes)
+ return comp->funcs->get_blend_modes(comp->dev);
+
+ return 0;
+}
+
+static inline
const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->get_formats)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 082ac18fe04a..04154db9085c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -103,6 +103,7 @@ void mtk_ovl_register_vblank_cb(struct device *dev,
void mtk_ovl_unregister_vblank_cb(struct device *dev);
void mtk_ovl_enable_vblank(struct device *dev);
void mtk_ovl_disable_vblank(struct device *dev);
+u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
@@ -131,6 +132,7 @@ void mtk_ovl_adaptor_start(struct device *dev);
void mtk_ovl_adaptor_stop(struct device *dev);
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev);
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev);
+u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev);
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev);
enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 89b439dcf3a6..e0c0bb01f65a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -65,8 +65,8 @@
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
-#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
-#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -146,6 +146,7 @@ struct mtk_disp_ovl_data {
bool fmt_rgb565_is_0;
bool smi_id_en;
bool supports_afbc;
+ const u32 blend_modes;
const u32 *formats;
size_t num_formats;
bool supports_clrfmt_ext;
@@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev)
writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
}
+u32 mtk_ovl_get_blend_modes(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ return ovl->data->blend_modes;
+}
+
const u32 *mtk_ovl_get_formats(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
@@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
- unsigned int blend_mode)
+static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
+ struct mtk_plane_state *state)
{
- /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
- * is defined in mediatek HW data sheet.
- * The alphabet order in XXX is no relation to data
- * arrangement in memory.
+ unsigned int fmt = state->pending.format;
+ unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;
+
+ /*
+	 * Some platforms define OVL_CON_CLRFMT_MAN in the hardware data sheet and
+	 * support premultiplied color formats such as OVL_CON_CLRFMT_PARGB8888.
+	 *
+	 * Check blend_modes in the driver data to see if premultiplied mode is supported.
+	 * If it is not, fall back to coverage mode and use the color formats it supports.
+ *
+ * Current DRM assumption is that alpha is default premultiplied, so the bitmask of
+ * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init()
+ * will get an error return from drm_plane_create_blend_mode_property() and
+ * state->base.pixel_blend_mode should not be used.
*/
+ if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
+ blend_mode = state->base.pixel_blend_mode;
+
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -471,20 +492,26 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
return;
}
- con = ovl_fmt_convert(ovl, fmt, blend_mode);
+ con = mtk_ovl_fmt_convert(ovl, state);
if (state->base.fb) {
- con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
- }
- /* CONST_BLD must be enabled for XRGB formats although the alpha channel
- * can be ignored, or OVL will still read the value from memory.
- * For RGB888 related formats, whether CONST_BLD is enabled or not won't
- * affect the result. Therefore we use !has_alpha as the condition.
- */
- if ((state->base.fb && !state->base.fb->format->has_alpha) ||
- blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
- ignore_pixel_alpha = OVL_CONST_BLEND;
+ /*
+		 * On SoCs that support blend_modes, always enable alpha blending.
+		 * On SoCs that do not, enable alpha blending only when has_alpha is set.
+ */
+ if (blend_mode || state->base.fb->format->has_alpha)
+ con |= OVL_CON_AEN;
+
+ /*
+ * Although the alpha channel can be ignored, CONST_BLD must be enabled
+		 * for XRGB formats, otherwise OVL will still read the value from memory.
+ * For RGB888 related formats, whether CONST_BLD is enabled or not won't
+ * affect the result. Therefore we use !has_alpha as the condition.
+ */
+ if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
+ ignore_pixel_alpha = OVL_CONST_BLEND;
+ }
if (pending->rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
@@ -663,6 +690,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@@ -673,6 +703,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
.layer_nr = 2,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@@ -684,6 +717,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.supports_afbc = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8195_formats,
.num_formats = ARRAY_SIZE(mt8195_formats),
.supports_clrfmt_ext = true,
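Aside: a sketch of the per-SoC capability mask the mediatek driver data now carries, exposed through the new get_blend_modes callback; structure and instance names here are hypothetical.

#include <linux/bits.h>
#include <drm/drm_blend.h>

struct example_ovl_data {
        u32 blend_modes;        /* BIT(DRM_MODE_BLEND_*) mask, 0 = none */
};

static const struct example_ovl_data example_soc = {
        .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
                       BIT(DRM_MODE_BLEND_COVERAGE) |
                       BIT(DRM_MODE_BLEND_PIXEL_NONE),
};

static u32 example_get_blend_modes(const struct example_ovl_data *data)
{
        return data->blend_modes;       /* 0 on SoCs without the feature */
}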
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index c6768210b08b..bf2546c4681a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev)
mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
+u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+}
+
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index d8796a904eca..f2bee617f063 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -145,6 +145,89 @@ struct mtk_dp_data {
u16 audio_m_div2_bit;
};
+static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 0,
+ .shift = 10,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 15,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 1,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 1,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 1,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 1,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 1,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 1,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 1,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 1,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 3,
@@ -2771,7 +2854,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
static const struct mtk_dp_data mt8188_dp_data = {
.bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
.smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
- .efuse_fmt = mt8195_dp_efuse_fmt,
+ .efuse_fmt = mt8188_dp_efuse_fmt,
.audio_supported = true,
.audio_pkt_in_hblank_area = true,
.audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index d1d9cf8b10e1..0f22e7d337cb 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+u32 mtk_ethdr_get_blend_modes(struct device *dev)
+{
+ return BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE);
+}
+
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h
index 81af9edea3f7..a72aeee46829 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.h
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h
@@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev);
void mtk_ethdr_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+u32 mtk_ethdr_get_blend_modes(struct device *dev);
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt);
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 7d2cb4e0fafa..8a48b3b0a956 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int supported_rotations, const u32 *formats,
- size_t num_formats, unsigned int plane_idx)
+ unsigned int supported_rotations, const u32 blend_modes,
+ const u32 *formats, size_t num_formats, unsigned int plane_idx)
{
int err;
@@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (err)
DRM_ERROR("failed to create property: alpha\n");
- err = drm_plane_create_blend_mode_property(plane,
- BIT(DRM_MODE_BLEND_PREMULTI) |
- BIT(DRM_MODE_BLEND_COVERAGE) |
- BIT(DRM_MODE_BLEND_PIXEL_NONE));
- if (err)
- DRM_ERROR("failed to create property: blend_mode\n");
+ if (blend_modes) {
+ err = drm_plane_create_blend_mode_property(plane, blend_modes);
+ if (err)
+ DRM_ERROR("failed to create property: blend_mode\n");
+ }
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 5b177eac67b7..3b13b89989c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int supported_rotations, const u32 *formats,
- size_t num_formats, unsigned int plane_idx);
+ unsigned int supported_rotations, const u32 blend_modes,
+ const u32 *formats, size_t num_formats, unsigned int plane_idx);
#endif
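Aside: a sketch of the conditional property creation mtk_plane_init() now performs. Per the comment added in mtk_disp_ovl.c, the DRM core rejects any non-zero mask that omits DRM_MODE_BLEND_PREMULTI, which is why the driver data always includes it.

#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

static int example_create_blend_property(struct drm_plane *plane, u32 modes)
{
        if (!modes)
                return 0;       /* no property: SoC reports no blend modes */

        return drm_plane_create_blend_mode_property(plane, modes);
}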
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 6623ee4e3277..9f5925693686 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -18,7 +18,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -85,34 +84,6 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
-static irqreturn_t mgag200_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct mga_device *mdev = to_mga_device(dev);
- struct drm_crtc *crtc;
- u32 status, ien;
-
- status = RREG32(MGAREG_STATUS);
-
- if (status & MGAREG_STATUS_VLINEPEN) {
- ien = RREG32(MGAREG_IEN);
- if (!(ien & MGAREG_IEN_VLINEIEN))
- goto out;
-
- crtc = drm_crtc_from_index(dev, 0);
- if (WARN_ON_ONCE(!crtc))
- goto out;
- drm_crtc_handle_vblank(crtc);
-
- WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
- return IRQ_HANDLED;
- }
-
-out:
- return IRQ_NONE;
-}
-
/*
* DRM driver
*/
@@ -196,7 +167,6 @@ int mgag200_device_init(struct mga_device *mdev,
const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 crtcext3, misc;
int ret;
@@ -223,14 +193,6 @@ int mgag200_device_init(struct mga_device *mdev,
mutex_unlock(&mdev->rmmio_lock);
WREG32(MGAREG_IEN, 0);
- WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
- ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret) {
- drm_err(dev, "Failed to acquire interrupt, error %d\n", ret);
- return ret;
- }
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4760ba92871b..988967eafbf2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -391,24 +391,17 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
-bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
- int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
#define MGAG200_CRTC_HELPER_FUNCS \
.mode_valid = mgag200_crtc_helper_mode_valid, \
.atomic_check = mgag200_crtc_helper_atomic_check, \
.atomic_flush = mgag200_crtc_helper_atomic_flush, \
.atomic_enable = mgag200_crtc_helper_atomic_enable, \
- .atomic_disable = mgag200_crtc_helper_atomic_disable, \
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
void mgag200_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
-int mgag200_crtc_enable_vblank(struct drm_crtc *crtc);
-void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
#define MGAG200_CRTC_FUNCS \
.reset = mgag200_crtc_reset, \
@@ -416,10 +409,7 @@ void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
.set_config = drm_atomic_helper_set_config, \
.page_flip = drm_atomic_helper_page_flip, \
.atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
- .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \
- .enable_vblank = mgag200_crtc_enable_vblank, \
- .disable_vblank = mgag200_crtc_disable_vblank, \
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
bool set_vidrst);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index 77ce8d36cef0..f874e2949840 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -404,9 +403,5 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 09ced65c1d2f..e2305f8e00f8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -276,9 +275,5 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index 5daa469137bd..11ae76eb081d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -7,7 +7,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -181,9 +180,5 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 09cfffafe130..c20ed0ab50ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -206,8 +205,6 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
@@ -215,8 +212,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable,
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
};
static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
@@ -312,9 +308,5 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 3d48baa91d8b..78be964eb97c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -207,8 +206,6 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
@@ -216,8 +213,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable,
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
};
static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
@@ -317,9 +313,5 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index dabc778e64e8..31624c9ab7b7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -7,7 +7,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -199,9 +198,5 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 9dcbe8304271..7a32d3b1d226 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -338,8 +337,6 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
@@ -347,8 +344,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable,
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
};
static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
@@ -517,9 +513,5 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index 83a24aedbf2f..a0e7b9ad46cd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -323,9 +322,5 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 7159909aca1e..fb71658c3117 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -22,7 +22,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
#include "mgag200_ddc.h"
#include "mgag200_drv.h"
@@ -227,14 +226,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
vblkstr = mode->crtc_vblank_start;
vblkend = vtotal + 1;
- /*
- * There's no VBLANK interrupt on Matrox chipsets, so we use
- * the VLINE interrupt instead. It triggers when the current
- * <linecomp> has been reached. For VBLANK, this is the first
- * non-visible line at the bottom of the screen. Therefore,
- * keep <linecomp> in sync with <vblkstr>.
- */
- linecomp = vblkstr;
+ linecomp = vdispend;
misc = RREG8(MGA_MISC_IN);
@@ -645,8 +637,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
- struct drm_pending_vblank_event *event;
- unsigned long flags;
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
const struct drm_format_info *format = mgag200_crtc_state->format;
@@ -656,18 +646,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
else
mgag200_crtc_set_gamma_linear(mdev, format);
}
-
- event = crtc->state->event;
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (drm_crtc_vblank_get(crtc) != 0)
- drm_crtc_send_vblank_event(crtc, event);
- else
- drm_crtc_arm_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
}
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -692,44 +670,15 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
struct mga_device *mdev = to_mga_device(crtc->dev);
- drm_crtc_vblank_off(crtc);
-
mgag200_disable_display(mdev);
}
-bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
- int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct mga_device *mdev = to_mga_device(crtc->dev);
- u32 vcount;
-
- if (stime)
- *stime = ktime_get();
-
- if (vpos) {
- vcount = RREG32(MGAREG_VCOUNT);
- *vpos = vcount & GENMASK(11, 0);
- }
-
- if (hpos)
- *hpos = mode->htotal >> 1; // near middle of scanline on average
-
- if (etime)
- *etime = ktime_get();
-
- return true;
-}
-
void mgag200_crtc_reset(struct drm_crtc *crtc)
{
struct mgag200_crtc_state *mgag200_crtc_state;
@@ -774,30 +723,6 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
kfree(mgag200_crtc_state);
}
-int mgag200_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct mga_device *mdev = to_mga_device(crtc->dev);
- u32 ien;
-
- WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
- ien = RREG32(MGAREG_IEN);
- ien |= MGAREG_IEN_VLINEIEN;
- WREG32(MGAREG_IEN, ien);
-
- return 0;
-}
-
-void mgag200_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct mga_device *mdev = to_mga_device(crtc->dev);
- u32 ien;
-
- ien = RREG32(MGAREG_IEN);
- ien &= ~(MGAREG_IEN_VLINEIEN);
- WREG32(MGAREG_IEN, ien);
-}
-
/*
* Mode config
*/
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 06cab2c6fd66..702b8d4b3497 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -101,9 +101,10 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
}
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
- struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+ struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+ struct msm_file_private *ctx = submit->queue->ctx;
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
@@ -115,6 +116,15 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
return;
+ if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
+ /* Wait for previous submit to complete before continuing: */
+ OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno - 1);
+ }
+
if (!sysprof) {
if (!adreno_is_a7xx(adreno_gpu)) {
/* Turn off protected mode to write to special registers */
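Aside: a sketch of the seqno ordering rule the new CP_WAIT_TIMESTAMP packet enforces in hardware -- the pagetable switch may not overtake in-flight work, so the ring stalls until the fence memptr reaches the previous submit's seqno. The wrap-safe comparison below is illustrative; names are hypothetical.

#include <linux/types.h>

static bool example_prior_submit_retired(u32 fence_memptr, u32 seqno)
{
        /* the previous submit has retired once the fence reaches seqno - 1 */
        return (s32)(fence_memptr - (seqno - 1)) >= 0;
}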
@@ -193,7 +203,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
- a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+ a6xx_set_pagetable(a6xx_gpu, ring, submit);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
@@ -283,7 +293,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
- a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+ a6xx_set_pagetable(a6xx_gpu, ring, submit);
get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4c1be2f0555f..db6c57900781 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -711,12 +711,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
_dpu_crtc_complete_flip(crtc);
}
-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
int i;
for (i = 0; i < cstate->num_mixers; i++) {
@@ -727,7 +728,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+
+ if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
+ return -E2BIG;
}
+
+ return 0;
}
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -803,7 +809,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
@@ -1091,9 +1097,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
dpu_core_perf_crtc_update(crtc, 0);
- memset(cstate->mixers, 0, sizeof(cstate->mixers));
- cstate->num_mixers = 0;
-
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
cstate->bw_split_vote = false;
@@ -1192,8 +1195,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
if (crtc_state->active_changed)
crtc_state->mode_changed = true;
- if (cstate->num_mixers)
- _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+ if (cstate->num_mixers) {
+ rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
+ if (rc)
+ return rc;
+ }
/* FIXME: move this to dpu_plane_atomic_check? */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
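Aside: a sketch of the bounds rule now enforced in atomic_check -- the mode width is split evenly across the layer mixers, and exceeding the catalog limit fails the commit early with -E2BIG instead of programming an invalid configuration. Names are hypothetical; num_mixers is assumed non-zero.

#include <linux/errno.h>

static int example_check_mixer_width(int hdisplay, int num_mixers,
                                     int max_mixer_width)
{
        int per_mixer = hdisplay / num_mixers;  /* split width per mixer */

        return per_mixer > max_mixer_width ? -E2BIG : 0;
}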
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 3b171bf227d1..bd3698bf0cf7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -624,6 +624,40 @@ static struct msm_display_topology dpu_encoder_get_topology(
return topology;
}
+static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
+ struct drm_encoder *drm_enc,
+ struct dpu_global_state *global_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dpu_crtc_state *cstate;
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_dspp, i;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
+ }
+
+ cstate->num_mixers = num_lm;
+}
+
static int dpu_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
@@ -692,6 +726,9 @@ static int dpu_encoder_virt_atomic_check(
if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
drm_enc, crtc_state, topology);
+ if (!ret)
+ dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
+ global_state, crtc_state);
}
trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
@@ -1093,14 +1130,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- struct dpu_crtc_state *cstate;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_pp, num_dsc;
+ int num_ctl, num_pp, num_dsc;
unsigned int dsc_mask = 0;
int i;
@@ -1129,11 +1163,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
ARRAY_SIZE(hw_pp));
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
- ARRAY_SIZE(hw_dspp));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1159,36 +1188,23 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
- cstate = to_dpu_crtc_state(crtc_state);
-
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
- cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
- }
-
- cstate->num_mixers = num_lm;
-
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!dpu_enc->hw_pp[i]) {
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ if (!phys->hw_pp) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
return;
}
- if (!hw_ctl[i]) {
+ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+ if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
return;
}
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
-
phys->cached_mode = crtc_state->adjusted_mode;
if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index ba8878d21cf0..d8a2edebfe8c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
- if (phys_enc->hw_pp->merge_3d)
+ if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
@@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_ctl *ctl;
const struct msm_format *fmt;
u32 fmt_fourcc;
+ u32 mode_3d;
ctl = phys_enc->hw_ctl;
fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0);
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
DPU_DEBUG_VIDENC(phys_enc, "\n");
@@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
goto skip_flush;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
- if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
+ if (mode_3d && ctl->ops.update_pending_flush_merge_3d &&
+ phys_enc->hw_pp->merge_3d)
ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 882c717859ce..07035ab77b79 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_cdm *hw_cdm;
u32 pending_flush = 0;
+ u32 mode_3d;
if (!phys_enc)
return;
@@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
hw_pp = phys_enc->hw_pp;
hw_ctl = phys_enc->hw_ctl;
hw_cdm = phys_enc->hw_cdm;
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
if (hw_ctl->ops.update_pending_flush_wb)
hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
- if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+ if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d &&
+ hw_pp && hw_pp->merge_3d)
hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
hw_pp->merge_3d->idx);
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index add72bbc28b1..4d55e3cf570f 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
end_addr = base_addr + aligned_len;
if (!(*reg))
- *reg = kzalloc(len_padded, GFP_KERNEL);
+ *reg = kvzalloc(len_padded, GFP_KERNEL);
if (*reg)
dump_addr = *reg;
@@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
}
}
-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
- struct drm_printer *p)
+static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
+ void __iomem *base_addr, struct drm_printer *p)
{
int i;
- u32 *dump_addr = NULL;
void __iomem *addr;
u32 num_rows;
+ if (!dump_addr) {
+ drm_printf(p, "Registers not stored\n");
+ return;
+ }
+
addr = base_addr;
num_rows = len / REG_DUMP_ALIGN;
- if (*reg)
- dump_addr = *reg;
-
for (i = 0; i < num_rows; i++) {
drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
(unsigned long)(addr - base_addr),
@@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drm_printf(p, "====================%s================\n", block->name);
- msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+ msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
}
drm_printf(p, "===================dpu drm state================\n");
@@ -161,7 +162,7 @@ void msm_disp_state_free(void *data)
list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
list_del(&block->node);
- kfree(block->state);
+ kvfree(block->state);
kfree(block);
}
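
Switching the snapshot buffers from kzalloc()/kfree() to kvzalloc()/kvfree() matters because register blocks can be large enough that a physically contiguous allocation fails under memory pressure. A minimal kernel-style sketch of the pairing rule, with the helper names assumed for illustration:

#include <linux/mm.h>
#include <linux/slab.h>

/* kvzalloc() tries kmalloc() first and falls back to vzalloc() for
 * large or fragmented requests, so a multi-page register dump no
 * longer depends on finding contiguous pages. */
static u32 *reg_dump_alloc(size_t len_padded)
{
	return kvzalloc(len_padded, GFP_KERNEL);
}

static void reg_dump_free(u32 *dump)
{
	/* Must be kvfree(), never kfree(): the buffer may live in
	 * vmalloc space. */
	kvfree(dump);
}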
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 185d7de0bf37..a98d24b7cb00 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo
int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
- return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
+ return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
}
static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
@@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
{
unsigned long pclk_rate;
- pclk_rate = mode->clock * 1000;
+ pclk_rate = mode->clock * 1000u;
if (dsc)
pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
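
The mult_frac() form computes the compressed pixel clock by scaling mode->clock with the htotal ratio, avoiding both the rounding error of going through drm_mode_vrefresh() and the intermediate overflow of multiplying htotal * vtotal * refresh. A standalone sketch of the arithmetic, with the kernel's mult_frac() open-coded and the mode values purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Open-coded equivalent of the kernel's mult_frac(x, n, d): splits x
 * into quotient and remainder by d first, so x * n never has to fit
 * in the type. */
#define mult_frac(x, n, d) ((x) / (d) * (n) + (x) % (d) * (n) / (d))

int main(void)
{
	uint32_t clock_khz = 148500;	/* mode->clock, illustrative */
	uint32_t htotal = 2200;
	uint32_t new_htotal = 1280;	/* htotal shrunk by DSC */

	unsigned long pclk = mult_frac(clock_khz * 1000u, new_htotal, htotal);

	printf("compressed pclk: %lu Hz\n", pclk);	/* 86400000 */
	return 0;
}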
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index 0e3a2b16a2ce..e6ffaf92d26d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -153,15 +153,6 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
return dividend - 1;
}
-static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
-{
- u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
-
- do_div(fdata, HDMI_PLL_CMP_CNT);
-
- return fdata;
-}
-
#define HDMI_REF_CLOCK_HZ ((u64)19200000)
#define HDMI_MHZ_TO_HZ ((u64)1000000)
static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index a2eaf3929ac3..4a1123b81fee 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -30,7 +30,7 @@
#include <linux/iommu.h>
#include <linux/of_device.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 9e6f39912368..a2055f2a014a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -210,7 +210,7 @@ struct nvkm_gsp {
} *rm;
struct {
- struct mutex mutex;;
+ struct mutex mutex;
struct idr idr;
} client_id;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1f2d649f4b96..1a072568cef6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
goto done;
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
if (!dpage)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f6e78dba594f..34985771b2a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+ ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 6e4b7e4644ce..8b48bba18131 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
msleep(60);
hx83102_enable_extended_cmds(&dsi_ctx, true);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42,
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52,
0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88,
0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12,
@@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05,
- 0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78,
- 0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12,
- 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a,
- 0x41, 0x50, 0x68, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33,
+ 0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b,
+ 0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48,
+ 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c,
+ 0x4d, 0x56, 0x5d, 0x73);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e,
0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02,
0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01);
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 34182f67136c..c520f156e2d7 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = {
.read = drm_read,
.llseek = noop_llseek,
.mmap = panthor_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index ef232c0c2049..4e2d3a02ea06 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
struct panthor_fw_binary_iter *iter,
u32 ehdr)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
struct panthor_fw_binary_section_entry_hdr hdr;
struct panthor_fw_section *section;
u32 section_size;
@@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return -EINVAL;
}
- if ((hdr.va.start & ~PAGE_MASK) != 0 ||
- (hdr.va.end & ~PAGE_MASK) != 0) {
+ if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
hdr.va.start, hdr.va.end);
return -EINVAL;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 38f560864879..be97d56bc011 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
goto out_free_bo;
- ret = panthor_vm_unmap_range(vm, bo->va_node.start,
- panthor_kernel_bo_size(bo));
+ ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
if (ret)
goto out_free_bo;
@@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
}
bo = to_panthor_bo(&obj->base);
- size = obj->base.size;
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ /* The system and GPU MMU page size might differ, which becomes a
+ * problem for FW sections that need to be mapped at an explicit address,
+ * since our PAGE_SIZE alignment might cover a VA range that's
+ * expected to be used for another section.
+ * Make sure we never map more than we need.
+ */
+ size = ALIGN(size, panthor_vm_page_size(vm));
ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
if (ret)
goto err_put_obj;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index bbc12728437f..5d5e25b1be95 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
mutex_unlock(&ptdev->mmu->as.slots_lock);
}
+u32 panthor_vm_page_size(struct panthor_vm *vm)
+{
+ const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
+ u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
+
+ return 1u << pg_shift;
+}
+
static void panthor_vm_stop(struct panthor_vm *vm)
{
drm_sched_stop(&vm->sched, NULL);
@@ -1025,12 +1033,13 @@ int
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
struct drm_mm_node *va_node)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
int ret;
- if (!size || (size & ~PAGE_MASK))
+ if (!size || !IS_ALIGNED(size, vm_pgsz))
return -EINVAL;
- if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
return -EINVAL;
mutex_lock(&vm->mm_lock);
@@ -1251,9 +1260,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
goto err_cleanup;
}
+ /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
+ * pre-allocated BO if the <BO,VM> association exists. Given we
+ * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
+ * be called immediately, and we have to hold the VM resv lock when
+ * calling this function.
+ */
+ dma_resv_lock(panthor_vm_resv(vm), NULL);
mutex_lock(&bo->gpuva_list_lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
mutex_unlock(&bo->gpuva_list_lock);
+ dma_resv_unlock(panthor_vm_resv(vm));
/* If a vm_bo for this <VM,BO> combination exists, it already
* retains a pin ref, and we can release the one we took earlier.
@@ -2358,11 +2375,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
const struct drm_panthor_vm_bind_op *op,
struct panthor_vm_op_ctx *op_ctx)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
struct drm_gem_object *gem;
int ret;
/* Aligned on page size. */
- if ((op->va | op->size) & ~PAGE_MASK)
+ if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
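
panthor_vm_page_size() derives the granule from the io-pgtable's pgsize_bitmap: ffs() picks the smallest supported page, and every VA/size check then uses IS_ALIGNED() against that value instead of the CPU's PAGE_MASK, which diverges on 16K/64K-page kernels. A standalone sketch of the derivation, with the bitmap value illustrative:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Mirror of panthor_vm_page_size(): the lowest set bit of the
 * pgsize_bitmap is the smallest page the GPU MMU can map. */
static uint32_t vm_page_size(uint32_t pgsize_bitmap)
{
	return 1u << (ffs(pgsize_bitmap) - 1);
}

int main(void)
{
	/* e.g. 4K, 2M and 1G granules supported */
	uint32_t bitmap = (1u << 12) | (1u << 21) | (1u << 30);
	uint32_t pgsz = vm_page_size(bitmap);
	uint64_t va = 0x100000, size = 0x3000;

	printf("GPU page size: %u\n", pgsz);		/* 4096 */
	printf("va ok: %d, size ok: %d\n",
	       IS_ALIGNED(va, (uint64_t)pgsz),
	       IS_ALIGNED(size, (uint64_t)pgsz));	/* 1, 1 */
	return 0;
}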
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 6788771071e3..8d21e83d8aba 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
+u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
int panthor_vm_flush_all(struct panthor_vm *vm);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 91a31b70c037..9929e22f4d8d 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -589,10 +589,11 @@ struct panthor_group {
* @timedout: True when a timeout occurred on any of the queues owned by
* this group.
*
- * Timeouts can be reported by drm_sched or by the FW. In any case, any
- * timeout situation is unrecoverable, and the group becomes useless.
- * We simply wait for all references to be dropped so we can release the
- * group object.
+ * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
+ * and the group can't be suspended, this also leads to a timeout. In any case,
+ * any timeout situation is unrecoverable, and the group becomes useless. We
+ * simply wait for all references to be dropped so we can release the group
+ * object.
*/
bool timedout;
@@ -1103,7 +1104,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs
list_move_tail(&group->wait_node,
&group->ptdev->scheduler->groups.waiting);
}
- group->blocked_queues |= BIT(cs_id);
+
+ /* The queue is only blocked if there's no deferred operation
+ * pending, which can be checked through the scoreboard status.
+ */
+ if (!cs_iface->output->status_scoreboards)
+ group->blocked_queues |= BIT(cs_id);
+
queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
@@ -2046,6 +2053,7 @@ static void
tick_ctx_cleanup(struct panthor_scheduler *sched,
struct panthor_sched_tick_ctx *ctx)
{
+ struct panthor_device *ptdev = sched->ptdev;
struct panthor_group *group, *tmp;
u32 i;
@@ -2054,7 +2062,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
/* If everything went fine, we should only have groups
* to be terminated in the old_groups lists.
*/
- drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+ drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
group_can_run(group));
if (!group_can_run(group)) {
@@ -2077,7 +2085,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
/* If everything went fine, the groups to schedule lists should
* be empty.
*/
- drm_WARN_ON(&group->ptdev->base,
+ drm_WARN_ON(&ptdev->base,
!ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
@@ -2633,6 +2641,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
csgs_upd_ctx_init(&upd_ctx);
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ /* We consider group suspension failures as fatal and flag the
+ * group as unusable by setting timedout=true.
+ */
+ csg_slot->group->timedout = true;
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
CSG_STATE_TERMINATE,
@@ -3242,6 +3256,18 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
return 0;
}
+static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
+ u32 group_handle)
+{
+ struct panthor_group *group;
+
+ xa_lock(&pool->xa);
+ group = group_get(xa_load(&pool->xa, group_handle));
+ xa_unlock(&pool->xa);
+
+ return group;
+}
+
int panthor_group_get_state(struct panthor_file *pfile,
struct drm_panthor_group_get_state *get_state)
{
@@ -3253,7 +3279,7 @@ int panthor_group_get_state(struct panthor_file *pfile,
if (get_state->pad)
return -EINVAL;
- group = group_get(xa_load(&gpool->xa, get_state->group_handle));
+ group = group_from_handle(gpool, get_state->group_handle);
if (!group)
return -EINVAL;
@@ -3384,12 +3410,17 @@ panthor_job_create(struct panthor_file *pfile,
job->call_info.latest_flush = qsubmit->latest_flush;
INIT_LIST_HEAD(&job->node);
- job->group = group_get(xa_load(&gpool->xa, group_handle));
+ job->group = group_from_handle(gpool, group_handle);
if (!job->group) {
ret = -EINVAL;
goto err_put_job;
}
+ if (!group_can_run(job->group)) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
if (job->queue_idx >= job->group->queue_count ||
!job->group->queues[job->queue_idx]) {
ret = -EINVAL;
@@ -3424,13 +3455,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched
{
struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
- /* Still not sure why we want USAGE_WRITE for external objects, since I
- * was assuming this would be handled through explicit syncs being imported
- * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
- * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
- */
panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
}
void panthor_sched_unplug(struct panthor_device *ptdev)
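
group_from_handle() closes a small race: with xa_load() and group_get() performed outside the xarray lock, a concurrent destroy could drop the last reference between the two steps. A minimal standalone sketch of the lookup-under-lock pattern; the table and names are illustrative, not the driver's types:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct object {
	atomic_int refcount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct object *slots[16];

static struct object *object_get(struct object *obj)
{
	if (obj)
		atomic_fetch_add(&obj->refcount, 1);
	return obj;
}

/* The lock must cover both the load and the refcount bump; otherwise
 * a concurrent release can free the object in between. */
static struct object *lookup(unsigned int handle)
{
	struct object *obj = NULL;

	pthread_mutex_lock(&table_lock);
	if (handle < 16)
		obj = object_get(slots[handle]);
	pthread_mutex_unlock(&table_lock);

	return obj;	/* caller owns a reference, or NULL */
}

int main(void)
{
	struct object o = { .refcount = 1 };

	slots[3] = &o;
	printf("ref after lookup: %d\n",
	       lookup(3) ? atomic_load(&o.refcount) : -1);	/* 2 */
	return 0;
}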
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5bc3e6b41c34..b31125eb9a65 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/string_helpers.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fca8b08535a5..6328627b7c34 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
- int ret;
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
- radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev;
if (ASIC_IS_DCE5(rdev)) {
if (radeon_auxch)
@@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
}
- ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
- if (!ret)
- radeon_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
+ drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 528a8f3677c2..f9c73c55f04f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1786,6 +1786,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector
return MODE_OK;
}
+static int
+radeon_connector_late_register(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int r = 0;
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
+ r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
@@ -1800,6 +1814,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
static const struct drm_connector_funcs radeon_edp_connector_funcs = {
@@ -1810,6 +1825,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
@@ -1820,6 +1836,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
void
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 0f723292409e..fafed331e0a0 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
- uint32_t index_mask = 0;
+ uint32_t index_mask = drm_encoder_mask(encoder);
int count;
/* DIG routing gets problematic */
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9735f4968b86..bf2d4b16dc2a 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-const struct drm_gem_object_funcs radeon_gem_object_funcs;
-
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -132,7 +130,6 @@ retry:
return r;
}
*obj = &robj->tbo.base;
- (*obj)->funcs = &radeon_gem_object_funcs;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d0e4b43d155c..7672404fdb29 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev,
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
+ bo->tbo.base.funcs = &radeon_gem_object_funcs;
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 58c8161289fe..a75eede8bf8d 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
{
WARN_ON(!num_sched_list || !sched_list);
+ spin_lock(&entity->rq_lock);
entity->sched_list = sched_list;
entity->num_sched_list = num_sched_list;
+ spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
@@ -380,7 +382,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
container_of(cb, struct drm_sched_entity, cb);
drm_sched_entity_clear_dep(f, cb);
- drm_sched_wakeup(entity->rq->sched, entity);
+ drm_sched_wakeup(entity->rq->sched);
}
/**
@@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
/* first job wakes up scheduler */
if (first) {
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_rq *rq;
+
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
if (entity->stopped) {
@@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
return;
}
- drm_sched_rq_add_entity(entity->rq, entity);
+ rq = entity->rq;
+ sched = rq->sched;
+
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
drm_sched_rq_update_fifo(entity, submit_ts);
- drm_sched_wakeup(entity->rq->sched, entity);
+ drm_sched_wakeup(sched);
}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
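
The push_job change samples entity->rq and its scheduler while rq_lock is still held, because drm_sched_entity_select_rq() may retarget the entity the moment the lock drops. An annotated excerpt of the pattern, using the same calls as the hunk above:

spin_lock(&entity->rq_lock);
rq = entity->rq;
sched = rq->sched;		/* sample while the lock pins entity->rq */

drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);

/* entity->rq may change from here on; the wakeup uses the sampled
 * pointer, which is the queue the job actually landed on. */
drm_sched_wakeup(sched);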
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index ab53ab486fe6..e97c6c60bc96 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -87,6 +87,12 @@
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+ .name = "drm_sched_lockdep_map"
+};
+#endif
+
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
@@ -1013,15 +1019,12 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
* drm_sched_wakeup - Wake up the scheduler if it is ready to queue
* @sched: scheduler instance
- * @entity: the scheduler entity
*
* Wake up the scheduler if we can queue jobs.
*/
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
- if (drm_sched_can_queue(sched, entity))
- drm_sched_run_job_queue(sched);
+ drm_sched_run_job_queue(sched);
}
/**
@@ -1272,7 +1275,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->submit_wq = submit_wq;
sched->own_submit_wq = false;
} else {
- sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#ifdef CONFIG_LOCKDEP
+ sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+ WQ_MEM_RECLAIM,
+ &drm_sched_lockdep_map);
+#else
+ sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+#endif
if (!sched->submit_wq)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c9eb329665ec..34d22ba210b0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1153,8 +1153,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) {
tegra->domain = iommu_paging_domain_alloc(dma_dev);
- if (!tegra->domain) {
- err = -ENOMEM;
+ if (IS_ERR(tegra->domain)) {
+ err = PTR_ERR(tegra->domain);
goto free;
}
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 4de1ea0fc7c0..00c8564520e7 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -46,7 +46,6 @@ struct gr3d {
unsigned int nclocks;
struct reset_control_bulk_data resets[RST_GR3D_MAX];
unsigned int nresets;
- struct dev_pm_domain_list *pd_list;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -370,12 +369,18 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
return 0;
}
+static void gr3d_del_link(void *link)
+{
+ device_link_del(link);
+}
+
static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
{
- struct dev_pm_domain_attach_data pd_data = {
- .pd_names = (const char *[]) { "3d0", "3d1" },
- .num_pd_names = 2,
- };
+ static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
+ const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+ struct device **opp_virt_devs, *pd_dev;
+ struct device_link *link;
+ unsigned int i;
int err;
err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -409,10 +414,29 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
if (dev->pm_domain)
return 0;
- err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
- if (err < 0)
+ err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
+ if (err)
return err;
+ for (i = 0; opp_genpd_names[i]; i++) {
+ pd_dev = opp_virt_devs[i];
+ if (!pd_dev) {
+ dev_err(dev, "failed to get %s power domain\n",
+ opp_genpd_names[i]);
+ return -EINVAL;
+ }
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
+ return -EINVAL;
+ }
+
+ err = devm_add_action_or_reset(dev, gr3d_del_link, link);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -503,13 +527,13 @@ static int gr3d_probe(struct platform_device *pdev)
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- goto err;
+ return err;
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- goto err;
+ return err;
}
/* initialize address register map */
@@ -517,9 +541,6 @@ static int gr3d_probe(struct platform_device *pdev)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
return 0;
-err:
- dev_pm_domain_detach_list(gr3d->pd_list);
- return err;
}
static void gr3d_remove(struct platform_device *pdev)
@@ -528,7 +549,6 @@ static void gr3d_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr3d->client.base);
- dev_pm_domain_detach_list(gr3d->pd_list);
}
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
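
The gr3d_del_link()/devm_add_action_or_reset() pairing is what lets the probe path drop its error labels: the release callback runs automatically on probe failure and on driver detach. A kernel-style sketch of that pattern, with the my_*() names assumed:

#include <linux/device.h>

static void my_del_link(void *data)
{
	struct device_link *link = data;

	device_link_del(link);
}

/* devm_add_action_or_reset() registers a cleanup that runs on probe
 * failure and on detach; if registration itself fails, the action
 * runs immediately, so no manual unwind label is needed. */
static int my_link_power_domain(struct device *dev, struct device *pd_dev)
{
	struct device_link *link;

	link = device_link_add(dev, pd_dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	return devm_add_action_or_reset(dev, my_del_link, link);
}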
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 15e36a8db685..6bba97d0be88 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -996,7 +996,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1017,7 +1017,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1038,7 +1038,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB);
@@ -1056,7 +1056,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1077,7 +1077,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB);
@@ -1095,7 +1095,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 6);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1118,7 +1118,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit
unsigned long long rate;
unsigned int vic = *(unsigned int *)test->param_value;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1155,7 +1155,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1180,7 +1180,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1203,7 +1203,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1225,7 +1225,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1247,7 +1247,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 34ee95d41f29..294773342e71 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -441,7 +441,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -555,7 +555,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -671,7 +671,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -1263,7 +1263,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
/*
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index aa62719dab0e..04a6b8cc62ac 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
@@ -311,6 +312,47 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+static void kunit_action_drm_mode_destroy(void *ptr)
+{
+ struct drm_display_mode *mode = ptr;
+
+ drm_mode_destroy(NULL, mode);
+}
+
+/**
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
+ * @test: The test context object
+ * @dev: DRM device
+ * @video_code: CEA VIC of the mode
+ *
+ * Creates a new mode matching the specified CEA VIC for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
+ u8 video_code)
+{
+ struct drm_display_mode *mode;
+ int ret;
+
+ mode = drm_display_mode_from_cea_vic(dev, video_code);
+ if (!mode)
+ return NULL;
+
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_drm_mode_destroy,
+ mode);
+ if (ret)
+ return NULL;
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic);
+
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_DESCRIPTION("KUnit test suite helper functions");
MODULE_LICENSE("GPL");
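
A hypothetical caller showing the managed lifetime: because the helper registers a kunit action, the test never calls drm_mode_destroy() itself. The my_test_priv fixture type is assumed for illustration:

static void drm_test_mode_from_vic_16(struct kunit *test)
{
	struct my_test_priv *priv = test->priv;	/* assumed fixture */
	struct drm_display_mode *mode;

	mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
	KUNIT_ASSERT_NOT_NULL(test, mode);

	/* VIC 16 is 1920x1080p60 */
	KUNIT_EXPECT_EQ(test, mode->hdisplay, 1920);
	KUNIT_EXPECT_EQ(test, mode->vdisplay, 1080);
}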
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 5ff1037a3453..62224992988f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -7,7 +7,7 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "udl_drv.h"
#include "udl_proto.h"
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index cd7f1eedf17f..00cd081d7873 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
static int v3d_perfmon_idr_del(int id, void *elem, void *data)
{
struct v3d_perfmon *perfmon = elem;
+ struct v3d_dev *v3d = (struct v3d_dev *)data;
+
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == v3d->active_perfmon)
+ v3d_perfmon_stop(v3d, perfmon, false);
v3d_perfmon_put(perfmon);
@@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
{
+ struct v3d_dev *v3d = v3d_priv->v3d;
+
mutex_lock(&v3d_priv->perfmon.lock);
- idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
+ idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
idr_destroy(&v3d_priv->perfmon.idr);
mutex_unlock(&v3d_priv->perfmon.lock);
mutex_destroy(&v3d_priv->perfmon.lock);
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 8c041d7ce4f1..87dccaecc3e5 100644
--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -139,7 +139,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
flags |= VBOX_MOUSE_POINTER_VISIBLE;
}
- p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+ /*
+ * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
+ * from a fixed 4-byte array at the end to a proper VLA. These 4
+ * extra bytes were not subtracted from sizeof(*p) before the switch
+ * to the VLA, so the behavior is unchanged. Chances are the extra
+ * bytes are not necessary, but they are kept to avoid regressions.
+ */
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
VBVA_MOUSE_POINTER_SHAPE);
if (!p)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index f60d82504da0..79ec8481de0e 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
* Bytes in the gap between the AND and the XOR mask are undefined.
* XOR mask scanlines have no gap between them and size of XOR mask is:
* xor_len = width * 4 * height.
- *
- * Preallocate 4 bytes for accessing actual data as p->data.
*/
- u8 data[4];
+ u8 data[];
} __packed;
/* pointer is visible */
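
The "+ 4" in the allocation above falls directly out of how sizeof() treats the two struct layouts: the trailing array counts toward sizeof() as a fixed member but not as a flexible array member. A standalone demonstration, with a minimal stand-in struct:

#include <stdio.h>
#include <stdint.h>

struct shape_fixed { uint32_t flags; uint8_t data[4]; } __attribute__((packed));
struct shape_vla   { uint32_t flags; uint8_t data[];  } __attribute__((packed));

int main(void)
{
	/* sizeof() no longer includes the trailing array once it becomes
	 * a VLA, which is why the allocation adds pixel_len + 4 to keep
	 * the buffer size bit-identical to the old code. */
	printf("fixed: %zu, vla: %zu\n",
	       sizeof(struct shape_fixed), sizeof(struct shape_vla));
	/* fixed: 8, vla: 4 */
	return 0;
}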
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index c4ac2c946238..c00a5cc2316d 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
{
struct vc4_perfmon *perfmon = elem;
+ struct vc4_dev *vc4 = (struct vc4_dev *)data;
+
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == vc4->active_perfmon)
+ vc4_perfmon_stop(vc4, perfmon, false);
vc4_perfmon_put(perfmon);
@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
return;
mutex_lock(&vc4file->perfmon.lock);
- idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+ idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
idr_destroy(&vc4file->perfmon.idr);
mutex_unlock(&vc4file->perfmon.lock);
mutex_destroy(&vc4file->perfmon.lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 890a66a2361f..64bd7d74854e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -635,10 +635,8 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
- if (src_pages)
- kvfree(src_pages);
- if (dst_pages)
- kvfree(dst_pages);
+ kvfree(src_pages);
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3f4719b3c268..4e2807f5f94c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -62,7 +62,7 @@
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_NUM_DISPLAY_UNITS 8
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_MIN_INITIAL_WIDTH 1280
@@ -82,7 +82,7 @@
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 288ed0bb75cb..63b8d7591253 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1283,7 +1283,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
- enum SVGA3dSurfaceFormat format;
struct vmw_surface *surface;
int ret;
@@ -1320,34 +1319,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return -EINVAL;
}
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_ARGB8888:
- format = SVGA3D_A8R8G8B8;
- break;
- case DRM_FORMAT_XRGB8888:
- format = SVGA3D_X8R8G8B8;
- break;
- case DRM_FORMAT_RGB565:
- format = SVGA3D_R5G6B5;
- break;
- case DRM_FORMAT_XRGB1555:
- format = SVGA3D_A1R5G5B5;
- break;
- default:
- DRM_ERROR("Invalid pixel format: %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
-
- /*
- * For DX, surface format validation is done when surface->scanout
- * is set.
- */
- if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
- DRM_ERROR("Invalid surface format for requested mode.\n");
- return -EINVAL;
- }
-
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
@@ -1539,6 +1510,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
+ ret = -EINVAL;
goto err_out;
}
@@ -2225,7 +2197,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
- void __user *user_rects;
+ const void __user *user_rects;
struct drm_vmw_rect *rects;
struct drm_rect *drm_rects;
unsigned rects_size;
@@ -2237,6 +2209,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
VMWGFX_MIN_INITIAL_HEIGHT};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
+ } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
+ return -E2BIG;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
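
The -E2BIG check rejects a user-controlled count before it is multiplied into an allocation size, so an absurd num_outputs can neither overflow rects_size nor pin a huge buffer. A standalone sketch of the ioctl-style validation, with values illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_DISPLAY_UNITS 8

struct rect { int32_t x, y, w, h; };

/* Bound the count first, then derive the allocation size from it. */
static int update_layout(uint32_t num_outputs)
{
	if (num_outputs > NUM_DISPLAY_UNITS)
		return -E2BIG;

	size_t rects_size = num_outputs * sizeof(struct rect);
	struct rect *rects = calloc(1, rects_size ? rects_size : 1);

	if (!rects)
		return -ENOMEM;

	/* ... copy_from_user() equivalent and layout update here ... */
	free(rects);
	return 0;
}

int main(void)
{
	printf("%d\n", update_layout(9));	/* -E2BIG (-7) */
	printf("%d\n", update_layout(2));	/* 0 */
	return 0;
}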
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6141fadf81ef..2a6c6d6581e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -199,9 +199,6 @@ struct vmw_kms_dirty {
s32 unit_y2;
};
-#define VMWGFX_NUM_DISPLAY_UNITS 8
-
-
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
#define vmw_framebuffer_to_vfbs(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index fab155a68054..82d18b88f4a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
struct drm_crtc_state *new_crtc_state;
conn_state = drm_atomic_get_connector_state(state, conn);
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
du = vmw_connector_to_stdu(conn);
if (!conn_state->crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 1625b30d9970..5721c74da3e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv,
const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
SVGA3D_SURFACE_HINT_RENDERTARGET |
- SVGA3D_SURFACE_SCREENTARGET |
- SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
- SVGA3D_SURFACE_BIND_RENDER_TARGET;
+ SVGA3D_SURFACE_SCREENTARGET;
+
+ if (vmw_surface_is_dx_screen_target_format(format)) {
+ flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+ SVGA3D_SURFACE_BIND_RENDER_TARGET;
+ }
/*
* Without mob support we're just going to use raw memory buffer
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 75736faf2a80..c6e0c8d77a70 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -309,18 +309,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
-void xe_display_pm_runtime_suspend(struct xe_device *xe)
-{
- if (!xe->info.probe_display)
- return;
-
- if (xe->d3cold.allowed)
- xe_display_pm_suspend(xe, true);
-
- intel_hpd_poll_enable(xe);
-}
-
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
@@ -353,28 +342,38 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
intel_dmc_suspend(xe);
+
+ if (runtime && has_display(xe))
+ intel_hpd_poll_enable(xe);
}
-void xe_display_pm_suspend_late(struct xe_device *xe)
+void xe_display_pm_suspend(struct xe_device *xe)
+{
+ __xe_display_pm_suspend(xe, false);
+}
+
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
- bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
return;
- intel_power_domains_suspend(xe, s2idle);
+ if (xe->d3cold.allowed) {
+ __xe_display_pm_suspend(xe, true);
+ return;
+ }
- intel_display_power_suspend_late(xe);
+ intel_hpd_poll_enable(xe);
}
-void xe_display_pm_runtime_resume(struct xe_device *xe)
+void xe_display_pm_suspend_late(struct xe_device *xe)
{
+ bool s2idle = suspend_to_idle();
if (!xe->info.probe_display)
return;
- intel_hpd_poll_disable(xe);
+ intel_power_domains_suspend(xe, s2idle);
- if (xe->d3cold.allowed)
- xe_display_pm_resume(xe, true);
+ intel_display_power_suspend_late(xe);
}
void xe_display_pm_resume_early(struct xe_device *xe)
@@ -387,7 +386,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
intel_power_domains_resume(xe);
}
-void xe_display_pm_resume(struct xe_device *xe, bool runtime)
+static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
@@ -411,9 +410,11 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_display_driver_resume(xe);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(xe);
- intel_hpd_poll_disable(xe);
}
+ if (has_display(xe))
+ intel_hpd_poll_disable(xe);
+
intel_opregion_resume(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
@@ -421,6 +422,26 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_power_domains_enable(xe);
}
+void xe_display_pm_resume(struct xe_device *xe)
+{
+ __xe_display_pm_resume(xe, false);
+}
+
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed) {
+ __xe_display_pm_resume(xe, true);
+ return;
+ }
+
+ intel_hpd_init(xe);
+ intel_hpd_poll_disable(xe);
+}
+
static void display_device_remove(struct drm_device *dev, void *arg)
{
struct xe_device *xe = arg;
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 53d727fd792b..bed55fd26f30 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -34,10 +34,10 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
+void xe_display_pm_suspend(struct xe_device *xe);
void xe_display_pm_suspend_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
-void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
@@ -65,10 +65,10 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
static inline void xe_display_irq_reset(struct xe_device *xe) {}
static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
-static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
-static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 660ff42e45a6..00ad34ed73a5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -169,6 +169,8 @@
#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
+
#define VF_PREEMPTION XE_REG(0x83a4, XE_REG_OPTION_MASKED)
#define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)
@@ -378,6 +380,9 @@
#define L3SQCREG3 XE_REG_MCR(0xb108)
#define COMPPWOVERFETCHEN REG_BIT(28)
+#define SCRATCH3_LBCF XE_REG_MCR(0xb154)
+#define RWFLUSHALLEN REG_BIT(17)
+
#define XEHP_L3SQCREG5 XE_REG_MCR(0xb158)
#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
@@ -388,8 +393,11 @@
#define XE2_GLOBAL_INVAL XE_REG(0xb404)
-#define SCRATCH1LPFC XE_REG(0xb474)
-#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0)
+#define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604)
+
+#define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608)
+
+#define XE2LPM_SCRATCH3_LBCF XE_REG_MCR(0xb654)
#define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index f379df3a12bf..e5f51fd23c65 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -680,8 +680,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
(ttm->page_flags & TTM_TT_FLAG_SWAPPED));
- move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) :
- (!mem_type_is_vram(old_mem_type) && !tt_has_data);
+ move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
+ (!mem_type_is_vram(old_mem_type) && !tt_has_data));
needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
(!ttm && ttm_bo->type == ttm_bo_type_device);
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 668615c6b172..fe4319eb13fd 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -187,7 +187,7 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("forcewake_all", 0400, root, xe,
&forcewake_all_fops);
- debugfs_create_file("wedged_mode", 0400, root, xe,
+ debugfs_create_file("wedged_mode", 0600, root, xe,
&wedged_mode_fops);
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 70d4e4d46c3c..10fd4601b9f2 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -171,10 +171,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
- mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
- mutex_unlock(&xef->vm.lock);
xe_file_put(xef);
@@ -298,6 +296,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
+ if (xe->destroy_wq)
+ destroy_workqueue(xe->destroy_wq);
+
ttm_device_fini(&xe->ttm);
}
@@ -336,9 +337,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
init_waitqueue_head(&xe->ufence_wq);
- err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
- if (err)
- goto err;
+ init_rwsem(&xe->usm.lock);
xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
@@ -363,8 +362,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
+ xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
if (!xe->ordered_wq || !xe->unordered_wq ||
- !xe->preempt_fence_wq) {
+ !xe->preempt_fence_wq || !xe->destroy_wq) {
/*
* Cleanup done in xe_device_destroy via
* drmm_add_action_or_reset register above
@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
spin_lock(&gt->global_invl_lock);
xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+ if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
@@ -980,13 +980,13 @@ void xe_device_declare_wedged(struct xe_device *xe)
return;
}
+ xe_pm_runtime_get_noresume(xe);
+
if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
return;
}
- xe_pm_runtime_get_noresume(xe);
-
if (!atomic_xchg(&xe->wedged.flag, 1)) {
xe->needs_flr_on_fini = true;
drm_err(&xe->drm,
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index ec7eb7811126..09d731a9125c 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -369,7 +369,7 @@ struct xe_device {
/** @usm.next_asid: next ASID, used to cyclical alloc asids */
u32 next_asid;
/** @usm.lock: protects UM state */
- struct mutex lock;
+ struct rw_semaphore lock;
} usm;
/** @pinned: pinned BO state */
@@ -396,6 +396,9 @@ struct xe_device {
/** @unordered_wq: used to serialize unordered work, mostly display */
struct workqueue_struct *unordered_wq;
+ /** @destroy_wq: used to serialize user destroy work, such as exec queue destruction */
+ struct workqueue_struct *destroy_wq;
+
/** @tiles: device tiles */
struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
@@ -567,15 +570,23 @@ struct xe_file {
struct {
/** @vm.xe: xarray to store VMs */
struct xarray xa;
- /** @vm.lock: protects file VM state */
+ /**
+ * @vm.lock: Protects VM lookup + reference and removal from the
+ * file xarray. Not intended to be an outer lock which does
+ * anything while being held.
+ */
struct mutex lock;
} vm;
/** @exec_queue: Submission exec queue state for file */
struct {
- /** @exec_queue.xe: xarray to store engines */
+ /** @exec_queue.xa: xarray to store exec queues */
struct xarray xa;
- /** @exec_queue.lock: protects file engine state */
+ /**
+ * @exec_queue.lock: Protects exec queue lookup + reference and
+ * removal from the file xarray. Not intended to be an outer lock
+ * which does anything while being held.
+ */
struct mutex lock;
} exec_queue;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index c4add8b38bbd..fb52a23e28f8 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -283,8 +283,15 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
- xa_for_each(&xef->exec_queue.xa, i, q)
+ xa_for_each(&xef->exec_queue.xa, i, q) {
+ xe_exec_queue_get(q);
+ mutex_unlock(&xef->exec_queue.lock);
+
xe_exec_queue_update_run_ticks(q);
+
+ mutex_lock(&xef->exec_queue.lock);
+ xe_exec_queue_put(q);
+ }
mutex_unlock(&xef->exec_queue.lock);
/* Get the total GPU cycles */
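
The show_run_ticks() hunk above follows a common kernel pattern: when per-entry work may sleep or take other locks, pin the entry with a reference under the lock, drop the lock for the call, then retake it before continuing the walk. A minimal sketch of the pattern, where struct obj, obj_get(), obj_put() and obj_do_work() are hypothetical stand-ins rather than xe API:

	static void walk_objects(struct xarray *xa, struct mutex *lock)
	{
		struct obj *o;
		unsigned long idx;

		mutex_lock(lock);
		xa_for_each(xa, idx, o) {
			obj_get(o);		/* keep o alive across the unlock */
			mutex_unlock(lock);

			obj_do_work(o);		/* may sleep or take other locks */

			mutex_lock(lock);
			obj_put(o);		/* xa_for_each() resumes from idx */
		}
		mutex_unlock(lock);
	}

This is safe because xa_for_each() restarts the lookup from the current index, so dropping the lock mid-walk cannot corrupt the iteration.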
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 7b38485817dc..f23ac1e2ed88 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -41,11 +41,6 @@
* user knows an exec writes to a BO and reads from the BO in the next exec, it
* is the user's responsibility to pass in / out fence between the two execs).
*
- * Implicit dependencies for external BOs are handled by using the dma-buf
- * implicit dependency uAPI (TODO: add link). To make this works each exec must
- * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
- * BO mapped in the VM.
- *
* We do not allow a user to trigger a bind at exec time rather we have a VM
* bind IOCTL which uses the same in / out fence interface as exec. In that
* sense, a VM bind is basically the same operation as an exec from the user
@@ -59,8 +54,8 @@
* behind any pending kernel operations on any external BOs in VM or any BOs
* private to the VM. This is accomplished by the rebinds waiting on BOs
* DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs
- * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and
- * in DMA_RESV_USAGE_WRITE for external BOs).
+ * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and
+ * for external BOs).
*
* Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute
* mode VMs we use preempt fences and a rebind worker (TODO: add link).
@@ -304,7 +299,8 @@ retry:
xe_sched_job_arm(job);
if (!xe_vm_in_lr_mode(vm))
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
for (i = 0; i < num_syncs; i++) {
xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 7f28b7fc68d5..d098d2dd1b2d 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -635,14 +635,14 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
}
- mutex_lock(&xef->exec_queue.lock);
+ q->xef = xe_file_get(xef);
+
+ /* user id alloc must always be last in ioctl to prevent UAF */
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
- mutex_unlock(&xef->exec_queue.lock);
if (err)
goto kill_exec_queue;
args->exec_queue_id = id;
- q->xef = xe_file_get(xef);
return 0;
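
Moving xa_alloc() to the end of the ioctl is a publish-last pattern: until the ID is allocated, no other thread can reach q through the file xarray, so a racing destroy ioctl cannot free it mid-initialization. Schematically (a sketch of the ordering, using the names from the hunk above):

	q->xef = xe_file_get(xef);	/* finish all initialization first */

	/* Publishing the ID is the final step; once userspace can see it,
	 * a concurrent destroy ioctl may run at any time. */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;	/* never published, safe to tear down */

	args->exec_queue_id = id;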
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index b263fff15273..7d9fc489dcb8 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
- xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
- domain->id, str_wake_sleep(wake), ERR_PTR(ret),
- domain->reg_ack.addr, value);
+ xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+ domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+ domain->reg_ack.addr, value);
+ if (value == ~0) {
+ xe_gt_err(gt,
+ "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+ domain->id, str_wake_sleep(wake));
+ ret = -EIO;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 2895f154654c..ff19eca5d358 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -397,6 +397,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+
+ /*
+ * XXX: Barrier for GGTT pages. Unsure exactly why this is required,
+ * but without it LNL has issues with the GuC reading the scratch page
+ * instead of the correct GGTT page. This is not a particularly hot
+ * code path, so blindly do an MMIO read here, which results in the
+ * GuC reading the correct GGTT page.
+ */
+ xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
+
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index c518d1d16d82..50361b4638f9 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -90,6 +90,11 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
cancel_work_sync(&sched->work_process_msg);
}
+void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_resume_timeout(&sched->base, sched->base.timeout);
+}
+
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg)
{
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index cee9c6809fc0..64b2ae6839db 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -22,6 +22,8 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
+void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
+
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
@@ -61,7 +63,9 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
struct xe_sched_job *job)
{
+ spin_lock(&sched->base.job_list_lock);
list_add(&job->drm.list, &sched->base.pending_list);
+ spin_unlock(&sched->base.job_list_lock);
}
static inline
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index f0dc2bf24c7b..d5fd6a089b7c 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -108,7 +108,6 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
return;
if (!xe_gt_is_media_type(gt)) {
- xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
@@ -874,7 +873,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
int ret = 0;
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
- xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
+ xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
+ xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
+ XE_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 68a5778b4319..ab76973f3e1e 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -237,11 +237,11 @@ int xe_gt_freq_init(struct xe_gt *gt)
if (!gt->freq)
return -ENOMEM;
- err = devm_add_action(xe->drm.dev, freq_fini, gt->freq);
+ err = sysfs_create_files(gt->freq, freq_attrs);
if (err)
return err;
- err = sysfs_create_files(gt->freq, freq_attrs);
+ err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 7d7bd0be6233..c834f64b0178 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -439,7 +439,7 @@ void xe_gt_mcr_init(struct xe_gt *gt)
if (gt->info.type == XE_GT_TYPE_MEDIA) {
drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);
- if (MEDIA_VER(xe) >= 20) {
+ if (MEDIA_VERx100(xe) >= 1301) {
gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
} else {
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 00af059a8971..79c426dc2505 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -185,6 +185,21 @@ unlock_dma_resv:
return err;
}
+static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
+{
+ struct xe_vm *vm;
+
+ down_read(&xe->usm.lock);
+ vm = xa_load(&xe->usm.asid_to_vm, asid);
+ if (vm && xe_vm_in_fault_mode(vm))
+ xe_vm_get(vm);
+ else
+ vm = ERR_PTR(-EINVAL);
+ up_read(&xe->usm.lock);
+
+ return vm;
+}
+
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -197,16 +212,9 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
if (pf->trva_fault)
return -EFAULT;
- /* ASID to VM */
- mutex_lock(&xe->usm.lock);
- vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
- if (vm && xe_vm_in_fault_mode(vm))
- xe_vm_get(vm);
- else
- vm = NULL;
- mutex_unlock(&xe->usm.lock);
- if (!vm)
- return -EINVAL;
+ vm = asid_to_vm(xe, pf->asid);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
/*
* TODO: Change to read lock? Using write lock for simplicity.
@@ -548,14 +556,9 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
if (acc->access_type != ACC_TRIGGER)
return -EINVAL;
- /* ASID to VM */
- mutex_lock(&xe->usm.lock);
- vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
- if (vm)
- xe_vm_get(vm);
- mutex_unlock(&xe->usm.lock);
- if (!vm || !xe_vm_in_fault_mode(vm))
- return -EINVAL;
+ vm = asid_to_vm(xe, acc->asid);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
down_read(&vm->lock);
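
With usm.lock now a struct rw_semaphore, ASID translation in the fault and access-counter paths runs under a shared read lock, while VM create and teardown take it for write. In outline (names as in the hunks above; an illustrative sketch, not complete driver code):

	/* Read side: many fault handlers may translate ASIDs concurrently. */
	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm && xe_vm_in_fault_mode(vm))
		xe_vm_get(vm);
	else
		vm = ERR_PTR(-EINVAL);
	up_read(&xe->usm.lock);

	/* Write side: mutating the ASID map remains exclusive. */
	down_write(&xe->usm.lock);
	err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
			      XA_LIMIT(1, XE_MAX_ASID - 1),
			      &xe->usm.next_asid, GFP_KERNEL);
	up_write(&xe->usm.lock);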
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.c b/drivers/gpu/drm/xe/xe_gt_sysfs.c
index a05c3699e8b9..ec2b8246204b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.c
@@ -51,5 +51,5 @@ int xe_gt_sysfs_init(struct xe_gt *gt)
gt->sysfs = &kg->base;
- return devm_add_action(xe->drm.dev, gt_sysfs_fini, gt);
+ return devm_add_action_or_reset(xe->drm.dev, gt_sysfs_fini, gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index cca9cf536f76..bbb9e411d21f 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -37,6 +37,15 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
return hw_tlb_timeout + 2 * delay;
}
+static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->gt))
+ return;
+
+ xe_pm_runtime_put(gt_to_xe(fence->gt));
+ fence->gt = NULL; /* fini() must only be called once */
+}
+
static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
@@ -204,7 +213,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
tlb_timeout_jiffies(gt));
}
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else if (ret < 0) {
+ } else {
__invalidation_fence_signal(xe, fence);
}
if (!ret) {
@@ -267,10 +276,8 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
ret = xe_gt_tlb_invalidation_guc(gt, &fence);
- if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence);
+ if (ret)
return ret;
- }
xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
@@ -496,7 +503,8 @@ static const struct dma_fence_ops invalidation_fence_ops = {
* @stack: fence is stack variable
*
* Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
- * must be called if fence is not signaled.
+ * will be called automatically when the fence is signaled (all fences
+ * must signal), even on error.
*/
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -516,14 +524,3 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
dma_fence_get(&fence->base);
fence->gt = gt;
}
-
-/**
- * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
- * @fence: TLB invalidation fence to finalize
- *
- * Drop PM ref which fence took durinig init.
- */
-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
-{
- xe_pm_runtime_put(gt_to_xe(fence->gt));
-}
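
With the fini folded into the signal path, the caller-side lifecycle reduces to init, send, wait; the PM reference taken in _init() is dropped automatically when the fence signals, including on the error path inside the send itself. Usage sketch, matching the xe_gt_tlb_invalidation_ggtt() hunk above:

	struct xe_gt_tlb_invalidation_fence fence;
	int ret;

	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
	ret = xe_gt_tlb_invalidation_guc(gt, &fence);
	if (ret)
		return ret;	/* fence already signaled and finalized */
	xe_gt_tlb_invalidation_fence_wait(&fence);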
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index a84065fa324c..f430d5797af7 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -28,7 +28,6 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
bool stack);
-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index f24dd5223926..17986bfd8818 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -667,16 +667,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
num_g2h = 1;
if (g2h_fence_needs_alloc(g2h_fence)) {
- void *ptr;
-
g2h_fence->seqno = next_ct_seqno(ct, true);
- ptr = xa_store(&ct->fence_lookup,
- g2h_fence->seqno,
- g2h_fence, GFP_ATOMIC);
- if (IS_ERR(ptr)) {
- ret = PTR_ERR(ptr);
+ ret = xa_err(xa_store(&ct->fence_lookup,
+ g2h_fence->seqno, g2h_fence,
+ GFP_ATOMIC));
+ if (ret)
goto out;
- }
}
seqno = g2h_fence->seqno;
@@ -879,14 +875,11 @@ retry:
retry_same_fence:
ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
if (unlikely(ret == -ENOMEM)) {
- void *ptr;
-
/* Retry allocation /w GFP_KERNEL */
- ptr = xa_store(&ct->fence_lookup,
- g2h_fence.seqno,
- &g2h_fence, GFP_KERNEL);
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
+ ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+ &g2h_fence, GFP_KERNEL));
+ if (ret)
+ return ret;
goto retry_same_fence;
} else if (unlikely(ret)) {
@@ -903,16 +896,44 @@ retry_same_fence:
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
+ /*
+ * Occasionally the G2H worker is seen to start running more than a second
+ * after being queued and activated by the Linux workqueue subsystem, which
+ * leads to a G2H timeout error. The root cause lies in the scheduling
+ * latency of the Lunarlake hybrid CPU; the issue disappears if the
+ * Lunarlake Atom cores are disabled in the BIOS, which is beyond the xe KMD.
+ *
+ * TODO: Drop this change once the workqueue scheduling delay is fixed on LNL hybrid CPUs.
+ */
+ if (!ret) {
+ flush_work(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+ ret = 1;
+ }
+ }
+
+ /*
+ * Ensure we serialize with the completion side to prevent a UAF on the fence going out
+ * of scope on the stack, since we have no way of knowing whether it will fire after the
+ * timeout but before we can erase it from the xarray. We also have some dependent loads
+ * and stores below for which we need the correct ordering, and we lack the needed barriers.
+ */
+ mutex_lock(&ct->lock);
if (!ret) {
- xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
- g2h_fence.seqno, action[0]);
+ xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
+ g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ mutex_unlock(&ct->lock);
return -ETIME;
}
if (g2h_fence.retry) {
xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
action[0], g2h_fence.reason);
+ mutex_unlock(&ct->lock);
goto retry;
}
if (g2h_fence.fail) {
@@ -921,7 +942,12 @@ retry_same_fence:
ret = -EIO;
}
- return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
+ if (ret > 0)
+ ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+ mutex_unlock(&ct->lock);
+
+ return ret;
}
/**
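
Both fence_lookup hunks above swap the IS_ERR() check on xa_store()'s return for xa_err(), the idiomatic way to extract an errno from an xarray call that returns either the previous entry or an encoded error. In isolation:

	/* xa_store() returns the old entry on success or an XA-encoded
	 * error pointer on failure; xa_err() maps that to 0 or -errno. */
	int err = xa_err(xa_store(xa, index, item, GFP_KERNEL));
	if (err)
		return err;	/* e.g. -ENOMEM */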
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index fbbe6a487bbb..f903b0772722 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -224,64 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
EXEC_QUEUE_STATE_BANNED));
}
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
- int i;
-
- for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
- guc->submission_state.submit_wq_pool[i] =
- alloc_ordered_workqueue("submit_wq", 0);
- if (!guc->submission_state.submit_wq_pool[i])
- goto err_free;
- }
-
- return 0;
-
-err_free:
- while (i)
- destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
- return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
- int i;
-
- for (i = 0; i < NUM_SUBMIT_WQ; ++i)
- destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
- int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
- return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
- return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
- return NULL;
-}
-#endif
-
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
xa_destroy(&guc->submission_state.exec_queue_lookup);
- free_submit_wq(guc);
}
static void guc_submit_wedged_fini(void *arg)
@@ -290,9 +237,15 @@ static void guc_submit_wedged_fini(void *arg)
struct xe_exec_queue *q;
unsigned long index;
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
- if (exec_queue_wedged(q))
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ if (exec_queue_wedged(q)) {
+ mutex_unlock(&guc->submission_state.lock);
xe_exec_queue_put(q);
+ mutex_lock(&guc->submission_state.lock);
+ }
+ }
+ mutex_unlock(&guc->submission_state.lock);
}
static const struct xe_exec_queue_ops guc_exec_queue_ops;
@@ -337,14 +290,12 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
if (err)
return err;
- err = alloc_submit_wq(guc);
- if (err)
- return err;
-
gt->exec_queue_ops = &guc_exec_queue_ops;
xa_init(&guc->submission_state.exec_queue_lookup);
+ init_waitqueue_head(&guc->submission_state.fini_wq);
+
primelockdep(guc);
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
@@ -361,12 +312,14 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
q->guc->id, q->width);
+
+ if (xa_empty(&guc->submission_state.exec_queue_lookup))
+ wake_up(&guc->submission_state.fini_wq);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
int ret;
- void *ptr;
int i;
/*
@@ -386,12 +339,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
q->guc->id = ret;
for (i = 0; i < q->width; ++i) {
- ptr = xa_store(&guc->submission_state.exec_queue_lookup,
- q->guc->id + i, q, GFP_NOWAIT);
- if (IS_ERR(ptr)) {
- ret = PTR_ERR(ptr);
+ ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
+ q->guc->id + i, q, GFP_NOWAIT));
+ if (ret)
goto err_release;
- }
}
return 0;
@@ -965,12 +916,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
- u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
- u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+ u32 ctx_timestamp, ctx_job_timestamp;
u32 timeout_ms = q->sched_props.job_timeout_ms;
u32 diff;
u64 running_time_ms;
+ if (!xe_sched_job_started(job)) {
+ xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id);
+
+ return xe_sched_invalidate_job(job, 2);
+ }
+
+ ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
+ ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+
/*
* Counter wraps at ~223s at the usual 19.2MHz, be paranoid catch
* possible overflows with a high timeout.
@@ -1079,10 +1040,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
/*
* TDR has fired before free job worker. Common if exec queue
- * immediately closed after last fence signaled.
+ * immediately closed after last fence signaled. Add the job back to
+ * the pending list so it can be freed and kick the scheduler to
+ * ensure the free job is not lost.
*/
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- guc_exec_queue_free_job(drm_job);
+ xe_sched_add_pending_job(sched, job);
+ xe_sched_submission_start(sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -1095,10 +1059,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
exec_queue_killed_or_banned_or_wedged(q) ||
exec_queue_destroyed(q);
- /* Job hasn't started, can't be timed out */
- if (!skip_timeout_check && !xe_sched_job_started(job))
- goto rearm;
-
/*
* XXX: Sampling timeout doesn't work in wedged mode as we have to
* modify scheduling state to read timestamp. We could read the
@@ -1268,13 +1228,16 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
__guc_exec_queue_fini_async(&q->guc->fini_async);
else
- queue_work(system_wq, &q->guc->fini_async);
+ queue_work(xe->destroy_wq, &q->guc->fini_async);
}
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -1452,8 +1415,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
- get_submit_wq(guc),
- q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+ NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err)
@@ -1770,8 +1732,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)
mutex_lock(&guc->submission_state.lock);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
guc_exec_queue_stop(guc, q);
+ }
mutex_unlock(&guc->submission_state.lock);
@@ -1796,6 +1763,7 @@ static void guc_exec_queue_start(struct xe_exec_queue *q)
}
xe_sched_submission_start(sched);
+ xe_sched_submission_resume_tdr(sched);
}
int xe_guc_submit_start(struct xe_guc *guc)
@@ -1808,8 +1776,13 @@ int xe_guc_submit_start(struct xe_guc *guc)
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to start parallel queues */
+ if (q->guc->id != index)
+ continue;
+
guc_exec_queue_start(q);
+ }
mutex_unlock(&guc->submission_state.lock);
wake_up_all(&guc->ct.wq);
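
The q->guc->id != index filter works because a parallel queue of width N occupies N consecutive slots in exec_queue_lookup, all pointing at the same xe_exec_queue (see alloc_guc_id() above); only the slot whose index equals the stored ID is the canonical one. Sketch of the walk:

	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
		/* Secondary slots of a wide queue alias the first one;
		 * visit each queue exactly once. */
		if (q->guc->id != index)
			continue;
		guc_exec_queue_stop(guc, q);	/* or _start() on resume */
	}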
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 546ac6350a31..ed150fc09ad0 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -72,15 +72,10 @@ struct xe_guc {
atomic_t stopped;
/** @submission_state.lock: protects submission state */
struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ 256
- /** @submission_state.submit_wq_pool: submission ordered workqueues pool */
- struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
- /** @submission_state.submit_wq_idx: submission ordered workqueue index */
- int submit_wq_idx;
-#endif
/** @submission_state.enabled: submission is enabled */
bool enabled;
+ /** @submission_state.fini_wq: submit fini wait queue */
+ wait_queue_head_t fini_wq;
} submission_state;
/** @hwconfig: Hardware config state */
struct {
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eae38a49ee8e..2804f14f8f29 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -709,8 +709,7 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
+ _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
},
};
struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
@@ -742,10 +741,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
- _MASKED_FIELD(CTX_CTRL_RUN_ALONE,
- enable ? CTX_CTRL_RUN_ALONE : 0),
+ _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+ _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 937c3e064f0d..5e962e72c97e 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -924,6 +924,8 @@ static int xe_pci_resume(struct device *dev)
if (err)
return err;
+ pci_restore_state(pdev);
+
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7cf2160fe040..33eb039053e4 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -123,7 +123,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
- xe_display_pm_suspend(xe, false);
+ xe_display_pm_suspend(xe);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
@@ -133,7 +133,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err) {
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
goto err;
}
}
@@ -187,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
err = xe_bo_restore_user(xe);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index d6353e8969f0..f27f579f4d85 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2188,5 +2188,5 @@ void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
pt_op->num_entries);
}
- xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+ xe_pt_update_ops_fini(tile, vops);
}
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 28d9bb3b825d..848da8e68c7a 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -161,7 +161,11 @@ query_engine_cycles(struct xe_device *xe,
cpu_clock);
xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- resp.width = 36;
+
+ if (GRAPHICS_VER(xe) >= 20)
+ resp.width = 64;
+ else
+ resp.width = 36;
/* Only write to the output fields of user query */
if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index bb3c2a830362..2e72c06fd40d 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -54,11 +54,12 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
{
struct xe_user_fence *ufence;
u64 __user *ptr = u64_to_user_ptr(addr);
+ u64 __maybe_unused prefetch_val;
- if (!access_ok(ptr, sizeof(*ptr)))
+ if (get_user(prefetch_val, ptr))
return ERR_PTR(-EFAULT);
- ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+ ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
return ERR_PTR(-ENOMEM);
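
The switch from access_ok() to get_user() strengthens the validation: access_ok() only checks that the pointer lies within the user address range, while get_user() performs the load, faulting the page in and rejecting an unmapped (but in-range) address with -EFAULT before the fence is armed. The check in isolation, under the same assumptions as the hunk above:

	u64 __user *ptr = u64_to_user_ptr(addr);
	u64 __maybe_unused prefetch_val;

	if (get_user(prefetch_val, ptr))	/* faults page in, validates mapping */
		return ERR_PTR(-EFAULT);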
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index faa1bf42e50e..0d5e04158917 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -42,20 +42,48 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
SET(CCCHKNREG1, L3CMPCTRL))
},
+ { XE_RTP_NAME("Tuning: Compression Overfetch - media"),
+ XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX),
+ SET(XE2LPM_CCCHKNREG1, L3CMPCTRL))
+ },
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
+ { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"),
+ XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
+ },
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
+ { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
+ XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
+ COMPMEMRD256BOVRFETCHEN))
+ },
{ XE_RTP_NAME("Tuning: Stateless compression control"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
},
+ { XE_RTP_NAME("Tuning: Stateless compression control - media"),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)),
+ XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
+ REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
+ },
+ { XE_RTP_NAME("Tuning: L3 RW flush all Cache"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004)),
+ XE_RTP_ACTIONS(SET(SCRATCH3_LBCF, RWFLUSHALLEN))
+ },
+ { XE_RTP_NAME("Tuning: L3 RW flush all cache - media"),
+ XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 7acd5fc9d032..c99380271de6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
up_write(&vm->lock);
- mutex_lock(&xe->usm.lock);
+ down_write(&xe->usm.lock);
if (vm->usm.asid) {
void *lookup;
@@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
xe_assert(xe, lookup == vm);
}
- mutex_unlock(&xe->usm.lock);
+ up_write(&xe->usm.lock);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
@@ -1765,25 +1765,18 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(vm))
return PTR_ERR(vm);
- mutex_lock(&xef->vm.lock);
- err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
- mutex_unlock(&xef->vm.lock);
- if (err)
- goto err_close_and_put;
-
if (xe->info.has_asid) {
- mutex_lock(&xe->usm.lock);
+ down_write(&xe->usm.lock);
err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
XA_LIMIT(1, XE_MAX_ASID - 1),
&xe->usm.next_asid, GFP_KERNEL);
- mutex_unlock(&xe->usm.lock);
+ up_write(&xe->usm.lock);
if (err < 0)
- goto err_free_id;
+ goto err_close_and_put;
vm->usm.asid = asid;
}
- args->vm_id = id;
vm->xef = xe_file_get(xef);
/* Record BO memory for VM pagetable created against client */
@@ -1796,12 +1789,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
#endif
+ /* user id alloc must always be last in ioctl to prevent UAF */
+ err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_close_and_put;
+
+ args->vm_id = id;
+
return 0;
-err_free_id:
- mutex_lock(&xef->vm.lock);
- xa_erase(&xef->vm.xa, id);
- mutex_unlock(&xef->vm.lock);
err_close_and_put:
xe_vm_close_and_put(vm);
@@ -3203,10 +3199,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
&fence[fence_id], vma);
- if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+ if (ret)
goto wait;
- }
++fence_id;
if (!tile->media_gt)
@@ -3218,10 +3212,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
&fence[fence_id], vma);
- if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+ if (ret)
goto wait;
- }
++fence_id;
}
}
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index d424992514a4..353936a0f877 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -710,6 +710,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
/* Xe2_HPG */
{ XE_RTP_NAME("15010599737"),
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index d46fa8374980..f5deb81eba01 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -169,9 +169,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
args->timeout = 0;
}
- if (!timeout && !(err < 0))
- err = -ETIME;
-
if (q)
xe_exec_queue_put(q);