Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 144
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 462
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 60
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 162
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 107
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 399
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 365
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 341
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 375
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 694
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm | 153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h | 64
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 215
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 75
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 173
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 113
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 110
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 1
139 files changed, 6107 insertions, 1093 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 38408e4e158e..c7b18c52825d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -39,23 +39,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-I$(FULL_AMD_PATH)/amdkfd
-subdir-ccflags-y := -Wextra
-subdir-ccflags-y += -Wunused
-subdir-ccflags-y += -Wmissing-prototypes
-subdir-ccflags-y += -Wmissing-declarations
-subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += -Wold-style-definition
-subdir-ccflags-y += -Wmissing-format-attribute
-# Need this to avoid recursive variable evaluation issues
-cond-flags := $(call cc-option, -Wunused-but-set-variable) \
- $(call cc-option, -Wunused-const-variable) \
- $(call cc-option, -Wstringop-truncation) \
- $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(cond-flags)
-subdir-ccflags-y += -Wno-unused-parameter
-subdir-ccflags-y += -Wno-type-limits
-subdir-ccflags-y += -Wno-sign-compare
-subdir-ccflags-y += -Wno-missing-field-initializers
+# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 137a88b8de45..dcd59040c449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -118,6 +118,8 @@
#define MAX_GPU_INSTANCE 64
+#define GFX_SLICE_PERIOD msecs_to_jiffies(250)
+
struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
int mgpu_fan_enabled;
@@ -235,6 +237,7 @@ extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
+extern uint amdgpu_svm_default_granularity;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
@@ -347,9 +350,9 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
-#define MAX_KIQ_REG_WAIT (amdgpu_sriov_vf(adev) ? 50000 : 5000) /* in usecs, extend for VF */
-#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
int amdgpu_device_ip_set_clockgating_state(void *dev,
@@ -823,17 +826,6 @@ struct amdgpu_mqd {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
-struct amdgpu_reset_info {
- /* reset dump register */
- u32 *reset_dump_reg_list;
- u32 *reset_dump_reg_value;
- int num_regs;
-
-#ifdef CONFIG_DEV_COREDUMP
- struct amdgpu_coredump_info *coredump_info;
-#endif
-};
-
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1157,8 +1149,6 @@ struct amdgpu_device {
struct mutex benchmark_mutex;
- struct amdgpu_reset_info reset_info;
-
bool scpm_enabled;
uint32_t scpm_status;
@@ -1175,6 +1165,11 @@ struct amdgpu_device {
bool debug_disable_soft_recovery;
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
+ bool debug_exp_resets;
+
+ bool enforce_isolation[MAX_XCP];
+ /* Added this mutex for cleaner shader isolation between GFX and compute processes */
+ struct mutex enforce_isolation_mutex;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1484,7 +1479,6 @@ extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
-void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
@@ -1588,13 +1582,6 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
-#if defined(CONFIG_DRM_AMD_DC)
-int amdgpu_dm_display_resume(struct amdgpu_device *adev );
-#else
-static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
-#endif
-
-
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 19158cc30f31..57bda66e85ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -80,6 +80,9 @@ static void aca_banks_release(struct aca_banks *banks)
{
struct aca_bank_node *node, *tmp;
+ if (list_empty(&banks->list))
+ return;
+
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
@@ -453,13 +456,13 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
switch (type) {
case ACA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count);
break;
case ACA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count);
break;
case ACA_ERROR_TYPE_DEFERRED:
- amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count);
break;
default:
break;
@@ -562,9 +565,13 @@ static void aca_error_fini(struct aca_error *aerr)
struct aca_bank_error *bank_error, *tmp;
mutex_lock(&aerr->lock);
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
aca_bank_error_remove(aerr, bank_error);
+out_unlock:
mutex_destroy(&aerr->lock);
}
@@ -680,6 +687,9 @@ static void aca_manager_fini(struct aca_handle_manager *mgr)
{
struct aca_handle *handle, *tmp;
+ if (list_empty(&mgr->list))
+ return;
+
list_for_each_entry_safe(handle, tmp, &mgr->list, node)
amdgpu_aca_remove_handle(handle);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 03205e3c3746..4f08b153cb66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -364,15 +364,15 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
- struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
+ struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(bo, true);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&(bo));
+ amdgpu_bo_reserve(*bo, true);
+ amdgpu_bo_kunmap(*bo);
+ amdgpu_bo_unpin(*bo);
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
}
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
@@ -783,22 +783,6 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst, int hub_type)
-{
- if (!hub_type) {
- if (adev->gfxhub.funcs->query_utcl2_poison_status)
- return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
- else
- return false;
- } else {
- if (adev->mmhub.funcs->query_utcl2_poison_status)
- return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
- else
- return false;
- }
-}
-
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
return kgd2kfd_check_and_lock_kfd();
@@ -887,3 +871,21 @@ free_ring_funcs:
return r;
}
+
+/* Stop scheduling on KFD */
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
+}
+
+/* Start scheduling on KFD */
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_start_sched(adev->kfd.dev, node_id);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e7bb1ca35801..f9d119448442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
@@ -264,6 +264,8 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst);
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -322,7 +324,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence __rcu **ef);
@@ -345,11 +347,9 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad
pasid_notify pasid_fn, void *data, uint32_t reset);
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
@@ -426,6 +426,8 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
#else
static inline int kgd2kfd_init(void)
{
@@ -496,5 +498,15 @@ static inline int kgd2kfd_check_and_lock_kfd(void)
static inline void kgd2kfd_unlock_kfd(void)
{
}
+
+static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index aff08321e976..8dfdb18197c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -191,4 +191,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 3a3f3ce09f00..9435af2e6bdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
-#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdgpu.h"
@@ -300,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
if (r)
goto out;
} else {
- drm_sched_start(&ring->sched, false);
+ drm_sched_start(&ring->sched);
}
}
@@ -418,5 +417,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
- .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index a5c7259cf2a3..e2ae714a700f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -541,5 +541,7 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
kgd_gfx_v9_4_3_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
- .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
+ .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 3ab6c3aa0ad1..62176d607bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -1070,6 +1070,20 @@ static void program_trap_handler_settings(struct amdgpu_device *adev,
unlock_srbm(adev);
}
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1097,4 +1111,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 67bcaa3d4226..9efd2dd4fdd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -56,3 +56,12 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 8c8437a4383f..c718bedda0ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -680,5 +680,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v10_set_address_watch,
- .clear_address_watch = kgd_gfx_v10_clear_address_watch
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index b61a32d6af4b..a4ba49cb22db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -786,6 +786,20 @@ static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint64_t kgd_gfx_v11_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -808,5 +822,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v11_set_address_watch,
- .clear_address_watch = kgd_gfx_v11_clear_address_watch
+ .clear_address_watch = kgd_gfx_v11_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v11_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 5a35a8ca8922..1254a43ec96b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1144,6 +1144,109 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
kgd_gfx_v9_unlock_srbm(adev, inst);
}
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ uint32_t low, high;
+ uint64_t queue_addr = 0;
+
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+unlock_out:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
+/* assume queue acquired */
+static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst,
+ unsigned int utimeout)
+{
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ while (true) {
+ uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ return 0;
+
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+
+ usleep_range(500, 1000);
+ }
+}
+
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ uint32_t low, high, pipe_reset_data = 0;
+ uint64_t queue_addr = 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+ pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n",
+ inst, pipe_id, queue_id);
+
+ /* assume previous dequeue request issued will take effect after reset */
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ goto unlock_out;
+
+ pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id);
+
+ pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1);
+ pipe_reset_data = pipe_reset_data << pipe_id;
+
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data);
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0);
+
+ if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ queue_addr = 0;
+
+unlock_out:
+ pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n",
+ inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!");
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1172,4 +1275,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
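
Note on the two PQ base registers read in kgd_gfx_v9_hqd_get_pq_addr() and kgd_gfx_v9_hqd_reset() above: the values are recombined and shifted left by 8, which suggests the hardware holds the ring buffer base in 256-byte units. That interpretation is an assumption, not something the patch states; a standalone sketch of the recombination:

	#include <stdint.h>
	#include <stdio.h>

	/* Rebuild the 64-bit user queue address the same way the helpers above
	 * do: ((high << 32) | low) << 8.  The 256-byte granularity is inferred
	 * from the shift, not taken from hardware documentation.
	 */
	static uint64_t rebuild_pq_addr(uint32_t pq_base_lo, uint32_t pq_base_hi)
	{
		return (((uint64_t)pq_base_hi << 32) | pq_base_lo) << 8;
	}

	int main(void)
	{
		/* hypothetical register contents */
		printf("0x%llx\n",
		       (unsigned long long)rebuild_pq_addr(0x00345678, 0x12));
		return 0;
	}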
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index ce424615f59b..988c50ac3be0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -101,3 +101,12 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 11672bfe4fad..4afef5b46c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,7 +25,6 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
-#include <linux/fdtable.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
@@ -818,18 +817,13 @@ static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
if (!mem->dmabuf) {
struct amdgpu_device *bo_adev;
struct dma_buf *dmabuf;
- int r, fd;
bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+ dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
mem->gem_handle,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0, &fd);
- if (r)
- return r;
- dmabuf = dma_buf_get(fd);
- close_fd(fd);
- if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+ DRM_RDWR : 0);
+ if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
mem->dmabuf = dmabuf;
}
@@ -1252,7 +1246,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
return ret;
}
-static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
@@ -1260,11 +1254,18 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct amdgpu_device *adev = entry->adev;
struct amdgpu_vm *vm = bo_va->base.vm;
+ if (bo_va->queue_refcount) {
+ pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
+ return -EBUSY;
+ }
+
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
amdgpu_sync_fence(sync, bo_va->last_pt_update);
+
+ return 0;
}
static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -2191,7 +2192,10 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
entry->va, entry->va + bo_size, entry);
- unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ if (ret)
+ goto unreserve_out;
+
entry->is_mapped = false;
mem->mapped_to_gpu_memory--;
@@ -2226,11 +2230,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
/**
* amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
* @bo: Buffer object to be mapped
+ * @bo_gart: Return bo reference
*
* Before return, bo reference count is incremented. To release the reference and unpin/
* unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
*/
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
{
int ret;
@@ -2257,7 +2262,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
- bo = amdgpu_bo_ref(bo);
+ *bo_gart = amdgpu_bo_ref(bo);
return 0;
@@ -3200,12 +3205,13 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
{
+ struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
struct kfd_mem_attachment *entry;
list_for_each_entry(entry, &mem->attachments, list) {
- if (entry->is_mapped && entry->adev == adev)
+ if (entry->is_mapped && entry->bo_va->base.vm == vm)
return true;
}
return false;
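
The hunks above change amdgpu_amdkfd_map_gtt_bo_to_gart() to hand its extra reference back through the new @bo_gart argument, and amdgpu_amdkfd_free_gtt_mem() to take a double pointer so it can drop that reference and clear the caller's copy (amdgpu_bo_unref() NULLs it). A hedged caller-side sketch of the resulting convention; example_map_and_use() and its local variable names are hypothetical:

	static int example_map_and_use(struct amdgpu_device *adev,
				       struct amdgpu_bo *bo)
	{
		struct amdgpu_bo *bo_gart = NULL;	/* receives the extra reference */
		int r;

		r = amdgpu_amdkfd_map_gtt_bo_to_gart(bo, &bo_gart);
		if (r)
			return r;

		/* ... use the GART-mapped BO ... */

		/* unpins/unmaps the BO, drops the reference and NULLs bo_gart */
		amdgpu_amdkfd_free_gtt_mem(adev, (void **)&bo_gart);
		return 0;
	}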
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 7dc102f0bc1d..0c8975ac5af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1018,8 +1018,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1039,8 +1040,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1058,8 +1060,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1070,8 +1073,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1113,8 +1117,9 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1211,8 +1216,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -1221,8 +1227,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 618e469e3622..42e64bce661e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -431,6 +431,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
if (amdgpu_read_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
@@ -446,11 +451,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- if (amdgpu_read_platform_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from platform\n");
- goto success;
- }
-
dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c3d89088123d..16153d275d7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -414,7 +414,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cae7479c3ecf..344e0a9ee08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -249,11 +249,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
static struct edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
- if (adev->mode_info.bios_hardcoded_edid) {
- return kmemdup((unsigned char *)adev->mode_info.bios_hardcoded_edid,
- adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
- }
- return NULL;
+ return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
}
static void amdgpu_connector_get_edid(struct drm_connector *connector)
@@ -442,6 +438,9 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ return;
+
drm_mode_probed_add(connector, mode);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6dfdff58bffd..1e475eb01417 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (size < sizeof(struct drm_amdgpu_bo_list_in))
goto free_partial_kdata;
+ /* Only a single BO list is allowed to simplify handling. */
+ if (p->bo_list)
+ ret = -EINVAL;
+
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
@@ -292,6 +296,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
+ p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -1106,7 +1111,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+ if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0e1a11b6b989..cbef720de779 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2026,100 +2026,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
-static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
- char __user *buf, size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
- char reg_offset[12];
- int i, ret, len = 0;
-
- if (*pos)
- return 0;
-
- memset(reg_offset, 0, 12);
- ret = down_read_killable(&adev->reset_domain->sem);
- if (ret)
- return ret;
-
- for (i = 0; i < adev->reset_info.num_regs; i++) {
- sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
- up_read(&adev->reset_domain->sem);
- if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
- return -EFAULT;
-
- len += strlen(reg_offset);
- ret = down_read_killable(&adev->reset_domain->sem);
- if (ret)
- return ret;
- }
-
- up_read(&adev->reset_domain->sem);
- *pos += len;
-
- return len;
-}
-
-static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
- const char __user *buf, size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
- char reg_offset[11];
- uint32_t *new = NULL, *tmp = NULL;
- unsigned int len = 0;
- int ret, i = 0;
-
- do {
- memset(reg_offset, 0, 11);
- if (copy_from_user(reg_offset, buf + len,
- min(10, (size-len)))) {
- ret = -EFAULT;
- goto error_free;
- }
-
- new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto error_free;
- }
- tmp = new;
- if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
- ret = -EINVAL;
- goto error_free;
- }
-
- len += ret;
- i++;
- } while (len < size);
-
- new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto error_free;
- }
- ret = down_write_killable(&adev->reset_domain->sem);
- if (ret)
- goto error_free;
-
- swap(adev->reset_info.reset_dump_reg_list, tmp);
- swap(adev->reset_info.reset_dump_reg_value, new);
- adev->reset_info.num_regs = i;
- up_write(&adev->reset_domain->sem);
- ret = size;
-
-error_free:
- if (tmp != new)
- kfree(tmp);
- kfree(new);
- return ret;
-}
-
-static const struct file_operations amdgpu_reset_dump_register_list = {
- .owner = THIS_MODULE,
- .read = amdgpu_reset_dump_register_list_read,
- .write = amdgpu_reset_dump_register_list_write,
- .llseek = default_llseek
-};
-
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -2204,8 +2110,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
&amdgpu_debugfs_vm_info_fops);
debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
&amdgpu_benchmark_fops);
- debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
- &amdgpu_reset_dump_register_list);
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index f0a44d0dec27..5ac59b62020c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -28,8 +28,8 @@
#include "atom.h"
#ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
}
#else
@@ -203,7 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
struct amdgpu_coredump_info *coredump = data;
struct drm_print_iterator iter;
struct amdgpu_vm_fault_info *fault_info;
- int i, ver;
+ int ver;
iter.data = buffer;
iter.offset = 0;
@@ -236,7 +236,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "\nSOC Memory Information\n");
drm_printf(&p, "real vram size: %llu\n", coredump->adev->gmc.real_vram_size);
drm_printf(&p, "visible vram size: %llu\n", coredump->adev->gmc.visible_vram_size);
- drm_printf(&p, "visible vram size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
+ drm_printf(&p, "gtt size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
/* GDS Config */
drm_printf(&p, "\nGDS Config\n");
@@ -315,16 +315,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
}
}
- if (coredump->reset_vram_lost)
+ if (coredump->skip_vram_check)
+ drm_printf(&p, "VRAM lost check is skipped!\n");
+ else if (coredump->reset_vram_lost)
drm_printf(&p, "VRAM is lost due to GPU reset!\n");
- if (coredump->adev->reset_info.num_regs) {
- drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
-
- for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
- drm_printf(&p, "0x%08x: 0x%08x\n",
- coredump->adev->reset_info.reset_dump_reg_list[i],
- coredump->adev->reset_info.reset_dump_reg_value[i]);
- }
return count - iter.remain;
}
@@ -334,12 +328,11 @@ static void amdgpu_devcoredump_free(void *data)
kfree(data);
}
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
- struct amdgpu_coredump_info *coredump;
struct drm_device *dev = adev_to_drm(adev);
- struct amdgpu_job *job = reset_context->job;
+ struct amdgpu_coredump_info *coredump;
struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
@@ -349,11 +342,12 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
return;
}
+ coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (reset_context->job && reset_context->job->vm) {
+ if (job && job->vm) {
+ struct amdgpu_vm *vm = job->vm;
struct amdgpu_task_info *ti;
- struct amdgpu_vm *vm = reset_context->job->vm;
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
index 52459512cb2b..ef9772c6bcc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -26,7 +26,6 @@
#define __AMDGPU_DEV_COREDUMP_H__
#include "amdgpu.h"
-#include "amdgpu_reset.h"
#ifdef CONFIG_DEV_COREDUMP
@@ -36,12 +35,12 @@ struct amdgpu_coredump_info {
struct amdgpu_device *adev;
struct amdgpu_task_info reset_task_info;
struct timespec64 reset_time;
+ bool skip_vram_check;
bool reset_vram_lost;
struct amdgpu_ring *ring;
};
#endif
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context);
-
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bcacf2e35eba..f4628412dac4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1916,6 +1916,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
+ int i;
+
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -1970,6 +1972,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+ for (i = 0; i < MAX_XCP; i++)
+ adev->enforce_isolation[i] = !!enforce_isolation;
+
return 0;
}
@@ -2471,6 +2476,7 @@ out:
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
struct pci_dev *parent;
int i, r;
bool total;
@@ -2608,7 +2614,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!total)
return -ENODEV;
- amdgpu_amdkfd_device_probe(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (ip_block->status.valid != false)
+ amdgpu_amdkfd_device_probe(adev);
+
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -3948,6 +3957,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
adev->ram_is_direct_mapped = true;
}
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return: true if the IOMMU remaps the BAR address
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+ domain->type == IOMMU_DOMAIN_DMA_FQ))
+ return true;
+
+ return false;
+}
+#endif
+
static const struct attribute *amdgpu_dev_attributes[] = {
&dev_attr_pcie_replay_count.attr,
NULL
@@ -4055,6 +4085,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->notifier_lock);
mutex_init(&adev->pm.stable_pstate_ctx_lock);
mutex_init(&adev->benchmark_mutex);
+ mutex_init(&adev->gfx.reset_sem_mutex);
+ /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
+ mutex_init(&adev->enforce_isolation_mutex);
+ mutex_init(&adev->gfx.kfd_sch_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4086,6 +4120,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ /*
+ * Initialize the enforce_isolation work structures for each XCP
+ * partition. This work handler is responsible for enforcing shader
+ * isolation on AMD GPUs. It counts the number of emitted fences for
+ * each GFX and compute ring. If there are any fences, it schedules
+ * the `enforce_isolation_work` to be run after a delay. If there are
+ * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
+ * runqueue.
+ */
+ for (i = 0; i < MAX_XCP; i++) {
+ INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
+ amdgpu_gfx_enforce_isolation_handler);
+ adev->gfx.enforce_isolation[i].adev = adev;
+ adev->gfx.enforce_isolation[i].xcp_id = i;
+ }
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
@@ -4482,6 +4531,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
+
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
/* make sure IB test finished before entering exclusive mode
@@ -4502,9 +4554,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized)
- drain_workqueue(adev->mman.bdev.wq);
-
if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
@@ -5278,16 +5327,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
{
int i, r = 0;
struct amdgpu_job *job = NULL;
+ struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
if (reset_context->reset_req_dev == adev)
job = reset_context->job;
- if (amdgpu_sriov_vf(adev)) {
- /* stop the data exchange thread */
- amdgpu_virt_fini_data_exchange(adev);
- }
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_pre_reset(adev);
amdgpu_fence_driver_isr_toggle(adev, true);
@@ -5336,6 +5384,16 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
}
}
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+ dev_info(tmp_adev->dev, "Dumping IP State\n");
+ /* Trigger ip dump before we reset the asic */
+ for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+ if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+ tmp_adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)tmp_adev);
+ dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
+ }
+
if (need_full_reset)
r = amdgpu_device_ip_suspend(adev);
if (need_full_reset)
@@ -5348,47 +5406,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
-{
- int i;
-
- lockdep_assert_held(&adev->reset_domain->sem);
-
- for (i = 0; i < adev->reset_info.num_regs; i++) {
- adev->reset_info.reset_dump_reg_value[i] =
- RREG32(adev->reset_info.reset_dump_reg_list[i]);
-
- trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
- adev->reset_info.reset_dump_reg_value[i]);
- }
-
- return 0;
-}
-
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
- uint32_t i;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
- if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
- amdgpu_reset_reg_dumps(tmp_adev);
-
- dev_info(tmp_adev->dev, "Dumping IP State\n");
- /* Trigger ip dump before we reset the asic */
- for (i = 0; i < tmp_adev->num_ip_blocks; i++)
- if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
- tmp_adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)tmp_adev);
- dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
- }
-
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
@@ -5461,7 +5489,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
- amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+ amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
@@ -5513,7 +5541,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
* bad_page_threshold value to fix this once
* probing driver again.
*/
- if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
+ if (!amdgpu_ras_is_rma(tmp_adev)) {
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
} else {
@@ -5879,7 +5907,7 @@ skip_hw_reset:
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -5891,8 +5919,14 @@ skip_hw_reset:
tmp_adev->asic_reset_res = 0;
if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ /* bad news, how to tell it to userspace ?
+ * for ras error, we should report GPU bad status instead of
+ * reset failure
+ */
+ if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
+ !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
@@ -6138,18 +6172,24 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
- uint64_t address_mask = peer_adev->dev->dma_mask ?
- ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
- resource_size_t aper_limit =
- adev->gmc.aper_base + adev->gmc.aper_size - 1;
bool p2p_access =
!adev->gmc.xgmi.connected_to_cpu &&
!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
- return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
- adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
- !(adev->gmc.aper_base & address_mask ||
- aper_limit & address_mask));
+ bool is_large_bar = adev->gmc.visible_vram_size &&
+ adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
+ bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
+
+ if (!p2p_addressable) {
+ uint64_t address_mask = peer_adev->dev->dma_mask ?
+ ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
+ resource_size_t aper_limit =
+ adev->gmc.aper_base + adev->gmc.aper_size - 1;
+
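+ /* Without IOMMU remapping, the peer's DMA mask must cover the
+ * whole VRAM aperture for it to be addressable over P2P.
+ */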
+ p2p_addressable = !(adev->gmc.aper_base & address_mask ||
+ aper_limit & address_mask);
+ }
+ return is_large_bar && p2p_access && p2p_addressable;
#else
return false;
#endif
@@ -6374,7 +6414,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
amdgpu_device_unset_mp1_state(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 7b561e8e3caf..4bd61c169ca8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1500,6 +1500,7 @@ union gc_info {
struct gc_info_v1_0 v1;
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
+ struct gc_info_v1_3 v1_3;
struct gc_info_v2_0 v2;
struct gc_info_v2_1 v2_1;
};
@@ -1558,6 +1559,16 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
}
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
+ adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
+ adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
+ adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
+ }
break;
case 2:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 094498a0964b..f57411ed2dc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -131,6 +131,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
+ AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -168,6 +169,16 @@ uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
bool enforce_isolation;
+
+/* Specifies the default granularity for SVM, used in buffer
+ * migration and restoration of backing memory when handling
+ * recoverable page faults.
+ *
+ * The value is given as log2(numPages(buffer)); a 2 MiB buffer
+ * holds 512 4 KiB pages, so it computes to 9
+ */
+uint amdgpu_svm_default_granularity = 9;
+
/*
* OverDrive(bit 14) disabled by default
* GFX DCS(bit 19) disabled by default
@@ -320,6 +331,13 @@ MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
/**
+ * DOC: svm_default_granularity (uint)
+ * Used in buffer migration and handling of recoverable page faults
+ */
+MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity in log(2^Pages), default 9 = 2^9 = 2 MiB");
+module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644);
+
+/**
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
@@ -2199,6 +2217,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable RAS ACA\n");
adev->debug_enable_ras_aca = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
+ pr_info("debug: enable experimental reset features\n");
+ adev->debug_exp_resets = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2908,6 +2931,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = drm_show_fdinfo,
#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
@@ -2953,7 +2977,6 @@ static const struct drm_driver amdgpu_kms_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
@@ -2980,7 +3003,6 @@ const struct drm_driver amdgpu_partition_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 8283d682f543..7cc980bf4725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -55,8 +55,6 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index aad2027e5c7c..0e617dff8765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -348,6 +348,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* always clear VRAM */
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c770cb201e64..83e54697f0ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,10 +24,13 @@
*/
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
@@ -657,7 +660,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
uint64_t queue_mask = 0;
int r, i, j;
- if (adev->enable_mes)
+ if (adev->mes.enable_legacy_queue_map)
return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
@@ -719,7 +722,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- if (adev->enable_mes) {
+ if (adev->mes.enable_legacy_queue_map) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
r = amdgpu_mes_map_legacy_queue(adev,
@@ -882,8 +885,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
int r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
- if (!amdgpu_persistent_edc_harvesting_supported(adev))
- amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (r)
+ return r;
+ }
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
@@ -1027,7 +1033,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
pr_err("critical bug! too many kiq readers\n");
goto failed_unlock;
}
- amdgpu_ring_alloc(ring, 32);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
@@ -1093,7 +1102,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
}
spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
amdgpu_ring_emit_wreg(ring, reg, v);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
@@ -1129,6 +1141,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
failed_undo:
amdgpu_ring_undo(ring);
+failed_unlock:
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
@@ -1381,6 +1394,217 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
return sysfs_emit(buf, "%s\n", supported_partition);
}
+static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ struct drm_sched_entity entity;
+ struct dma_fence *f;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int i, r;
+
+ /* Initialize the scheduler entity */
+ r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (r) {
+ dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
+ goto err;
+ }
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
+ 64, 0,
+ &job);
+ if (r)
+ goto err;
+
+ job->enforce_isolation = true;
+
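+ /* The job carries no real payload: fill a minimal IB with ring NOPs
+ * so it simply flushes through the queue with isolation enforced.
+ */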
+ ib = &job->ibs[0];
+ for (i = 0; i <= ring->funcs->align_mask; ++i)
+ ib->ptr[i] = ring->funcs->nop;
+ ib->length_dw = ring->funcs->align_mask + 1;
+
+ f = amdgpu_job_submit(job);
+
+ r = dma_fence_wait(f, false);
+ if (r)
+ goto err;
+
+ dma_fence_put(f);
+
+ /* Clean up the scheduler entity */
+ drm_sched_entity_destroy(&entity);
+ return 0;
+
+err:
+ return r;
+}
+
+static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
+{
+ int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ struct amdgpu_ring *ring;
+ int num_xcc_to_clear;
+ int i, r, xcc_id;
+
+ if (adev->gfx.num_xcc_per_xcp)
+ num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
+ else
+ num_xcc_to_clear = 1;
+
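+ /* Submit the cleaner shader job on one ready compute ring of each
+ * XCC belonging to the requested partition.
+ */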
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
+ if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
+ r = amdgpu_gfx_run_cleaner_shader_job(ring);
+ if (r)
+ return r;
+ num_xcc_to_clear--;
+ break;
+ }
+ }
+ }
+
+ if (num_xcc_to_clear)
+ return -ENOENT;
+
+ return 0;
+}
+
+static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ long value;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret)
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ if (adev->xcp_mgr) {
+ if (value >= adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+ } else {
+ if (value > 1)
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_gfx_run_cleaner_shader(adev, value);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int i;
+ ssize_t size = 0;
+
+ if (adev->xcp_mgr) {
+ for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+ size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
+ if (i < (adev->xcp_mgr->num_xcps - 1))
+ size += sysfs_emit_at(buf, size, " ");
+ }
+ buf[size++] = '\n';
+ } else {
+ size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
+ }
+
+ return size;
+}
+
+static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ long partition_values[MAX_XCP] = {0};
+ int ret, i, num_partitions;
+ const char *input_buf = buf;
+
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ ret = sscanf(input_buf, "%ld", &partition_values[i]);
+ if (ret <= 0)
+ break;
+
+ /* Move the pointer to the next value in the string */
+ input_buf = strchr(input_buf, ' ');
+ if (input_buf) {
+ input_buf++;
+ } else {
+ i++;
+ break;
+ }
+ }
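+ /* i now counts how many values were parsed from the space-separated input */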
+ num_partitions = i;
+
+ if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ if (!adev->xcp_mgr && num_partitions != 1)
+ return -EINVAL;
+
+ for (i = 0; i < num_partitions; i++) {
+ if (partition_values[i] != 0 && partition_values[i] != 1)
+ return -EINVAL;
+ }
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ for (i = 0; i < num_partitions; i++) {
+ if (adev->enforce_isolation[i] && !partition_values[i]) {
+ /* Going from enabled to disabled */
+ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
+ } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ /* Going from disabled to enabled */
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ }
+ adev->enforce_isolation[i] = partition_values[i];
+ }
+
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(run_cleaner_shader, 0200,
+ NULL, amdgpu_gfx_set_run_cleaner_shader);
+
+static DEVICE_ATTR(enforce_isolation, 0644,
+ amdgpu_gfx_get_enforce_isolation,
+ amdgpu_gfx_set_enforce_isolation);
+
static DEVICE_ATTR(current_compute_partition, 0644,
amdgpu_gfx_get_current_compute_partition,
amdgpu_gfx_set_compute_partition);
@@ -1406,3 +1630,229 @@ void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}
+
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
+ }
+
+ r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev))
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+}
+
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return -EOPNOTSUPP;
+
+ return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
+ memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
+ cleaner_shader_size);
+}
+
+/**
+ * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the scheduler to control
+ * @enable: Whether to enable or disable the KFD scheduler
+ *
+ * This function is used to control the KFD (Kernel Fusion Driver) scheduler
+ * from the KGD. It is part of the cleaner shader feature. This function plays
+ * a key role in enforcing process isolation on the GPU.
+ *
+ * The function uses a reference count mechanism (kfd_sch_req_count) to keep
+ * track of the number of requests to enable the KFD scheduler. When a request
+ * to enable the KFD scheduler is made, the reference count is decremented.
+ * When the reference count reaches zero, a delayed work is scheduled to
+ * enforce isolation after a delay of GFX_SLICE_PERIOD.
+ *
+ * When a request to disable the KFD scheduler is made, the function first
+ * checks if the reference count is zero. If it is, it cancels the delayed work
+ * for enforcing isolation and checks if the KFD scheduler is active. If the
+ * KFD scheduler is active, it sends a request to stop the KFD scheduler and
+ * sets the KFD scheduler state to inactive. Then, it increments the reference
+ * count.
+ *
+ * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
+ * scheduler state and reference count are updated atomically.
+ *
+ * Note: If the reference count is already zero when a request to enable the
+ * KFD scheduler is made, it means there's an imbalance bug somewhere. The
+ * function triggers a warning in this case.
+ */
+static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
+ bool enable)
+{
+ mutex_lock(&adev->gfx.kfd_sch_mutex);
+
+ if (enable) {
+ /* If the count is already 0, it means there's an imbalance bug somewhere.
+ * Note that the bug may be in a different caller than the one which triggers the
+ * WARN_ON_ONCE.
+ */
+ if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
+ goto unlock;
+ }
+
+ adev->gfx.kfd_sch_req_count[idx]--;
+
+ if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
+ adev->gfx.kfd_sch_inactive[idx]) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ GFX_SLICE_PERIOD);
+ }
+ } else {
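+ /* Disable request: on the first one (count == 0), cancel any
+ * pending re-enable work and stop the KFD scheduler.
+ */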
+ if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
+ if (!adev->gfx.kfd_sch_inactive[idx]) {
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = true;
+ }
+ }
+
+ adev->gfx.kfd_sch_req_count[idx]++;
+ }
+
+unlock:
+ mutex_unlock(&adev->gfx.kfd_sch_mutex);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
+ *
+ * @work: work_struct.
+ *
+ * This function is the work handler for enforcing shader isolation on AMD GPUs.
+ * It counts the number of emitted fences for each GFX and compute ring. If there
+ * are any fences, it schedules the `enforce_isolation_work` to be run after a
+ * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion
+ * Driver (KFD) to resume the runqueue. The function is synchronized using the
+ * `enforce_isolation_mutex`.
+ */
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
+{
+ struct amdgpu_isolation_work *isolation_work =
+ container_of(work, struct amdgpu_isolation_work, work.work);
+ struct amdgpu_device *adev = isolation_work->adev;
+ u32 i, idx, fences = 0;
+
+ if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = isolation_work->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
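+ /* Count fences still outstanding on this partition's GFX and compute rings */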
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
+ if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ }
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
+ if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ }
+ if (fences) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ GFX_SLICE_PERIOD);
+ } else {
+ /* Tell KFD to resume the runqueue */
+ if (adev->kfd.init_complete) {
+ WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = false;
+ }
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ if (adev->kfd.init_complete)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ if (adev->kfd.init_complete)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ddda94e49db4..5644e10a86a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -34,6 +34,7 @@
#include "soc15.h"
#include "amdgpu_ras.h"
#include "amdgpu_ring_mux.h"
+#include "amdgpu_xcp.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -138,6 +139,10 @@ struct kiq_pm4_funcs {
void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub);
+ void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
+ uint32_t queue_type, uint32_t me_id,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid);
/* Packet sizes */
int set_resources_size;
int map_queues_size;
@@ -240,6 +245,12 @@ struct amdgpu_gfx_config {
uint32_t gc_tcp_size_per_cu;
uint32_t gc_num_cu_per_sqc;
uint32_t gc_tcc_size;
+ uint32_t gc_tcp_cache_line_size;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_instruction_cache_line_size;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_line_size;
+ uint32_t gc_tcc_cache_line_size;
};
struct amdgpu_cu_info {
@@ -339,6 +350,12 @@ struct amdgpu_me {
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
};
+struct amdgpu_isolation_work {
+ struct amdgpu_device *adev;
+ u32 xcp_id;
+ struct delayed_work work;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gfx_config config;
@@ -391,6 +408,7 @@ struct amdgpu_gfx {
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src bad_op_irq;
struct amdgpu_irq_src cp_ecc_error_irq;
struct amdgpu_irq_src sq_irq;
struct amdgpu_irq_src rlc_gc_fed_irq;
@@ -439,6 +457,21 @@ struct amdgpu_gfx {
uint32_t *ip_dump_core;
uint32_t *ip_dump_compute_queues;
uint32_t *ip_dump_gfx_queues;
+
+ struct mutex reset_sem_mutex;
+
+ /* cleaner shader */
+ struct amdgpu_bo *cleaner_shader_obj;
+ unsigned int cleaner_shader_size;
+ u64 cleaner_shader_gpu_addr;
+ void *cleaner_shader_cpu_ptr;
+ const void *cleaner_shader_ptr;
+ bool enable_cleaner_shader;
+ struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
+ /* Mutex for synchronizing KFD scheduler operations */
+ struct mutex kfd_sch_mutex;
+ u64 kfd_sch_req_count[MAX_XCP];
+ bool kfd_sch_inactive[MAX_XCP];
};
struct amdgpu_gfx_ras_reg_entry {
@@ -540,6 +573,17 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
void *ras_error_status,
void (*func)(struct amdgpu_device *adev, void *ras_error_status,
int xcc_id));
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size);
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr);
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev);
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index 103a837ccc71..c7b44aeb671b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -38,8 +38,6 @@ struct amdgpu_gfxhub_funcs {
void (*mode2_save_regs)(struct amdgpu_device *adev);
void (*mode2_restore_regs)(struct amdgpu_device *adev);
void (*halt)(struct amdgpu_device *adev);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
- int xcc_id);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index b49b3650fd62..17a19d49d30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -786,7 +786,8 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
goto failed_kiq;
might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b6a8bddada4c..92d27d32de41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+ if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -476,15 +476,19 @@ error:
/*
* amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @adev: amdgpu_device pointer
* @vm: the VM to check
* @vmhub: the VMHUB which will be used
*
* Returns: True if the VM will use a reserved VMID.
*/
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, unsigned int vmhub)
{
return vm->reserved_vmid[vmhub] ||
- (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+ (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
+ vm->root.bo->xcp_id : 0] &&
+ AMDGPU_IS_GFXHUB(vmhub));
}
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
@@ -600,9 +604,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
}
/* alloc a default reserved vmid to enforce isolation */
- if (enforce_isolation)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
-
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..4012fb2dd08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,7 +78,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, unsigned int vmhub);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 44e2ea8c9728..b03664c66dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -49,6 +49,7 @@ struct amdgpu_isp {
const struct isp_funcs *funcs;
struct mfd_cell *isp_cell;
struct resource *isp_res;
+ struct resource *isp_i2c_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 908e13455152..ad6bf5d4e0a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,6 +30,60 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
+
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
+
+ dev_info(adev->dev, "Dumping IP State\n");
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+ adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)adev);
+ dev_info(adev->dev, "Dumping IP State Completed\n");
+
+ amdgpu_coredump(adev, true, false, job);
+}
+
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_hive_info *hive = NULL;
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ /*
+ * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
+ * devices for the coredump
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ /* Do the coredump for each device */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_job_do_core_dump(tmp_adev, job);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
@@ -48,9 +102,14 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
-
adev->job_hang = true;
+ /*
+ * Do the coredump immediately after the job timeout so the snapshot is
+ * as close as possible to the GPU's error state at that moment
+ */
+ amdgpu_job_core_dump(adev, job);
+
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
@@ -72,6 +131,26 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+ /* attempt a per ring reset */
+ if (amdgpu_gpu_recovery &&
+ ring->funcs->reset) {
+ /* stop the scheduler, but don't mess with the
+ * bad job yet because if ring reset fails
+ * we'll fall back to full GPU reset.
+ */
+ drm_sched_wqueue_stop(&ring->sched);
+ r = amdgpu_ring_reset(ring, job->vmid);
+ if (!r) {
+ if (amdgpu_ring_sched_ready(ring))
+ drm_sched_stop(&ring->sched, s_job);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ amdgpu_fence_driver_force_completion(ring);
+ if (amdgpu_ring_sched_ready(ring))
+ drm_sched_start(&ring->sched);
+ goto exit;
+ }
+ }
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -81,6 +160,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.src = AMDGPU_RESET_SRC_JOB;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*
+ * Skip the extra coredump during recovery; we already captured a
+ * close representation of the GPU's error state right after the timeout
+ */
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index a963a25ddd62..ce6b9ba967ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,6 +76,9 @@ struct amdgpu_job {
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
+ /* enforce isolation */
+ bool enforce_isolation;
+
uint32_t num_ibs;
struct amdgpu_ib ibs[];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 66782be5917b..016a6f6c4267 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,6 +43,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "amd_pcie.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
@@ -778,6 +779,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
+ int ret = 0;
unsigned int n, alloc_size;
uint32_t *regs;
unsigned int se_num = (info->read_mmr_reg.instance >>
@@ -787,24 +789,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
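+ /* Skip the register read if the reset domain cannot be taken,
+ * e.g. while a GPU reset is in flight.
+ */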
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -ENOENT;
+
/* set full masks if the userspace set all bits
* in the bitfields
*/
- if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
se_num = 0xffffffff;
- else if (se_num >= AMDGPU_GFX_MAX_SE)
- return -EINVAL;
- if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+ } else if (se_num >= AMDGPU_GFX_MAX_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
sh_num = 0xffffffff;
- else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
- return -EINVAL;
+ } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (info->read_mmr_reg.count > 128)
- return -EINVAL;
+ if (info->read_mmr_reg.count > 128) {
+ ret = -EINVAL;
+ goto out;
+ }
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
amdgpu_gfx_off_ctrl(adev, false);
@@ -816,13 +831,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
info->read_mmr_reg.dword_offset + i);
kfree(regs);
amdgpu_gfx_off_ctrl(adev, true);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
}
amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
- return n ? -EFAULT : 0;
+ ret = (n ? -EFAULT : 0);
+out:
+ up_read(&adev->reset_domain->sem);
+ return ret;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
@@ -1269,23 +1288,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
-
-/*
- * Outdated mess for old drm with Xorg being in charge (void function now).
- */
-/**
- * amdgpu_driver_lastclose_kms - drm callback for last close
- *
- * @dev: drm dev pointer
- *
- * Switch vga_switcheroo state after last close (all asics).
- */
-void amdgpu_driver_lastclose_kms(struct drm_device *dev)
-{
- drm_fb_helper_lastclose(dev);
- vga_switcheroo_process_delayed_switch();
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 2542bd7aa7c7..18ee60378727 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -396,7 +396,6 @@ static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum
static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_set *mca_set, struct ras_err_data *err_data)
{
- struct ras_err_addr err_addr;
struct amdgpu_smuio_mcm_config_info mcm_info;
struct mca_bank_node *node, *tmp;
struct mca_bank_entry *entry;
@@ -421,27 +420,20 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r
continue;
memset(&mcm_info, 0, sizeof(mcm_info));
- memset(&err_addr, 0, sizeof(err_addr));
mcm_info.socket_id = entry->info.socket_id;
mcm_info.die_id = entry->info.aid;
- if (blk == AMDGPU_RAS_BLOCK__UMC) {
- err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
- err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
- err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
- }
-
if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
amdgpu_ras_error_statistic_ue_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
} else {
if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
amdgpu_ras_error_statistic_de_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
else
amdgpu_ras_error_statistic_ce_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
}
amdgpu_mca_bank_set_remove_node(mca_set, node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 1cb1ec7beefe..10b61ff63802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -501,60 +501,50 @@ int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
- struct idr *idp;
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
struct mes_suspend_gang_input input;
- int r, pasid;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
+ input.suspend_all_gangs = 1;
/*
* Avoid taking any other locks under MES lock to avoid circular
* lock dependencies.
*/
amdgpu_mes_lock(&adev->mes);
-
- idp = &adev->mes.pasid_idr;
-
- idr_for_each_entry(idp, process, pasid) {
- list_for_each_entry(gang, &process->gang_list, list) {
- r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
- if (r)
- DRM_ERROR("failed to suspend pasid %d gangid %d",
- pasid, gang->gang_id);
- }
- }
-
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
- return 0;
+ if (r)
+ DRM_ERROR("failed to suspend all gangs");
+
+ return r;
}
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
- struct idr *idp;
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
struct mes_resume_gang_input input;
- int r, pasid;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
+ input.resume_all_gangs = 1;
/*
* Avoid taking any other locks under MES lock to avoid circular
* lock dependencies.
*/
amdgpu_mes_lock(&adev->mes);
-
- idp = &adev->mes.pasid_idr;
-
- idr_for_each_entry(idp, process, pasid) {
- list_for_each_entry(gang, &process->gang_list, list) {
- r = adev->mes.funcs->resume_gang(&adev->mes, &input);
- if (r)
- DRM_ERROR("failed to resume pasid %d gangid %d",
- pasid, gang->gang_id);
- }
- }
-
+ r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
- return 0;
+ if (r)
+ DRM_ERROR("failed to resume all gangs");
+
+ return r;
}
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
@@ -793,6 +783,68 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
return 0;
}
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
+{
+ unsigned long flags;
+ struct amdgpu_mes_queue *queue;
+ struct amdgpu_mes_gang *gang;
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ /*
+ * Avoid taking any other locks under MES lock to avoid circular
+ * lock dependencies.
+ */
+ amdgpu_mes_lock(&adev->mes);
+
+ /* remove the mes gang from idr list */
+ spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
+
+ queue = idr_find(&adev->mes.queue_id_idr, queue_id);
+ if (!queue) {
+ spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+ amdgpu_mes_unlock(&adev->mes);
+ DRM_ERROR("queue id %d doesn't exist\n", queue_id);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+
+ DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
+ queue->doorbell_off);
+
+ gang = queue->gang;
+ queue_input.doorbell_offset = queue->doorbell_off;
+ queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
+
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
+ queue_id);
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return 0;
+}
+
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid)
+{
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ queue_input.queue_type = queue_type;
+ queue_input.use_mmio = true;
+ queue_input.me_id = me_id;
+ queue_input.pipe_id = pipe_id;
+ queue_input.queue_id = queue_id;
+ queue_input.vmid = vmid;
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
+ queue_id);
+ return r;
+}
+
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -838,6 +890,33 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio)
+{
+ struct mes_reset_legacy_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.me_id = ring->me;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+ queue_input.vmid = vmid;
+ queue_input.use_mmio = use_mmio;
+
+ r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset legacy queue\n");
+
+ return r;
+}
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
@@ -1533,7 +1612,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
@@ -1584,6 +1663,19 @@ out:
return r;
}
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
+{
+ uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ bool is_supported = false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63)
+ is_supported = true;
+
+ return is_supported;
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 0bc837dab578..96788c0f42f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -75,6 +75,7 @@ struct amdgpu_mes {
uint32_t sched_version;
uint32_t kiq_version;
+ bool enable_legacy_queue_map;
uint32_t total_max_queue;
uint32_t max_doorbell_slices;
@@ -248,6 +249,18 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
+struct mes_reset_queue_input {
+ uint32_t doorbell_offset;
+ uint64_t gang_context_addr;
+ bool use_mmio;
+ uint32_t queue_type;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint32_t xcc_id;
+ uint32_t vmid;
+};
+
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -279,6 +292,18 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
+struct mes_reset_legacy_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ bool use_mmio;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+ uint32_t vmid;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -347,6 +372,12 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
+
+ int (*reset_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input);
+
+ int (*reset_hw_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -374,6 +405,9 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
struct amdgpu_mes_queue_properties *qprops,
int *queue_id);
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid);
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -381,6 +415,10 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
u64 gpu_addr, u64 seq);
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio);
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
@@ -478,4 +516,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
memalloc_noreclaim_restore(mes->saved_flags);
mutex_unlock(&mes->mutex_hidden);
}
+
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 95d676ee207f..1ca9d4ed8063 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -63,8 +63,6 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
- int hub_inst);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d002b845d8ac..5e3faefc5510 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -51,6 +51,7 @@ struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;
struct edid;
+struct drm_edid;
#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
@@ -326,8 +327,7 @@ struct amdgpu_mode_info {
/* FMT dithering */
struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
- struct edid *bios_hardcoded_edid;
- int bios_hardcoded_edid_size;
+ const struct drm_edid *bios_hardcoded_edid;
/* firmware flags */
u32 firmware_flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bc42ccbde659..d7e27957013f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -90,6 +90,12 @@ struct amdgpu_bo_va {
bool cleared;
bool is_xgmi;
+
+ /*
+ * protected by vm reservation lock
+ * if non-zero, cannot unmap from GPU because user queues may still access it
+ */
+ unsigned int queue_refcount;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 0bb2466d539a..675aa138ea11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -94,7 +94,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n
ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = clamp(DIV_ROUND_CLOSEST(den, post_div), 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index d0307c55da50..61a2f386d9fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1223,11 +1223,11 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
amdgpu_ras_error_statistic_de_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->de_count);
+ &err_info->mcm_info, err_info->de_count);
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->ce_count);
+ &err_info->mcm_info, err_info->ce_count);
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->ue_count);
+ &err_info->mcm_info, err_info->ue_count);
}
} else {
/* for legacy asic path which doesn't has error source info */
@@ -2153,7 +2153,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
/* gpu reset is fallback for failed and default cases.
* For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
*/
- if (poison_stat && !con->is_rma) {
+ if (poison_stat && !amdgpu_ras_is_rma(adev)) {
event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id,
"GPU reset for %s RAS poison consumption is issued!\n",
@@ -2881,9 +2881,6 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
{
mutex_init(&ecc_log->lock);
- /* Set any value as siphash key */
- memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
-
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
ecc_log->prev_de_queried_count = 0;
@@ -2948,7 +2945,7 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && con->is_rma)
+ if (err_cnt && amdgpu_ras_is_rma(adev))
amdgpu_ras_reset_gpu(adev);
amdgpu_ras_schedule_retirement_dwork(con,
@@ -3049,7 +3046,7 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
}
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
- if (reset_flags && !con->is_rma) {
+ if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
@@ -3195,7 +3192,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
* This calling fails when is_rma is true or
* ret != 0.
*/
- if (con->is_rma || ret)
+ if (amdgpu_ras_is_rma(adev) || ret)
goto free;
if (con->eeprom_control.ras_num_recs) {
@@ -3244,7 +3241,7 @@ out:
* Except error threshold exceeding case, other failure cases in this
* function would not fail amdgpu driver init.
*/
- if (!con->is_rma)
+ if (!amdgpu_ras_is_rma(adev))
ret = 0;
else
ret = -EINVAL;
@@ -4287,7 +4284,7 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
/* mode1 is the only selection for RMA status */
- if (ras->is_rma) {
+ if (amdgpu_ras_is_rma(adev)) {
ras->gpu_reset_flags = 0;
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
}
@@ -4611,8 +4608,6 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
if (!err_node)
return NULL;
- INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
-
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
err_data->err_list_count++;
@@ -4622,21 +4617,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
return &err_node->err_info;
}
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
-{
- /* This function will be retired. */
- return;
-}
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
-{
- list_del(&mca_err_addr->node);
- kfree(mca_err_addr);
-}
-
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4650,9 +4633,6 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!err_info)
return -EINVAL;
- if (err_addr && err_addr->err_status)
- amdgpu_ras_add_mca_err_addr(err_info, err_addr);
-
err_info->ue_count += count;
err_data->ue_count += count;
@@ -4660,8 +4640,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4682,8 +4662,8 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4697,9 +4677,6 @@ int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
if (!err_info)
return -EINVAL;
- if (err_addr && err_addr->err_status)
- amdgpu_ras_add_mca_err_addr(err_info, err_addr);
-
err_info->de_count += count;
err_data->de_count += count;
@@ -4771,6 +4748,16 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
dev_info(adev->dev,
"socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
socket_id, aid_id, hbm_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ socket_id, aid_id, fw_status);
}
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
@@ -4837,3 +4824,13 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
va_end(args);
}
+
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->is_rma;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index dcf1f3dbb5c4..669720a9c60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -28,7 +28,6 @@
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/radix-tree.h>
-#include <linux/siphash.h>
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
@@ -47,6 +46,8 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
+#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
+#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
@@ -476,16 +477,15 @@ struct ras_err_pages {
};
struct ras_ecc_err {
- u64 hash_index;
uint64_t status;
uint64_t ipid;
uint64_t addr;
+ uint64_t pa_pfn;
struct ras_err_pages err_pages;
};
struct ras_ecc_log_info {
struct mutex lock;
- siphash_key_t ecc_key;
struct radix_tree_root de_page_tree;
uint64_t de_queried_count;
uint64_t prev_de_queried_count;
@@ -572,19 +572,11 @@ struct ras_fs_data {
char debugfs_name[32];
};
-struct ras_err_addr {
- struct list_head node;
- uint64_t err_status;
- uint64_t err_ipid;
- uint64_t err_addr;
-};
-
struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
u64 de_count;
- struct list_head err_addr_list;
};
struct ras_err_node {
@@ -941,14 +933,14 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
const struct aca_info *aca_info, void *data);
@@ -957,12 +949,6 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
struct aca_handle *handle, char *buf, void *data);
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
- struct ras_err_addr *err_addr);
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
- struct ras_err_addr *mca_err_addr);
-
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
@@ -982,4 +968,5 @@ __printf(3, 4)
void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
#endif
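
The two added bits extend the same boot_error field decoding used by the socket/AID/HBM macros above. A minimal sketch of the decode, assuming only that AMDGPU_GET_REG_FIELD(x, h, l) returns bits h..l of x, which is how the existing macros are consumed in amdgpu_ras_boot_time_error_reporting():

	/* sketch only, not part of the patch */
	u32 boot_error = 0x20000000;	/* bit 29 set, bit 30 clear */

	AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error);	/* -> 1, data abort reported */
	AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error);		/* -> 0, no unknown error */
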
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4ae581f3fcb5..1cb920abc2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -136,6 +136,12 @@ static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *doma
return queue_work(domain->wq, work);
}
+static inline bool amdgpu_reset_pending(struct amdgpu_reset_domain *domain)
+{
+ lockdep_assert_held(&domain->sem);
+ return rwsem_is_contended(&domain->sem);
+}
+
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
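
amdgpu_reset_pending() lets code that already holds the reset domain's read side detect that a reset worker is queued on the write side and back off instead of blocking it. A minimal sketch of that pattern, assuming the caller takes the read lock itself; example_hw_is_idle() is a hypothetical placeholder for whatever condition is being polled:

	/* sketch only, not part of the patch */
	static int example_wait_idle(struct amdgpu_device *adev)
	{
		int i;

		down_read(&adev->reset_domain->sem);
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a pending reset is waiting for the write side: bail out */
			if (amdgpu_reset_pending(adev->reset_domain)) {
				up_read(&adev->reset_domain->sem);
				return -EBUSY;
			}
			if (example_hw_is_idle(adev))	/* hypothetical idle check */
				break;
			udelay(1);
		}
		up_read(&adev->reset_domain->sem);
		return i < adev->usec_timeout ? 0 : -ETIMEDOUT;
	}
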
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e6344a6b0a9f..690976665cf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -144,8 +144,10 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
- count %= ring->funcs->align_mask + 1;
- ring->funcs->insert_nop(ring, count);
+ count &= ring->funcs->align_mask;
+
+ if (count != 0)
+ ring->funcs->insert_nop(ring, count);
mb();
amdgpu_ring_set_wptr(ring);
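
The padding arithmetic above is equivalent but cheaper: since align_mask + 1 is a power of two, the modulo collapses to a mask, and a fully aligned write pointer now skips the insert_nop() call entirely. Worked example, assuming a 64-dword fetch size (align_mask == 0x3f):

	/* wptr already aligned (wptr & 0x3f == 0):
	 *   old: count = 64 - 0 = 64; 64 % 64 = 0;   insert_nop(ring, 0) still called
	 *   new: count = 64 - 0 = 64; 64 & 0x3f = 0;  insert_nop() skipped
	 * wptr & 0x3f == 3:
	 *   both: count = 64 - 3 = 61 dwords of padding emitted
	 */
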
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 582053f1cd56..f93f51002201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -235,6 +235,8 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -334,6 +336,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
+#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
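
The new reset() hook gives each ring type a queue-level recovery path. A hedged sketch of how a scheduler timeout handler could prefer it over a full adapter reset; the surrounding names (job, reset_context) are assumptions for illustration, not the patch's actual wiring:

	/* sketch only */
	if (ring->funcs->reset) {
		r = amdgpu_ring_reset(ring, job->vmid);
		if (!r)
			return DRM_GPU_SCHED_STAT_NOMINAL;	/* queue recovered */
	}
	/* otherwise fall back to a full GPU recovery */
	r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
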
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index d234b7ccfaaf..1c66da1c3fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -410,7 +410,7 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
- if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
return;
amdgpu_ring_mux_end_ib(mux, ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 863b2a34b2d6..b0a8abc7a8ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -22,7 +22,6 @@
* Authors: Andres Rodriguez <andresx7@gmail.com>
*/
-#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/pid.h>
@@ -43,10 +42,10 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
uint32_t id;
int r;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- r = amdgpu_file_to_fpriv(f.file, &fpriv);
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
if (r) {
fdput(f);
return r;
@@ -72,10 +71,10 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
struct amdgpu_ctx *ctx;
int r;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- r = amdgpu_file_to_fpriv(f.file, &fpriv);
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
if (r) {
fdput(f);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d3706a484870..087ce0f6fa07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -115,6 +115,7 @@ struct amdgpu_sdma {
bool has_page_queue;
struct ras_common_if *ras_if;
struct amdgpu_sdma_ras *ras;
+ uint32_t *ip_dump;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 2f84bdb8c594..bb7b9b2eaac1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -196,7 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
if ((err_data->ue_count || err_data->de_count) &&
- (reset || (con && con->is_rma))) {
+ (reset || amdgpu_ras_is_rma(adev))) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
@@ -204,55 +204,6 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
return AMDGPU_RAS_SUCCESS;
}
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- uint32_t reset, uint32_t timeout_ms)
-{
- struct ras_err_data err_data;
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- uint32_t timeout = timeout_ms;
-
- memset(&err_data, 0, sizeof(err_data));
- amdgpu_ras_error_data_init(&err_data);
-
- do {
-
- amdgpu_umc_handle_bad_pages(adev, &err_data);
-
- if (timeout && !err_data.de_count) {
- msleep(1);
- timeout--;
- }
-
- } while (timeout && !err_data.de_count);
-
- if (!timeout)
- dev_warn(adev->dev, "Can't find bad pages\n");
-
- if (err_data.de_count)
- dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
-
- if (obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- obj->err_data.de_count += err_data.de_count;
- }
-
- amdgpu_ras_error_data_fini(&err_data);
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-
- if (reset || (err_data.err_addr_cnt && con && con->is_rma)) {
- con->gpu_reset_flags |= reset;
- amdgpu_ras_reset_gpu(adev);
- }
-
- return 0;
-}
-
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset)
@@ -472,43 +423,6 @@ int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_umc_uint64_cmp(const void *a, const void *b)
-{
- uint64_t *addr_a = (uint64_t *)a;
- uint64_t *addr_b = (uint64_t *)b;
-
- if (*addr_a > *addr_b)
- return 1;
- else if (*addr_a < *addr_b)
- return -1;
- else
- return 0;
-}
-
-/* Use string hash to avoid logging the same bad pages repeatedly */
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
- uint64_t *pfns, int len, uint64_t *val)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- char buf[MAX_UMC_HASH_STRING_SIZE] = {0};
- int offset = 0, i = 0;
- uint64_t hash_val;
-
- if (!pfns || !len)
- return -EINVAL;
-
- sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL);
-
- for (i = 0; i < len; i++)
- offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]);
-
- hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key);
-
- *val = hash_val;
-
- return 0;
-}
-
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
@@ -519,18 +433,10 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
ecc_log = &con->umc_ecc_log;
mutex_lock(&ecc_log->lock);
- ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err);
- if (!ret) {
- struct ras_err_pages *err_pages = &ecc_err->err_pages;
- int i;
-
- /* Reserve memory */
- for (i = 0; i < err_pages->count; i++)
- amdgpu_ras_reserve_page(adev, err_pages->pfn[i]);
-
+ ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
+ if (!ret)
radix_tree_tag_set(ecc_tree,
- ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG);
- }
+ ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
mutex_unlock(&ecc_log->lock);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 5f50c69c3cec..ce4179db2a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -127,13 +127,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- uint32_t reset, uint32_t timeout_ms);
-
int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
- uint64_t *pfns, int len, uint64_t *val);
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index fbc2852278e1..6162582d0aa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -587,7 +587,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
break;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 07d930339b07..31fd30dcd593 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
@@ -1088,7 +1088,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
int r;
job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 968ca2c84ef7..74fdbf71d95b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -749,7 +749,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
int i, r = 0;
job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
for (idx = 0; idx < ib->length_dw;) {
uint32_t len = amdgpu_ib_get_value(ib, idx);
@@ -1044,7 +1043,6 @@ out:
if (!r) {
/* No error, free all destroyed handle slots */
tmp = destroyed;
- amdgpu_ib_free(p->adev, ib, NULL);
} else {
/* Error during parsing, free all allocated handle slots */
tmp = allocated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c87d68d4be53..2a1f3dbb14d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -330,6 +330,9 @@ struct amdgpu_vcn {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
bool using_unified_queue;
+
+ /* IP reg dump */
+ uint32_t *ip_dump;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b287a82e6177..b6397d3229e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -33,6 +33,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"
@@ -849,6 +850,13 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
+{
+ /* stop the data exchange thread */
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
+}
+
void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b42a8854dca0..b650a2032c42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -376,6 +376,7 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
uint32_t ucode_id);
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
void amdgpu_virt_post_reset(struct amdgpu_device *adev);
bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 6415d0d039e1..e5f508d34ed8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -549,7 +549,7 @@ static int amdgpu_vkms_sw_fini(void *handle)
adev->mode_info.mode_config_initialized = false;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
kfree(adev->amdgpu_vkms_output);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a060c28f0877..2452dfa6314f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -681,6 +681,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
+ if (adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader &&
+ job->enforce_isolation)
+ ring->funcs->emit_cleaner_shader(ring);
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
@@ -742,6 +747,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_emit_switch_buffer(ring);
}
+
amdgpu_ring_ib_end(ring);
return 0;
}
@@ -838,7 +844,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ r = vm->update_funcs->prepare(&params, NULL);
if (r)
goto error;
@@ -902,10 +908,12 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
{
struct amdgpu_vm *vm = params->vm;
- if (!fence || !*fence)
+ tlb_cb->vm = vm;
+ if (!fence || !*fence) {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
return;
+ }
- tlb_cb->vm = vm;
if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
amdgpu_vm_tlb_seq_cb)) {
dma_fence_put(vm->last_tlb_flush);
@@ -933,7 +941,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
* @unlocked: unlocked invalidation during MM callback
* @flush_tlb: trigger tlb invalidation after update completed
* @allow_override: change MTYPE for local NUMA nodes
- * @resv: fences we need to sync to
+ * @sync: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -949,16 +957,16 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
* 0 for success, negative error code for failure.
*/
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
- struct dma_resv *resv, uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset, uint64_t vram_base,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_tlb_seq_struct *tlb_cb;
struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
- enum amdgpu_sync_mode sync_mode;
int r, idx;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -991,14 +999,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.allow_override = allow_override;
INIT_LIST_HEAD(&params.tlb_flush_waitlist);
- /* Implicitly sync to command submissions in the same VM before
- * unmapping. Sync to moving fences before mapping.
- */
- if (!(flags & AMDGPU_PTE_VALID))
- sync_mode = AMDGPU_SYNC_EQ_OWNER;
- else
- sync_mode = AMDGPU_SYNC_EXPLICIT;
-
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
@@ -1013,7 +1013,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, resv, sync_mode);
+ r = vm->update_funcs->prepare(&params, sync);
if (r)
goto error_free;
@@ -1155,23 +1155,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
+ struct dma_fence **last_update;
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
- struct dma_fence **last_update;
+ struct amdgpu_sync sync;
bool flush_tlb = clear;
- bool uncached;
- struct dma_resv *resv;
uint64_t vram_base;
uint64_t flags;
+ bool uncached;
int r;
+ amdgpu_sync_create(&sync);
if (clear || !bo) {
mem = NULL;
- resv = vm->root.bo->tbo.base.resv;
+
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- resv = bo->tbo.base.resv;
if (obj->import_attach && bo_va->is_xgmi) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
@@ -1185,6 +1192,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT))
pages_addr = bo->tbo.ttm->dma_address;
+
+ /* Implicitly sync to moving fences before mapping anything */
+ r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+ AMDGPU_SYNC_EXPLICIT, vm);
+ if (r)
+ goto error_free;
}
if (bo) {
@@ -1234,12 +1247,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_update(mapping);
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
- !uncached, resv, mapping->start, mapping->last,
- update_flags, mapping->offset,
- vram_base, mem, pages_addr,
- last_update);
+ !uncached, &sync, mapping->start,
+ mapping->last, update_flags,
+ mapping->offset, vram_base, mem,
+ pages_addr, last_update);
if (r)
- return r;
+ goto error_free;
}
/* If the BO is not in its preferred location add it back to
@@ -1267,7 +1280,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_mapping(mapping);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
/**
@@ -1414,25 +1429,34 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
- struct dma_resv *resv = vm->root.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
- uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
+ struct amdgpu_sync sync;
int r;
+
+ /*
+ * Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ amdgpu_sync_create(&sync);
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
+
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
- resv, mapping->start, mapping->last,
- init_pte_value, 0, 0, NULL, NULL,
- &f);
+ &sync, mapping->start, mapping->last,
+ 0, 0, 0, NULL, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
- return r;
+ goto error_free;
}
}
@@ -1443,7 +1467,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
dma_fence_put(f);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
@@ -2218,7 +2244,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
(1 << 30) - 1) >> 30;
vm_size = roundup_pow_of_two(
- min(max(phys_ram_gb * 3, min_vm_size), max_size));
+ clamp(phys_ram_gb * 3, min_vm_size, max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
@@ -2421,6 +2447,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
vm->is_compute_context = false;
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2485,6 +2513,7 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
amdgpu_vm_fini_entities(vm);
return r;
@@ -2641,6 +2670,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
}
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
}
/**
@@ -2754,6 +2784,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* amdgpu_vm_handle_fault - graceful handling of VM faults.
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
+ * @ts: Timestamp of the fault
* @vmid: VMID, only used for GFX 9.4.3.
* @node_id: Node_id received in IH cookie. Only applicable for
* GFX 9.4.3.
@@ -2764,7 +2795,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* shouldn't be reported any more.
*/
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 vmid, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
bool write_fault)
{
bool is_compute_context = false;
@@ -2790,7 +2821,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
addr /= AMDGPU_GPU_PAGE_SIZE;
if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
- node_id, addr, write_fault)) {
+ node_id, addr, ts, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
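
The common thread in the amdgpu_vm.c hunks above: callers now build an amdgpu_sync container up front, decide themselves which reservation fences go into it, and pass it down instead of a (resv, sync_mode) pair. A condensed sketch of the unmap-side pattern, assuming start/last/fence are already set up by the caller:

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	/* implicitly sync to submissions in the same VM before unmapping */
	r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
			     AMDGPU_SYNC_EQ_OWNER, vm);
	if (!r)
		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
					   &sync, start, last, 0, 0, 0,
					   NULL, NULL, &fence);
	amdgpu_sync_free(&sync);
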
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 046949c4b695..52dd7cdfdc81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -304,8 +304,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode);
+ int (*prepare)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_sync *sync);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -505,9 +505,10 @@ int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
- struct dma_resv *resv, uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset, uint64_t vram_base,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
@@ -558,7 +559,7 @@ amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 vmid, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 3895bd7d176a..0c1ef5850a5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -39,20 +39,18 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
* amdgpu_vm_cpu_prepare - prepare page table update with the CPU
*
* @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: sync obj with fences to wait on
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode)
+ struct amdgpu_sync *sync)
{
- if (!resv)
+ if (!sync)
return 0;
- return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
+ return amdgpu_sync_wait(sync, true);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index e39d6e7643bf..a076f43097e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -403,7 +403,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ r = vm->update_funcs->prepare(&params, NULL);
if (r)
goto exit;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 9b748d7058b5..4772fba33285 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -77,32 +77,24 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
*
* @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: amdgpu_sync object with fences to wait for
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode)
+ struct amdgpu_sync *sync)
{
- struct amdgpu_sync sync;
int r;
r = amdgpu_vm_sdma_alloc_job(p, 0);
if (r)
return r;
- if (!resv)
+ if (!sync)
return 0;
- amdgpu_sync_create(&sync);
- r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
- if (!r)
- r = amdgpu_sync_push_to_job(&sync, p->job);
- amdgpu_sync_free(&sync);
-
+ r = amdgpu_sync_push_to_job(sync, p->job);
if (r) {
p->num_dw_left = 0;
amdgpu_job_free(p->job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 821ba2309dec..7de449fae1e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1389,10 +1389,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
case ACA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
+ amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
break;
case ACA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
+ amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index fb2b394bb9c5..6e9eeaeb3de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -213,7 +213,7 @@ struct amd_sriov_msg_pf2vf_info {
uint32_t gpu_capacity;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
-};
+} __packed;
struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte */
@@ -273,7 +273,7 @@ struct amd_sriov_msg_vf2pf_info {
uint32_t mes_info_size;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
-};
+} __packed;
/* mailbox message send from guest to host */
enum amd_sriov_mailbox_request_message {
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 228fd4dd32f1..26e2188101e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -75,6 +75,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_mask;
ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
return;
@@ -103,6 +105,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
ring->xcp_id = xcp_id;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 25feab188dfe..a51f3414b65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -215,7 +215,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
dig->bl_dev = bd;
bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("amdgpu atom DIG backlight initialized\n");
@@ -2064,27 +2064,25 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
- struct edid *edid;
- int edid_size =
- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
- edid = kmalloc(edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
- fake_edid_record->ucFakeEDIDLength);
-
- if (drm_edid_is_valid(edid)) {
- adev->mode_info.bios_hardcoded_edid = edid;
- adev->mode_info.bios_hardcoded_edid_size = edid_size;
- } else
- kfree(edid);
- }
+ const struct drm_edid *edid;
+ int edid_size;
+
+ if (fake_edid_record->ucFakeEDIDLength == 128)
+ edid_size = fake_edid_record->ucFakeEDIDLength;
+ else
+ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+ edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+ if (drm_edid_valid(edid))
+ adev->mode_info.bios_hardcoded_edid = edid;
+ else
+ drm_edid_free(edid);
+ record += struct_size(fake_edid_record,
+ ucFakeEDIDString,
+ edid_size);
+ } else {
+ /* empty fake edid record must be 3 bytes long */
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
- record += fake_edid_record->ucFakeEDIDLength ?
- struct_size(fake_edid_record,
- ucFakeEDIDString,
- fake_edid_record->ucFakeEDIDLength) :
- /* empty fake edid record must be 3 bytes long */
- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 55982c0064b5..06088d52d81c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -364,6 +364,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index dddb5fe16f2c..742adbc460c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2846,7 +2846,7 @@ static int dce_v10_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 11780e4d7e9f..8d46ebadfa46 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2973,7 +2973,7 @@ static int dce_v11_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 05c0df97f01d..f08dc6a3886f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2745,7 +2745,7 @@ static int dce_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index dc73e301d937..a6a3adf2ae13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2766,7 +2766,7 @@ static int dce_v8_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e444e621ddaa..45ed97038df0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4649,7 +4649,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -4662,7 +4662,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -4675,7 +4675,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -4741,6 +4741,13 @@ static int gfx_v10_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_10_1__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -5213,26 +5220,74 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
}
+static u32 gfx_v10_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v10_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
@@ -6637,13 +6692,13 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6695,7 +6750,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_gfx_init_queue(ring);
+ r = gfx_v10_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -6975,13 +7030,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!restore && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -7043,7 +7098,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring);
+ r = gfx_v10_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -7369,6 +7424,7 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
/* WA added for Vangogh asic fixing the SMU suspend failure
* It needs to set power gating again during gfxoff control
@@ -7679,6 +7735,10 @@ static int gfx_v10_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
return 0;
}
@@ -8889,7 +8949,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -9074,12 +9136,39 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -9088,17 +9177,75 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v10_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -9122,8 +9269,8 @@ static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -9150,6 +9297,15 @@ static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v10_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v10_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -9244,6 +9400,174 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization: one NOP packet with a count of up to 0x3ffe; pad the remainder one NOP at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ u64 addr;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ addr = amdgpu_bo_gpu_offset(ring->mqd_obj) +
+ offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active);
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (ring->pipe == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
+
+ gfx_v10_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ 0, 1, 0x20);
+ gfx_v10_0_ring_emit_reg_wait(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ /* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -9435,7 +9759,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
.test_ring = gfx_v10_0_ring_test_ring,
.test_ib = gfx_v10_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v10_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v10_0_ring_emit_sb,
.emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
@@ -9447,6 +9771,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9476,12 +9801,14 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
.test_ring = gfx_v10_0_ring_test_ring,
.test_ib = gfx_v10_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v10_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -9536,6 +9863,11 @@ static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
.process = gfx_v10_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v10_0_bad_op_irq_funcs = {
+ .set = gfx_v10_0_set_bad_op_fault_state,
+ .process = gfx_v10_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
.set = gfx_v10_0_set_priv_inst_fault_state,
.process = gfx_v10_0_priv_inst_irq,
@@ -9557,6 +9889,9 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v10_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index dcef39907449..d3e8be82a172 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -481,6 +481,24 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
amdgpu_ring_write(ring, inv); /* poll interval */
}
+static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nops - 1 NOP packet's */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -1484,7 +1502,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -1497,7 +1515,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -1510,7 +1528,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -1569,6 +1587,13 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
@@ -1953,26 +1978,74 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
gfx_v11_0_init_gds_vmid(adev);
}
+static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
@@ -3911,13 +3984,13 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3953,7 +4026,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_gfx_init_queue(ring);
+ r = gfx_v11_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4248,13 +4321,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -4318,7 +4391,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring);
+ r = gfx_v11_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4598,6 +4671,7 @@ static int gfx_v11_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -4668,8 +4742,8 @@ static int gfx_v11_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
- int req)
+int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req)
{
u32 i, tmp, val;
@@ -4707,6 +4781,8 @@ static int gfx_v11_0_soft_reset(void *handle)
int r, i, j, k;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
@@ -4714,8 +4790,6 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_set_safe_mode(adev, 0);
-
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
@@ -4740,8 +4814,10 @@ static int gfx_v11_0_soft_reset(void *handle)
mutex_unlock(&adev->srbm_mutex);
/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
- r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ r = gfx_v11_0_request_gfx_index_mutex(adev, true);
if (r) {
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
return r;
}
@@ -4755,7 +4831,8 @@ static int gfx_v11_0_soft_reset(void *handle)
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
/* release the gfx mutex */
- r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
+ r = gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
if (r) {
DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
return r;
@@ -4823,7 +4900,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return gfx_v11_0_cp_resume(adev);
}
@@ -4954,6 +5031,9 @@ static int gfx_v11_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
return 0;
}
@@ -5843,6 +5923,9 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -6008,7 +6091,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -6201,15 +6286,42 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6218,17 +6330,75 @@ static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6252,8 +6422,8 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -6281,6 +6451,15 @@ static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v11_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -6367,6 +6546,99 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* make sure dequeue is complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6556,7 +6828,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
.test_ring = gfx_v11_0_ring_test_ring,
.test_ib = gfx_v11_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v11_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
@@ -6568,6 +6840,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6598,12 +6871,14 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
.test_ring = gfx_v11_0_ring_test_ring,
.test_ib = gfx_v11_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v11_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
@@ -6658,6 +6933,11 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
.process = gfx_v11_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
+ .set = gfx_v11_0_set_bad_op_fault_state,
+ .process = gfx_v11_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.set = gfx_v11_0_set_priv_inst_fault_state,
.process = gfx_v11_0_priv_inst_irq,
@@ -6675,6 +6955,9 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
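/*
 * Illustrative sketch, not part of the patch: gfx_v11_ring_insert_nop()
 * collapses a run of NOPs into one PACKET3_NOP whose header covers the filler
 * dwords, so the CP skips the whole burst instead of decoding num_nop separate
 * NOPs.  Assuming the usual PM4 type-3 convention that the count field is
 * "payload dwords minus one", a header of PACKET3(PACKET3_NOP, num_nop - 2)
 * plus num_nop - 1 filler dwords occupies exactly num_nop ring dwords.  The
 * macros below are stand-ins, not the real amdgpu packet definitions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PACKET3_NOP	0x10
#define FAKE_PACKET3(op, cnt)	((3u << 30) | ((op) << 8) | ((cnt) & 0x3fff))
#define FAKE_NOP_DWORD		FAKE_PACKET3(FAKE_PACKET3_NOP, 0x3fff)

static uint32_t emit_nop_burst(uint32_t *ring, uint32_t num_nop)
{
	uint32_t i, w = 0;

	if (num_nop == 1) {		/* the header alone is already a NOP */
		ring[w++] = FAKE_NOP_DWORD;
		return w;
	}

	/* header advertising the filler dwords (clamped to the 14-bit count) */
	ring[w++] = FAKE_PACKET3(FAKE_PACKET3_NOP,
				 num_nop - 2 < 0x3ffe ? num_nop - 2 : 0x3ffe);

	for (i = 1; i < num_nop; i++)	/* num_nop - 1 filler dwords */
		ring[w++] = FAKE_NOP_DWORD;

	return w;
}

int main(void)
{
	uint32_t ring[64], n;

	for (n = 1; n <= 16; n++)
		assert(emit_nop_burst(ring, n) == n);	/* burst uses exactly n dwords */

	printf("NOP burst accounting checks out for 1..16 NOPs\n");
	return 0;
}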
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
index 10cfc29c27c9..157a5c812259 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
@@ -26,4 +26,7 @@
extern const struct amdgpu_ip_block_version gfx_v11_0_ip_block;
+int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req);
+
#endif
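/*
 * Illustrative sketch, not part of the patch: the header now exports
 * gfx_v11_0_request_gfx_index_mutex() with a bool acquire/release argument so
 * the soft-reset path can wrap the hardware gfx index mutex in a driver-side
 * lock (reset_sem_mutex), keeping concurrent callers from interleaving the
 * request/release pair.  Below is a minimal model of that "software lock
 * around a polled hardware mutex" shape; the helpers are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reset_sem_mutex = PTHREAD_MUTEX_INITIALIZER;

static int request_gfx_index_mutex(bool req)
{
	/* the real helper polls a status register with a usec timeout */
	printf("%s the gfx index mutex\n", req ? "acquire" : "release");
	return 0;
}

static int do_vmid_reset_sequence(void)
{
	int r;

	pthread_mutex_lock(&reset_sem_mutex);
	r = request_gfx_index_mutex(true);
	if (r) {
		pthread_mutex_unlock(&reset_sem_mutex);
		return r;	/* could not grab the HW mutex */
	}

	printf("CP_VMID_RESET programmed while holding both locks\n");

	request_gfx_index_mutex(false);
	pthread_mutex_unlock(&reset_sem_mutex);
	return 0;
}

int main(void)
{
	return do_vmid_reset_sequence();
}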
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 9cd221ed240c..999bb3cc88b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -97,7 +97,7 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
- if (con && !con->is_rma)
+ if (con && !amdgpu_ras_is_rma(adev))
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 2c611b8577a7..d1357c01eb39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -1281,7 +1281,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -1294,7 +1294,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -1307,7 +1307,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -1355,6 +1355,13 @@ static int gfx_v12_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
@@ -1686,26 +1693,68 @@ static void gfx_v12_0_constants_init(struct amdgpu_device *adev)
gfx_v12_0_init_compute_vmid(adev);
}
+static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v12_0_init_csb(struct amdgpu_device *adev)
@@ -2867,13 +2916,13 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -2909,7 +2958,7 @@ static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_gfx_init_queue(ring);
+ r = gfx_v12_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3005,7 +3054,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
@@ -3213,13 +3262,13 @@ static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3283,7 +3332,7 @@ static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring);
+ r = gfx_v12_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3553,6 +3602,7 @@ static int gfx_v12_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3672,6 +3722,10 @@ static int gfx_v12_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
return 0;
}
@@ -4447,6 +4501,9 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -4563,7 +4620,9 @@ static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -4747,15 +4806,42 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -4764,17 +4850,75 @@ static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -4798,8 +4942,8 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -4827,6 +4971,15 @@ static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v12_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -4859,6 +5012,24 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization up to 0x3ffe, followed by the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4989,6 +5160,93 @@ static void gfx_v12_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r) {
+ dev_err(adev->dev, "reset via MES failed %d\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r, i;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
.name = "gfx_v12_0",
.early_init = gfx_v12_0_early_init,
@@ -5040,7 +5298,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
.test_ring = gfx_v12_0_ring_test_ring,
.test_ib = gfx_v12_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v12_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
.init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
@@ -5051,6 +5309,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5078,12 +5337,14 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
.test_ring = gfx_v12_0_ring_test_ring,
.test_ib = gfx_v12_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v12_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
@@ -5138,6 +5399,11 @@ static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = {
.process = gfx_v12_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = {
+ .set = gfx_v12_0_set_bad_op_fault_state,
+ .process = gfx_v12_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = {
.set = gfx_v12_0_set_priv_inst_fault_state,
.process = gfx_v12_0_priv_inst_irq,
@@ -5151,6 +5417,9 @@ static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs;
}
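/*
 * Illustrative sketch, not part of the patch: the new compute-queue reset
 * paths all share the same "request a dequeue, then poll CP_HQD_ACTIVE until
 * it drops or the usec timeout expires" step before the MQD is reinitialised
 * and the queue is remapped.  A standalone model of that poll-with-timeout
 * idiom, with a fake register read standing in for RREG32_SOC15():
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define USEC_TIMEOUT	100000

static uint32_t read_hqd_active(void)
{
	/* pretend the dequeue request completes after a few polls */
	static int polls;

	return ++polls > 3 ? 0 : 1;
}

static int wait_hqd_idle(void)
{
	int i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (!(read_hqd_active() & 1))
			return 0;	/* queue drained, safe to rewrite the MQD */
		/* udelay(1) in the real code */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	int r = wait_hqd_idle();

	printf(r ? "hqd failed to deactivate\n" : "hqd deactivated, reinit and remap\n");
	return r ? 1 : 0;
}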
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d84589137df9..f146806c4633 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2114,6 +2114,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
*/
@@ -2133,7 +2135,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
- EVENT_INDEX(5)));
+ EVENT_INDEX(5) |
+ (exec ? EOP_EXEC : 0)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -4921,6 +4924,76 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
+static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
+static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+ return -ENOMEM;
+ gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+ gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -4972,6 +5045,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
+ .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5002,6 +5076,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
+ .soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};
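/*
 * Illustrative sketch, not part of the patch: the legacy (gfx v7/v8/v9) queue
 * reset asks the CP to reset a single VMID by setting bit "1 << vmid" in the
 * RESET_REQUEST field of CP_VMID_RESET through the KIQ, then has the gfx ring
 * emit a fence (with the new EXEC flag), wait for the masked register to read
 * back zero, and clear it.  A tiny standalone model of the field packing; the
 * mask/shift values here are hypothetical, not the real register layout.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: RESET_REQUEST occupies bits [15:0] */
#define RESET_REQUEST_SHIFT	0
#define RESET_REQUEST_MASK	0x0000ffffu

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	unsigned int vmid = 5;
	uint32_t tmp = set_field(0, RESET_REQUEST_MASK, RESET_REQUEST_SHIFT, 1u << vmid);

	assert(tmp == (1u << vmid));	/* only that VMID's request bit is set */
	printf("CP_VMID_RESET <- 0x%08x (reset request for vmid %u)\n", tmp, vmid);

	/* afterwards the ring waits for the masked register to read back 0,
	 * then writes 0 to clear it explicitly */
	return 0;
}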
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b4658c7db0e1..bc8295812cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6149,6 +6149,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
@@ -6172,7 +6173,8 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
- EVENT_INDEX(5)));
+ EVENT_INDEX(5) |
+ (exec ? EOP_EXEC : 0)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -6380,6 +6382,34 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
+static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6856,6 +6886,48 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
+static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+ return -ENOMEM;
+ gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+ gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6923,6 +6995,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
+ .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6955,6 +7028,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
.emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2929c8972ea7..23f0573ae47b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -50,6 +50,7 @@
#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
+#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
@@ -893,10 +894,18 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
@@ -906,8 +915,8 @@ static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -1004,12 +1013,47 @@ static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
+
+static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned i;
+
+ /* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+ /* wait until dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
+
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_0_kiq_set_resources,
.kiq_map_queues = gfx_v9_0_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
.kiq_query_status = gfx_v9_0_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -1301,6 +1345,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
+ /* https://bbs.openkylin.top/t/topic/171497 */
+ { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
+ /* HP 705G4 DM with R5 2400G */
+ { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
{ 0, 0, 0, 0, 0 },
};
@@ -2129,7 +2177,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -2142,7 +2190,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -2174,6 +2222,12 @@ static int gfx_v9_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -2182,6 +2236,13 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -2329,6 +2390,10 @@ static int gfx_v9_0_sw_init(void *handle)
gfx_v9_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2364,6 +2429,8 @@ static int gfx_v9_0_sw_fini(void *handle)
}
gfx_v9_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -2634,7 +2701,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- if(adev->gfx.num_gfx_rings)
+ if (adev->gfx.num_gfx_rings)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
@@ -3735,7 +3802,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -3747,8 +3814,8 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!tmp_mqd->cp_hqd_pq_control ||
- (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3812,7 +3879,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring);
+ r = gfx_v9_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3908,6 +3975,9 @@ static int gfx_v9_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (!amdgpu_sriov_vf(adev))
gfx_v9_0_init_golden_registers(adev);
@@ -3937,6 +4007,7 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
@@ -4747,6 +4818,10 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
r = gfx_v9_0_ecc_late_init(handle);
if (r)
return r;
@@ -5858,7 +5933,9 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
@@ -5929,17 +6006,95 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
}
}
+static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6121,6 +6276,15 @@ static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v9_0_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -7001,6 +7165,157 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization up to 0x3ffe, followed by the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v9_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
+ return -ENOMEM;
+ gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v9_0_ring_emit_reg_wait(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
+ gfx_v9_0_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
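+	/* unmap the queue via KIQ, wait for the HQD to go idle, re-init the MQD, then remap via KIQ */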
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+	/* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+	if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ DRM_ERROR("fail to remap queue\n");
+ return r;
+ }
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -7083,6 +7398,13 @@ static void gfx_v9_ip_dump(void *handle)
}
+static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -7132,7 +7454,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7141,7 +7464,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
@@ -7153,6 +7476,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .reset = gfx_v9_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
@@ -7185,7 +7512,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7195,7 +7523,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
- .insert_nop = amdgpu_sw_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
@@ -7209,6 +7537,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.patch_cntl = gfx_v9_0_ring_patch_cntl,
.patch_de = gfx_v9_0_ring_patch_de_meta,
.patch_ce = gfx_v9_0_ring_patch_ce_meta,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -7229,7 +7560,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
- 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
+ 15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7239,13 +7571,18 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
+ .reset = gfx_v9_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -7303,6 +7640,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
.process = gfx_v9_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
+ .set = gfx_v9_0_set_bad_op_fault_state,
+ .process = gfx_v9_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.set = gfx_v9_0_set_priv_inst_fault_state,
.process = gfx_v9_0_priv_inst_irq,
@@ -7322,6 +7664,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
new file mode 100644
index 000000000000..36c0292b5110
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/* Define the cleaner shader gfx_9_0 */
+static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
+ /* Add the cleaner shader code here */
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 20ea6cb01edf..408e5600bb61 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -37,6 +37,7 @@
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
+#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"
@@ -63,6 +64,98 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
#define NORMALIZE_XCC_REG_OFFSET(offset) \
(offset & 0xFFFF)
+static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
+ /* SE status registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
+};
+
+static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
+ /* compute queue registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+};
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -71,10 +164,18 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
+static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
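+	/* the SET_RESOURCES packet takes this address in 256-byte units, hence the >> 8 */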
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
@@ -84,8 +185,8 @@ static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -182,12 +283,46 @@ static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
+static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned i;
+
+	/* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
+
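+	/* issue a dequeue request plus a per-queue SPI reset, then poll CP_HQD_ACTIVE until the HQD goes idle */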
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
+		/* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else {
+		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
+
static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
.kiq_query_status = gfx_v9_4_3_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -885,11 +1020,59 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio, NULL);
}
+static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+ uint32_t *ptr, num_xcc, inst;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
+ adev->gfx.ip_dump_core = NULL;
+ } else {
+ adev->gfx.ip_dump_core = ptr;
+ }
+
+ /* Allocate memory for compute queue registers for all the instances */
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+
+ ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
+ adev->gfx.ip_dump_compute_queues = NULL;
+ } else {
+ adev->gfx.ip_dump_compute_queues = ptr;
+ }
+}
+
static int gfx_v9_4_3_sw_init(void *handle)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
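+		/* the cleaner shader path requires MEC firmware version 153 or newer */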
+ if (adev->gfx.mec_fw_version >= 153) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -901,6 +1084,13 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -976,10 +1166,19 @@ static int gfx_v9_4_3_sw_init(void *handle)
return r;
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev)) {
r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+ }
- return r;
+ gfx_v9_4_3_alloc_ip_dump(adev);
+
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
+ return 0;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@@ -997,11 +1196,17 @@ static int gfx_v9_4_3_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev, i);
}
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
if (!amdgpu_sriov_vf(adev))
amdgpu_gfx_sysfs_fini(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
+ kfree(adev->gfx.ip_dump_core);
+ kfree(adev->gfx.ip_dump_compute_queues);
return 0;
}
@@ -1910,7 +2115,7 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -1922,8 +2127,8 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!tmp_mqd->cp_hqd_pq_control ||
- (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -2008,7 +2213,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -2139,6 +2344,9 @@ static int gfx_v9_4_3_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (!amdgpu_sriov_vf(adev))
gfx_v9_4_3_init_golden_registers(adev);
@@ -2162,6 +2370,7 @@ static int gfx_v9_4_3_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
@@ -2327,6 +2536,10 @@ static int gfx_v9_4_3_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
if (adev->gfx.ras &&
adev->gfx.ras->enable_watchdog_timer)
adev->gfx.ras->enable_watchdog_timer(adev);
@@ -2833,6 +3046,24 @@ static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
+static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
+ unsigned vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t value = 0;
+
+ if (!adev->debug_exp_resets)
+ return;
+
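+	/* ask the SQ to kill the waves of the hung VMID; do it under RLC safe mode */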
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
+}
+
static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
struct amdgpu_device *adev, int me, int pipe,
enum amdgpu_interrupt_state state, int xcc_id)
@@ -2886,21 +3117,103 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
}
}
+static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int xcc_id, int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
- int i, num_xcc;
+ u32 mec_int_cntl_reg, mec_int_cntl;
+ int i, j, k, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- for (i = 0; i < num_xcc; i++)
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (j = 0; j < adev->gfx.mec.num_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ /* MECs start at 1 */
+ mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
+
+ if (mec_int_cntl_reg) {
+ mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ?
+ 1 : 0);
+ WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 mec_int_cntl_reg, mec_int_cntl;
+ int i, j, k, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < num_xcc; i++) {
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (j = 0; j < adev->gfx.mec.num_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ /* MECs start at 1 */
+ mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
+
+ if (mec_int_cntl_reg) {
+ mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ?
+ 1 : 0);
+ WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
+ }
+ }
+ }
+ }
break;
default:
break;
@@ -3061,6 +3374,15 @@ static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -3147,6 +3469,183 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
+ uint32_t pipe, uint32_t queue,
+ uint32_t xcc_id)
+{
+ int i, r;
+	/* make sure dequeue is complete */
+ gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ else
+ r = 0;
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
+
+	return r;
+}
+
+static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
+{
+	/* TODO: check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ adev->gfx.mec_fw_version >= 0x0000009b)
+ return true;
+ else
+		dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n");
+
+ return false;
+}
+
+static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe, clean_pipe;
+ int r;
+
+ if (!gfx_v9_4_3_pipe_reset_support(adev))
+ return -EINVAL;
+
+ gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+
+ reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (ring->pipe)
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ else
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ }
+
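+	/* pulse the reset: write CP_MEC_CNTL with the pipe reset bit set, then
+	 * restore the original value to release the pipe
+	 */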
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ return r;
+}
+
+static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int r;
+
+ if (!adev->debug_exp_resets)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
+ ring->name);
+ goto pipe_reset;
+ }
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ if (r)
+ dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
+
+pipe_reset:
+	if (r) {
+ r = gfx_v9_4_3_reset_hw_pipe(ring);
+		dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name,
+				r ? "failed" : "succeeded");
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+	if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "fail to remap queue\n");
+ return r;
+ }
+ return amdgpu_ring_test_ring(ring);
+}
+
enum amdgpu_gfx_cp_ras_mem_id {
AMDGPU_GFX_CP_MEM1 = 1,
AMDGPU_GFX_CP_MEM2,
@@ -3959,8 +4458,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
/* the caller should make sure initialize value of
* err_data->ue_count and err_data->ce_count
*/
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
}
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
@@ -4062,6 +4561,151 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}
+static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+	/* Max HW optimization up to 0x3ffe; the remaining NOPs are emitted one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+	/* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i, j, k;
+ uint32_t xcc_id, xcc_offset, inst_offset;
+ uint32_t num_xcc, reg, num_inst;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+
+ if (!adev->gfx.ip_dump_core)
+ return;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ drm_printf(p, "Number of Instances:%d\n", num_xcc);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count;
+ drm_printf(p, "\nInstance id:%d\n", xcc_id);
+ for (i = 0; i < reg_count; i++)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_reg_list_9_4_3[i].reg_name,
+ adev->gfx.ip_dump_core[xcc_offset + i]);
+ }
+
+ /* print compute queue registers for all instances */
+ if (!adev->gfx.ip_dump_compute_queues)
+ return;
+
+ num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
+ num_xcc,
+ adev->gfx.mec.num_mec,
+ adev->gfx.mec.num_pipe_per_mec,
+ adev->gfx.mec.num_queue_per_pipe);
+
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count * num_inst;
+ inst_offset = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+ drm_printf(p,
+ "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
+ xcc_id, i, j, k);
+ for (reg = 0; reg < reg_count; reg++) {
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ }
+ inst_offset += reg_count;
+ }
+ }
+ }
+ }
+}
+
+static void gfx_v9_4_3_ip_dump(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i, j, k;
+ uint32_t num_xcc, reg, num_inst;
+ uint32_t xcc_id, xcc_offset, inst_offset;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+
+ if (!adev->gfx.ip_dump_core)
+ return;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count;
+ for (i = 0; i < reg_count; i++)
+ adev->gfx.ip_dump_core[xcc_offset + i] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
+ GET_INST(GC, xcc_id)));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+
+ /* dump compute queue registers for all instances */
+ if (!adev->gfx.ip_dump_compute_queues)
+ return;
+
+ num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->srbm_mutex);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count * num_inst;
+ inst_offset = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+ /* ME0 is for GFX so start from 1 for CP */
+ soc15_grbm_select(adev, 1 + i, j, k, 0,
+ GET_INST(GC, xcc_id));
+
+ for (reg = 0; reg < reg_count; reg++) {
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
+ }
+ inst_offset += reg_count;
+ }
+ }
+ }
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.name = "gfx_v9_4_3",
.early_init = gfx_v9_4_3_early_init,
@@ -4078,8 +4722,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
.set_powergating_state = gfx_v9_4_3_set_powergating_state,
.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = gfx_v9_4_3_ip_dump,
+ .print_ip_state = gfx_v9_4_3_ip_print,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
@@ -4101,7 +4745,8 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_4_3_emit_mem_sync */
5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
- 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
.emit_fence = gfx_v9_4_3_ring_emit_fence,
@@ -4111,13 +4756,18 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
.test_ring = gfx_v9_4_3_ring_test_ring,
.test_ib = gfx_v9_4_3_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_4_3_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v9_4_3_ring_soft_recovery,
.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
+ .reset = gfx_v9_4_3_reset_kcq,
+ .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
@@ -4172,6 +4822,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
.process = gfx_v9_4_3_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
+ .set = gfx_v9_4_3_set_bad_op_fault_state,
+ .process = gfx_v9_4_3_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
.set = gfx_v9_4_3_set_priv_inst_fault_state,
.process = gfx_v9_4_3_priv_inst_irq,
@@ -4185,6 +4840,9 @@ static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm
new file mode 100644
index 000000000000..d5325ef80ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and the lower shader main to main_1
+
+// MI300 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+// Waves in the workgroup share the 64KB of LDS
+// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+// Each wave clears 128 VGPRs, so all 512 in the SIMD
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+// Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
+// Waves are allocating 96 SGPRs
+// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched CP will send unhalt command
+// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
+
+shader main
+ asic(MI300)
+ type(CS)
+ wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+ s_cmp_eq_u32 s0, 1                         // If bit0 of sgpr0 is set (FW sets it via COMPUTE_USER_DATA_3), clean VGPRs and LDS
+ s_cbranch_scc0  label_0023                 // Clean VGPRs and LDS only when sgpr0 of the wave is set, scc = (s0 == 1)
+ S_BARRIER
+
+ s_movk_i32 m0, 0x0000
+ s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance)
+ //
+ // CLEAR VGPRs
+ //
+ s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing
+label_0005:
+ v_mov_b32 v0, 0
+ v_mov_b32 v1, 0
+ v_mov_b32 v2, 0
+ v_mov_b32 v3, 0
+ v_mov_b32 v4, 0
+ v_mov_b32 v5, 0
+ v_mov_b32 v6, 0
+ v_mov_b32 v7, 0
+ s_sub_u32 s2, s2, 8
+ s_set_gpr_idx_idx s2
+ s_cbranch_scc0 label_0005
+ s_set_gpr_idx_off
+
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s1                       // sgpr1 has the tg_size (first_wave) term since the ucode only sets COMPUTE_PGM_RSRC2.tg_size_en
+ s_cbranch_scc0  label_clean_sgpr_1         // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1           // * 8, so each thread is a double-dword address (8 bytes)
+ s_mov_b32     s2, 0x00000003f              // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_clean_sgpr_1:
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
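+ // s_movreld_b32 writes to SGPR[dst + M0]; each pass clears four SGPRs as m0 steps down from 92 to 0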
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+s_endpgm
+
+label_0023:
+
+ s_sethalt 1
+
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop1:
+
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop1
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0xee //clear vcc
+
+s_endpgm
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h
new file mode 100644
index 000000000000..69aa567c6c1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_9_4_3 */
+static const u32 gfx_9_4_3_cleaner_shader_hex[] = {
+ 0xbf068100, 0xbf84003b,
+ 0xbf8a0000, 0xb07c0000,
+ 0xbe8200ff, 0x00000078,
+ 0xbf110802, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0x7e060280, 0x7e080280,
+ 0x7e0a0280, 0x7e0c0280,
+ 0x7e0e0280, 0x80828802,
+ 0xbe803202, 0xbf84fff5,
+ 0xbf9c0000, 0xbe8200ff,
+ 0x80000000, 0x86020102,
+ 0xbf840011, 0xbefe00c1,
+ 0xbeff00c1, 0xd28c0001,
+ 0x0001007f, 0xd28d0001,
+ 0x0002027e, 0x10020288,
+ 0xbe8200bf, 0xbefc00c1,
+ 0xd89c2000, 0x00020201,
+ 0xd89c6040, 0x00040401,
+ 0x320202ff, 0x00000400,
+ 0x80828102, 0xbf84fff8,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbf810000, 0xbf8d0001,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea01ff,
+ 0x000000ee, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d200310d1731..0e3ddea7b8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -443,23 +443,6 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev)
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
-static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- u32 status = 0;
- struct amdgpu_vmhub *hub;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
- return false;
-
- hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
- status = RREG32(hub->vm_l2_pro_fault_status);
- /* reset page fault status */
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
-
- return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
-}
-
const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
@@ -468,5 +451,4 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
.init = gfxhub_v1_0_init,
.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
- .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 72109abe7c86..ed8e130c7d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -622,22 +622,6 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
-static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- u32 fed, status;
-
- status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
- fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- if (!amdgpu_sriov_vf(adev)) {
- /* clear page fault status and address */
- WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
- regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
- }
-
- return fed;
-}
-
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
@@ -646,7 +630,6 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
.init = gfxhub_v1_2_init,
.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
- .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f0ceab3ce5bf..9784a2892185 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -132,7 +132,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
return 1;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b73136d390cc..c76ac0dfe572 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -595,7 +595,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
cam_index = entry->src_data[2] & 0x3ff;
ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, write_fault);
+ addr, entry->timestamp, write_fault);
WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
if (ret)
return 1;
@@ -618,7 +618,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* tables
*/
if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, write_fault))
+ addr, entry->timestamp, write_fault))
return 1;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 077c6d920e27..e019249883fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -41,7 +41,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index a9ea23fa0def..ed7facacf2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -32,7 +32,7 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index ab06c2b4b20b..33736d361dd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -35,7 +35,7 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 8d7d0813e331..1c99bb09e2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -32,7 +32,7 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
index aac107898bae..964c29ef25dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -42,23 +42,23 @@ static const unsigned int isp_4_1_0_int_srcid[MAX_ISP410_INT_SRC] = {
static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
+ int idx, int_idx, num_res, r;
u64 isp_base;
- int int_idx;
- int r;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- isp->isp_res = kcalloc(MAX_ISP410_INT_SRC + 1, sizeof(struct resource),
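+	/* resources: ISP core and PHY0 register ranges, the sensor0 GPIO range, and one entry per interrupt source */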
+ num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC;
+ isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
@@ -83,22 +83,53 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_res[0].start = isp_base;
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
- for (int_idx = 0; int_idx < MAX_ISP410_INT_SRC; int_idx++) {
- isp->isp_res[int_idx + 1].name = "isp_4_1_0_irq";
- isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
- isp->isp_res[int_idx + 1].start =
+ isp->isp_res[1].name = "isp_4_1_phy0_reg";
+ isp->isp_res[1].flags = IORESOURCE_MEM;
+ isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET;
+ isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE;
+
+ isp->isp_res[2].name = "isp_gpio_sensor0_reg";
+ isp->isp_res[2].flags = IORESOURCE_MEM;
+ isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET;
+ isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET +
+ ISP410_GPIO_SENSOR0_SIZE;
+
+ for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0;
+ idx < num_res; idx++, int_idx++) {
+ isp->isp_res[idx].name = "isp_4_1_0_irq";
+ isp->isp_res[idx].flags = IORESOURCE_IRQ;
+ isp->isp_res[idx].start =
amdgpu_irq_create_mapping(adev, isp_4_1_0_int_srcid[int_idx]);
- isp->isp_res[int_idx + 1].end =
- isp->isp_res[int_idx + 1].start;
+ isp->isp_res[idx].end =
+ isp->isp_res[idx].start;
}
isp->isp_cell[0].name = "amd_isp_capture";
- isp->isp_cell[0].num_resources = MAX_ISP410_INT_SRC + 1;
+ isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource),
+ GFP_KERNEL);
+ if (!isp->isp_i2c_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_i2c_res[0].name = "isp_i2c0_reg";
+ isp->isp_i2c_res[0].flags = IORESOURCE_MEM;
+ isp->isp_i2c_res[0].start = isp_base + ISP410_I2C0_OFFSET;
+ isp->isp_i2c_res[0].end = isp_base + ISP410_I2C0_OFFSET + ISP410_I2C0_SIZE;
+
+ isp->isp_cell[1].name = "amd_isp_i2c_designware";
+ isp->isp_cell[1].num_resources = 1;
+ isp->isp_cell[1].resources = &isp->isp_i2c_res[0];
+ isp->isp_cell[1].platform_data = isp->isp_pdata;
+ isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
goto failure;
@@ -111,6 +142,7 @@ failure:
kfree(isp->isp_pdata);
kfree(isp->isp_res);
kfree(isp->isp_cell);
+ kfree(isp->isp_i2c_res);
return r;
}
@@ -122,6 +154,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
+ kfree(isp->isp_i2c_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
index 315f2822410c..7db24c0f1080 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -32,8 +32,19 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+#define MAX_ISP410_MEM_RES 2
+#define MAX_ISP410_SENSOR_RES 1
#define MAX_ISP410_INT_SRC 8
+#define ISP410_PHY0_OFFSET 0x66700
+#define ISP410_PHY0_SIZE 0xD30
+
+#define ISP410_I2C0_OFFSET 0x66400
+#define ISP410_I2C0_SIZE 0x100
+
+#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C
+#define ISP410_GPIO_SENSOR0_SIZE 0x4
+
void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 4e17fa03f7b5..b56f27295468 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -42,23 +42,24 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
+ int idx, int_idx, num_res, r;
u64 isp_base;
- int int_idx;
- int r;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- isp->isp_res = kcalloc(MAX_ISP411_INT_SRC + 1, sizeof(struct resource),
+ num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC;
+
+ isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
@@ -83,22 +84,52 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_res[0].start = isp_base;
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
- for (int_idx = 0; int_idx < MAX_ISP411_INT_SRC; int_idx++) {
- isp->isp_res[int_idx + 1].name = "isp_4_1_1_irq";
- isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
- isp->isp_res[int_idx + 1].start =
+ isp->isp_res[1].name = "isp_4_1_1_phy0_reg";
+ isp->isp_res[1].flags = IORESOURCE_MEM;
+ isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET;
+ isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE;
+
+ isp->isp_res[2].name = "isp_4_1_1_sensor0_reg";
+ isp->isp_res[2].flags = IORESOURCE_MEM;
+ isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET;
+ isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET +
+ ISP411_GPIO_SENSOR0_SIZE;
+
+ for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0;
+ idx < num_res; idx++, int_idx++) {
+ isp->isp_res[idx].name = "isp_4_1_1_irq";
+ isp->isp_res[idx].flags = IORESOURCE_IRQ;
+ isp->isp_res[idx].start =
amdgpu_irq_create_mapping(adev, isp_4_1_1_int_srcid[int_idx]);
- isp->isp_res[int_idx + 1].end =
- isp->isp_res[int_idx + 1].start;
+ isp->isp_res[idx].end =
+ isp->isp_res[idx].start;
}
isp->isp_cell[0].name = "amd_isp_capture";
- isp->isp_cell[0].num_resources = MAX_ISP411_INT_SRC + 1;
+ isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_i2c_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_i2c_res[0].name = "isp_i2c0_reg";
+ isp->isp_i2c_res[0].flags = IORESOURCE_MEM;
+ isp->isp_i2c_res[0].start = isp_base + ISP411_I2C0_OFFSET;
+ isp->isp_i2c_res[0].end = isp_base + ISP411_I2C0_OFFSET + ISP411_I2C0_SIZE;
+
+ isp->isp_cell[1].name = "amd_isp_i2c_designware";
+ isp->isp_cell[1].num_resources = 1;
+ isp->isp_cell[1].resources = &isp->isp_i2c_res[0];
+ isp->isp_cell[1].platform_data = isp->isp_pdata;
+ isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
goto failure;
@@ -111,6 +142,7 @@ failure:
kfree(isp->isp_pdata);
kfree(isp->isp_res);
kfree(isp->isp_cell);
+ kfree(isp->isp_i2c_res);
return r;
}
@@ -122,6 +154,7 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
+ kfree(isp->isp_i2c_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
index dfb9522c9d6a..40887ddeb08c 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -32,8 +32,19 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+#define MAX_ISP411_MEM_RES 2
+#define MAX_ISP411_SENSOR_RES 1
#define MAX_ISP411_INT_SRC 8
+#define ISP411_PHY0_OFFSET 0x66700
+#define ISP411_PHY0_SIZE 0xD30
+
+#define ISP411_I2C0_OFFSET 0x66400
+#define ISP411_I2C0_SIZE 0x100
+
+#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C
+#define ISP411_GPIO_SENSOR0_SIZE 0x4
+
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 71f43a5c7f72..6e0e88076224 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v1_0.h"
@@ -34,6 +35,9 @@
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
@@ -300,7 +304,10 @@ static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+ if (ring->funcs->parse_cs)
+ amdgpu_ring_write(ring, 0);
+ else
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
@@ -554,6 +561,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
+ .parse_cs = jpeg_v1_dec_ring_parse_cs,
.emit_frame_size =
6 + 6 + /* hdp invalidate / flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -611,3 +619,69 @@ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}
+
+/**
+ * jpeg_v1_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ int ret = 0;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res || cond != PACKETJ_CONDITION_CHECK0) /* only allow 0 for now */
+ return -EINVAL;
+
+ if (reg >= JPEG_V1_REG_RANGE_START && reg <= JPEG_V1_REG_RANGE_END)
+ continue;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH &&
+ reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW &&
+ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH &&
+ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW &&
+ reg != JPEG_V1_REG_CTX_INDEX &&
+ reg != JPEG_V1_REG_CTX_DATA) {
+ ret = -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE1:
+ if (reg != JPEG_V1_REG_CTX_DATA)
+ ret = -EINVAL;
+ break;
+ case PACKETJ_TYPE3:
+ if (reg != JPEG_V1_REG_SOFT_RESET)
+ ret = -EINVAL;
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] != CP_PACKETJ_NOP)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
index bbf33a6a3972..9654d22e0376 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -29,4 +29,15 @@ int jpeg_v1_0_sw_init(void *handle);
void jpeg_v1_0_sw_fini(void *handle);
void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+#define JPEG_V1_REG_RANGE_START 0x8000
+#define JPEG_V1_REG_RANGE_END 0x803f
+
+#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x8238
+#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x8239
+#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH 0x825a
+#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW 0x825b
+#define JPEG_V1_REG_CTX_INDEX 0x8328
+#define JPEG_V1_REG_CTX_DATA 0x8329
+#define JPEG_V1_REG_SOFT_RESET 0x83a0
+
#endif /*__JPEG_V1_0_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 98aa3ccd0d20..41c0f8750dc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
+#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -538,7 +539,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+
+ if (ring->funcs->parse_cs)
+ amdgpu_ring_write(ring, 0);
+ else
+ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -764,6 +769,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -810,3 +816,58 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
+
+/**
+ * jpeg_v2_dec_ring_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
+ reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
+ reg > JPEG_REG_RANGE_END) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 654e43e83e2c..63fadda7a673 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,6 +45,9 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -57,6 +60,9 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
+int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index d8ef95c847c2..eedb9a829d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -662,6 +662,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -691,6 +692,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 31cfa3ce6528..b1e7fd25afbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -560,6 +560,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 3dac8f259d7f..6c5c1a68a9b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -727,6 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
index 07d36c2abd6b..47638fd4d4e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h
@@ -32,5 +32,4 @@ enum amdgpu_jpeg_v4_0_sub_block {
};
extern const struct amdgpu_ip_block_version jpeg_v4_0_ip_block;
-
#endif /* __JPEG_V4_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 6ae5a784e187..86958cb2c2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -23,9 +23,9 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
+#include "jpeg_v2_0.h"
#include "jpeg_v4_0_3.h"
#include "mmsch_v4_0_3.h"
@@ -59,6 +59,12 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_4_0__SRCID__JPEG7_DECODE
};
+static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4));
+}
+
/**
* jpeg_v4_0_3_early_init - set function pointers
*
@@ -734,32 +740,20 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
amdgpu_ring_write(ring, 0);
- if (ring->adev->jpeg.inst[ring->me].aid_id) {
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x4);
- } else {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x3fbc);
- if (ring->adev->jpeg.inst[ring->me].aid_id) {
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x0);
- } else {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x1);
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
amdgpu_ring_write(ring, 0);
}
@@ -834,8 +828,8 @@ void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
{
uint32_t reg_offset;
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets if required */
+ if (jpeg_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_JPEG_REG_OFFSET(reg);
reg_offset = (reg << 2);
@@ -881,8 +875,8 @@ void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint
{
uint32_t reg_offset;
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets if required */
+ if (jpeg_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_JPEG_REG_OFFSET(reg);
reg_offset = (reg << 2);
@@ -1089,7 +1083,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1254,56 +1248,3 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v4_0_3_ras;
}
-
-/**
- * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- uint32_t i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 71c54b294e15..747a3e5f6856 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,9 +46,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
@@ -65,7 +62,5 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
-int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
+
#endif /* __JPEG_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index f96ac6bce526..44eeed445ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -768,6 +768,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index f4daff90c770..d662aa841f97 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -26,6 +26,7 @@
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
+#include "jpeg_v2_0.h"
#include "jpeg_v4_0_3.h"
#include "vcn/vcn_5_0_0_offset.h"
@@ -646,7 +647,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
+ .parse_cs = jpeg_v2_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 2ea8223eb969..ee91ff9e52a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
+#include "gfx_v11_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
@@ -360,6 +361,100 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v11_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+		/* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+		/* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
+{
+	union MESAPI__RESET mes_reset_queue_pkt;
+
+	if (input->use_mmio)
+		return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+						  input->me_id, input->pipe_id,
+						  input->queue_id, input->vmid);
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
+ /*mes_reset_queue_pkt.reset_queue_only = 1;*/
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__REMOVE_QUEUE, api_status));
+}
+
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -421,13 +516,41 @@ static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
@@ -595,6 +718,43 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
+static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+
+ if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ mes_reset_queue_pkt.reset_legacy_gfx = 1;
+ mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
+ mes_reset_queue_pkt.queue_id_lp = input->queue_id;
+ mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
+ mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
+ mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
+ mes_reset_queue_pkt.vmid_id_lp = input->vmid;
+ } else {
+ mes_reset_queue_pkt.reset_queue_only = 1;
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ }
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -603,6 +763,8 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
+ .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
+ .reset_hw_queue = mes_v11_0_reset_hw_queue,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -693,6 +855,28 @@ static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
(void **)&adev->mes.ucode_fw_ptr[pipe]);
}
+static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
+{
+ int pipe;
+
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version =
+ RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version =
+ RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
uint64_t ucode_addr;
@@ -1062,18 +1246,6 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
mes_v11_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-
return 0;
}
@@ -1320,15 +1492,24 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_enable(adev, true);
+ mes_v11_0_get_fw_version(adev);
+
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
- r = mes_v11_0_hw_init(adev);
- if (r)
- goto failure;
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
+ adev->mes.enable_legacy_queue_map = true;
+ else
+ adev->mes.enable_legacy_queue_map = false;
+
+ if (adev->mes.enable_legacy_queue_map) {
+ r = mes_v11_0_hw_init(adev);
+ if (r)
+ goto failure;
+ }
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e39a58d262c9..e499b2857a01 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -350,6 +350,32 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+ int pipe;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
+ /*mes_reset_queue_pkt.reset_queue_only = 1;*/
+
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__REMOVE_QUEUE, api_status));
+}
+
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -676,6 +702,44 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
+static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+ int pipe;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+
+ if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ mes_reset_queue_pkt.reset_legacy_gfx = 1;
+ mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
+ mes_reset_queue_pkt.queue_id_lp = input->queue_id;
+ mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
+ mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
+ mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
+ mes_reset_queue_pkt.vmid_id_lp = input->vmid;
+ } else {
+ mes_reset_queue_pkt.reset_queue_only = 1;
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ }
+
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -684,6 +748,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
+ .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
+ .reset_hw_queue = mes_v12_0_reset_hw_queue,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1266,6 +1332,7 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.funcs = &mes_v12_0_funcs;
adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+ adev->mes.enable_legacy_queue_map = true;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
@@ -1422,9 +1489,11 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
}
- r = mes_v12_0_hw_init(adev);
- if (r)
- goto failure;
+ if (adev->mes.enable_legacy_queue_map) {
+ r = mes_v12_0_hw_init(adev);
+ if (r)
+ goto failure;
+ }
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 621761a17ac7..b01bb759d0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -559,22 +559,6 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
}
-static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst)
-{
- u32 fed, status;
-
- status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
- fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- if (!amdgpu_sriov_vf(adev)) {
- /* clear page fault status and address */
- WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
- regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
- }
-
- return fed;
-}
-
const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.get_fb_location = mmhub_v1_8_get_fb_location,
.init = mmhub_v1_8_init,
@@ -584,7 +568,6 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
.set_clockgating = mmhub_v1_8_set_clockgating,
.get_clockgating = mmhub_v1_8_get_clockgating,
- .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status,
};
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
@@ -670,8 +653,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}
static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index caf616a2c8a6..1d099ffb3a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -25,7 +25,7 @@
#define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 15000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 10000
#define NV_MAILBOX_POLL_MSG_REP_MAX 11
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 772604feb6ac..23ef4eb36b40 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -72,6 +72,53 @@ MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL)
+};
+
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -1750,6 +1797,8 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1870,6 +1919,13 @@ static int sdma_v4_0_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+		DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
+
return r;
}
@@ -1890,6 +1946,8 @@ static int sdma_v4_0_sw_fini(void *handle)
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -2292,6 +2350,48 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v4_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v4_0_get_reg_offset(adev, i,
+ sdma_reg_list_4_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
@@ -2308,6 +2408,8 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.set_clockgating_state = sdma_v4_0_set_clockgating_state,
.set_powergating_state = sdma_v4_0_set_powergating_state,
.get_clockgating_state = sdma_v4_0_get_clockgating_state,
+ .dump_ip_state = sdma_v4_0_dump_ip_state,
+ .print_ip_state = sdma_v4_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2c55bfd935bb..c77889040760 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -46,6 +46,53 @@
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_VM_CNTL)
+};
+
#define mmSMNAID_AID0_MCA_SMU 0x03b30400
#define WREG32_SDMA(instance, offset, value) \
@@ -1291,6 +1338,8 @@ static int sdma_v4_4_2_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 aid_id;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
@@ -1386,6 +1435,13 @@ static int sdma_v4_4_2_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+		DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
+
return r;
}
@@ -1406,6 +1462,8 @@ static int sdma_v4_4_2_sw_fini(void *handle)
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1799,6 +1857,48 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_4_2[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v4_4_2_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
+ sdma_reg_list_4_4_2[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
.name = "sdma_v4_4_2",
.early_init = sdma_v4_4_2_early_init,
@@ -1815,6 +1915,8 @@ const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
.set_powergating_state = sdma_v4_4_2_set_powergating_state,
.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+ .dump_ip_state = sdma_v4_4_2_dump_ip_state,
+ .print_ip_state = sdma_v4_4_2_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
@@ -2141,7 +2243,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}
static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b7d33d78bce0..3e48ea38385d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -59,6 +59,55 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
+};
+
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1341,6 +1390,8 @@ static int sdma_v5_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
@@ -1378,6 +1429,13 @@ static int sdma_v5_0_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+		DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
+
return r;
}
@@ -1391,6 +1449,8 @@ static int sdma_v5_0_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1718,7 +1778,49 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
+static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v5_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v5_0_get_reg_offset(adev, i,
+ sdma_reg_list_5_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.name = "sdma_v5_0",
.early_init = sdma_v5_0_early_init,
.late_init = NULL,
@@ -1734,6 +1836,8 @@ const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.set_clockgating_state = sdma_v5_0_set_clockgating_state,
.set_powergating_state = sdma_v5_0_set_powergating_state,
.get_clockgating_state = sdma_v5_0_get_clockgating_state,
+ .dump_ip_state = sdma_v5_0_dump_ip_state,
+ .print_ip_state = sdma_v5_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
index d4e3c2e696f6..2ab71f21755a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
@@ -24,7 +24,6 @@
#ifndef __SDMA_V5_0_H__
#define __SDMA_V5_0_H__
-extern const struct amd_ip_funcs sdma_v5_0_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v5_0_ip_block;
#endif /* __SDMA_V5_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2e72d445415f..bc9b240a3488 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -60,6 +60,55 @@ MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
+};
+
static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1224,6 +1273,8 @@ static int sdma_v5_2_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1255,6 +1306,13 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1268,6 +1326,8 @@ static int sdma_v5_2_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, true);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1676,7 +1736,49 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
amdgpu_gfx_off_ctrl(adev, true);
}
-const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
+static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_2[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v5_2_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v5_2_get_reg_offset(adev, i,
+ sdma_reg_list_5_2[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
.late_init = NULL,
@@ -1692,6 +1794,8 @@ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.set_clockgating_state = sdma_v5_2_set_clockgating_state,
.set_powergating_state = sdma_v5_2_set_powergating_state,
.get_clockgating_state = sdma_v5_2_get_clockgating_state,
+ .dump_ip_state = sdma_v5_2_dump_ip_state,
+ .print_ip_state = sdma_v5_2_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
index b70414fef2a1..863145b3a77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
@@ -24,7 +24,6 @@
#ifndef __SDMA_V5_2_H__
#define __SDMA_V5_2_H__
-extern const struct amd_ip_funcs sdma_v5_2_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v5_2_ip_block;
#endif /* __SDMA_V5_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index dab4c2db8c9d..208a1fa9d4e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -57,6 +57,63 @@ MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
+};
+
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1239,6 +1296,8 @@ static int sdma_v6_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
@@ -1274,6 +1333,13 @@ static int sdma_v6_0_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1287,6 +1353,8 @@ static int sdma_v6_0_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, true);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1488,6 +1556,48 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}
+static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_6_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v6_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v6_0_get_reg_offset(adev, i,
+ sdma_reg_list_6_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.name = "sdma_v6_0",
.early_init = sdma_v6_0_early_init,
@@ -1505,6 +1615,8 @@ const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.set_clockgating_state = sdma_v6_0_set_clockgating_state,
.set_powergating_state = sdma_v6_0_set_powergating_state,
.get_clockgating_state = sdma_v6_0_get_clockgating_state,
+ .dump_ip_state = sdma_v6_0_dump_ip_state,
+ .print_ip_state = sdma_v6_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index ecee9e7d7e4c..cfd8e183ad50 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,64 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_REV),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
+};
+
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1217,6 +1275,8 @@ static int sdma_v7_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
@@ -1247,6 +1307,13 @@ static int sdma_v7_0_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1263,6 +1330,8 @@ static int sdma_v7_0_sw_fini(void *handle)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
sdma_v12_0_free_ucode_buffer(adev);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1466,6 +1535,48 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
{
}
+static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_7_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v7_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v7_0_get_reg_offset(adev, i,
+ sdma_reg_list_7_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
.name = "sdma_v7_0",
.early_init = sdma_v7_0_early_init,
@@ -1483,6 +1594,8 @@ const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
.set_clockgating_state = sdma_v7_0_set_clockgating_state,
.set_powergating_state = sdma_v7_0_set_powergating_state,
.get_clockgating_state = sdma_v7_0_get_clockgating_state,
+ .dump_ip_state = sdma_v7_0_dump_ip_state,
+ .print_ip_state = sdma_v7_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 282584a48be0..ef7c603b50ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -93,6 +93,10 @@ struct soc15_ras_field_entry {
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
+/* Override the instance id */
+#define SOC15_REG_ENTRY_OFFSET_INST(entry, inst) \
+ (adev->reg_offset[entry.hwip][inst][entry.seg] + entry.reg_offset)
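For reference, a minimal usage sketch: the macro resolves a register entry against an explicitly supplied instance index rather than the instance recorded in the entry, so one shared register list can be dumped for every hardware instance. This is exactly how the VCN dump helpers later in this diff read each register:

    /* read register j of the shared list from VCN instance i */
    uint32_t val = RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));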
+
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e74e1983da53..b9cbeb389edc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -413,6 +413,10 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER 0xD2
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
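As a hedged illustration only (the emit helper itself is not part of this hunk, and its name here is hypothetical), a ring backend would typically emit this packet with the existing PACKET3() and amdgpu_ring_write() helpers, following the two-dword layout described above:

    /* hypothetical emit helper; name and placement are illustrative */
    static void example_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
    {
        amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); /* header */
        amdgpu_ring_write(ring, 0); /* RESERVED [31:0] */
    }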
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 9dbb13adb661..1a8ea834efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -157,9 +157,9 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
&de_count, umc_v12_0_is_deferred_error);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
- amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count);
return 0;
}
@@ -225,26 +225,16 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
}
}
-static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
- struct ta_ras_query_address_input *addr_in,
- uint64_t *pfns, int len)
+static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
+ struct ta_ras_query_address_output *addr_out,
+ uint64_t err_addr)
{
uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column, err_addr;
- struct ta_ras_query_address_output addr_out;
- uint32_t pos = 0;
-
- err_addr = addr_in->ma.err_addr;
- addr_in->addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
- return 0;
- }
+ uint64_t soc_pa, retired_page, column;
- soc_pa = addr_out.pa.pa;
- bank = addr_out.pa.bank;
- channel_index = addr_out.pa.channel_idx;
+ soc_pa = addr_out->pa.pa;
+ bank = addr_out->pa.bank;
+ channel_index = addr_out->pa.channel_idx;
col = (err_addr >> 1) & 0x1fULL;
row = (err_addr >> 10) & 0x3fffULL;
@@ -258,11 +248,6 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-
/* include column bit 0 and 1 */
col &= 0x3;
col |= (column << 2);
@@ -272,19 +257,74 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
/* shift R13 bit */
retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row_xor, col, bank, channel_index);
+ }
+}
+
+static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ uint64_t soc_pa, retired_page, column;
+ uint32_t pos = 0;
+
+ soc_pa = pa_addr;
+ /* clear [C3 C2] in soc physical address */
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
+ /* clear [C4] in soc physical address */
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
+ retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
+ /* shift R13 bit */
+ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
if (pos >= len)
return 0;
pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
}
return pos;
}
+static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ uint64_t *addr, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+ addr_in.addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
+ return -EINVAL;
+ }
+
+ if (dump_addr)
+ umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);
+
+ *addr = addr_out.pa.pa;
+
+ return 0;
+}
+
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
@@ -483,12 +523,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint16_t hwid, mcatype;
- struct ta_ras_query_address_input addr_in;
uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
- uint64_t err_addr, hash_val = 0;
+ uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
- int count;
- int ret;
+ int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
@@ -514,46 +552,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
MCA_IPID_2_UMC_CH(ipid),
err_addr);
- memset(page_pfn, 0, sizeof(page_pfn));
-
- memset(&addr_in, 0, sizeof(addr_in));
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
- addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
- addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
- addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);
-
- count = umc_v12_0_convert_err_addr(adev,
- &addr_in, page_pfn, ARRAY_SIZE(page_pfn));
- if (count <= 0) {
- dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
- return 0;
- }
-
- ret = amdgpu_umc_build_pages_hash(adev,
- page_pfn, count, &hash_val);
- if (ret) {
- dev_err(adev->dev, "Fail to build error pages hash\n");
+ ret = umc_v12_0_convert_mca_to_addr(adev,
+ err_addr, MCA_IPID_2_UMC_CH(ipid),
+ MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
+ MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
+ if (ret)
return ret;
- }
ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
if (!ecc_err)
return -ENOMEM;
- ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
- if (!ecc_err->err_pages.pfn) {
- kfree(ecc_err);
- return -ENOMEM;
- }
-
- memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
- ecc_err->err_pages.count = count;
-
- ecc_err->hash_index = hash_val;
ecc_err->status = status;
ecc_err->ipid = ipid;
ecc_err->addr = addr;
+ ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;
+
+ /* If converted pa_pfn is 0, use pa C4 pfn. */
+ if (!ecc_err->pa_pfn)
+ ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
@@ -562,13 +579,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
else
dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
- kfree(ecc_err->err_pages.pfn);
kfree(ecc_err);
return ret;
}
con->umc_ecc_log.de_queried_count++;
+ memset(page_pfn, 0, sizeof(page_pfn));
+ count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ pa_addr,
+ page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
+ return 0;
+ }
+
+ /* Reserve memory */
+ for (i = 0; i < count; i++)
+ amdgpu_ras_reserve_page(adev, page_pfn[i]);
+
/* The problem case is as follows:
* 1. GPU A triggers a gpu ras reset, and GPU A drives
* GPU B to also perform a gpu ras reset.
@@ -593,16 +622,21 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
struct ras_ecc_err *ecc_err, void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t i = 0;
- int ret = 0;
+ uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
+ int ret, i, count;
if (!err_data || !ecc_err)
return -EINVAL;
- for (i = 0; i < ecc_err->err_pages.count; i++) {
+ memset(page_pfn, 0, sizeof(page_pfn));
+ count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
+ page_pfn, ARRAY_SIZE(page_pfn));
+
+ for (i = 0; i < count; i++) {
ret = amdgpu_umc_fill_error_record(err_data,
ecc_err->addr,
- ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
+ page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
MCA_IPID_2_UMC_CH(ecc_err->ipid),
MCA_IPID_2_UMC_INST(ecc_err->ipid));
if (ret)
@@ -636,7 +670,8 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
break;
}
- radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ radix_tree_tag_clear(ecc_tree,
+ entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
}
mutex_unlock(&con->umc_ecc_log.lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index b4974793850b..be5598d76c1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -81,6 +81,11 @@
(((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
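This macro clears the C2/C3/C4 column bits and the R13 bit, i.e. exactly the bits that umc_v12_0_lookup_bad_pages_in_a_row() varies when expanding one error address into its set of retired pages, so every page of a bad row collapses to a single key. A minimal sketch of how umc_v12_0_update_ecc_status() derives that radix-tree key (taken from the umc_v12_0.c change earlier in this diff):

    /* pa_addr comes from umc_v12_0_convert_mca_to_addr() */
    ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;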
+
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 32517c364cf7..4bfba2931b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -950,7 +950,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 06d787385ad4..0748bf44c880 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1102,7 +1102,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
- .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index a280b9fecb77..ecdfbfefd66a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -45,6 +45,42 @@
#define mmUVD_REG_XX_MASK_1_0 0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -90,6 +126,8 @@ static int vcn_v1_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
@@ -161,6 +199,14 @@ static int vcn_v1_0_sw_init(void *handle)
r = jpeg_v1_0_sw_init(handle);
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return r;
}
@@ -184,6 +230,8 @@ static int vcn_v1_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1877,6 +1925,66 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}
+static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v1_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1895,8 +2003,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v1_0_dump_ip_state,
+ .print_ip_state = vcn_v1_0_print_ip_state,
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index d3d096909a7f..bfd067e2d2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -53,6 +53,42 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -96,6 +132,8 @@ static int vcn_v2_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared;
@@ -184,6 +222,15 @@ static int vcn_v2_0_sw_init(void *handle)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -213,6 +260,8 @@ static int vcn_v2_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1985,6 +2034,66 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
+static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v2_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2003,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_0_dump_ip_state,
+ .print_ip_state = vcn_v2_0_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 96f60c303161..04e9e806e318 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -55,6 +55,43 @@
#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
@@ -122,6 +159,8 @@ static int vcn_v2_5_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, j, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
@@ -241,6 +280,15 @@ static int vcn_v2_5_sw_init(void *handle)
if (r)
return r;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -277,6 +325,8 @@ static int vcn_v2_5_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1876,6 +1926,66 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v2_5_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -1894,8 +2004,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_5_dump_ip_state,
+ .print_ip_state = vcn_v2_5_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -1916,8 +2026,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_5_dump_ip_state,
+ .print_ip_state = vcn_v2_5_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 24f947751c46..65dd68b32280 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -60,6 +60,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -126,6 +162,8 @@ static int vcn_v3_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vcn_sw_init(adev);
@@ -246,6 +284,15 @@ static int vcn_v3_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr == NULL) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -284,6 +331,7 @@ static int vcn_v3_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
return r;
}
@@ -2203,6 +2251,67 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v3_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2221,8 +2330,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v3_0_dump_ip_state,
+ .print_ip_state = vcn_v3_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 776c539bfdda..26c6f10a8c8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -52,6 +52,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -137,6 +173,8 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -200,6 +238,15 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -239,6 +286,8 @@ static int vcn_v4_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -2109,6 +2158,67 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
+ i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2127,8 +2237,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_dump_ip_state,
+ .print_ip_state = vcn_v4_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
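
Editor's note on the buffer layout used above: adev->vcn.ip_dump is a flat array of num_vcn_inst * reg_count u32 values. Instance i occupies reg_count consecutive slots starting at i * reg_count, slot 0 is always regUVD_POWER_STATUS, and the remaining slots are only filled when that first value shows the instance as powered. A small accessor sketch; the helper is hypothetical, the layout it assumes is the one in the patch.

	/* Illustrative sketch of indexing into the VCN dump buffer. */
	static uint32_t example_vcn_dump_read(struct amdgpu_device *adev,
					      uint32_t inst, uint32_t reg_idx,
					      uint32_t reg_count)
	{
		uint32_t inst_off = inst * reg_count;	/* per-instance base slot */

		if (!adev->vcn.ip_dump || reg_idx >= reg_count)
			return 0;

		return adev->vcn.ip_dump[inst_off + reg_idx];
	}
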
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 9bae95538b62..0fda70336300 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,42 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
#define NORMALIZE_VCN_REG_OFFSET(offset) \
(offset & 0x1FFFF)
@@ -92,6 +128,8 @@ static int vcn_v4_0_3_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -159,6 +197,15 @@ static int vcn_v4_0_3_sw_init(void *handle)
}
}
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -194,6 +241,8 @@ static int vcn_v4_0_3_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1684,6 +1733,68 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
+static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_3_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off, inst_id;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(VCN, i);
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
+ inst_id));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
@@ -1702,8 +1813,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_3_dump_ip_state,
+ .print_ip_state = vcn_v4_0_3_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
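
Editor's note: VCN 4.0.3 is the one variant here that distinguishes logical and physical instances. Its dump function resolves inst_id = GET_INST(VCN, i) and uses that for the MMIO reads, while the ip_dump buffer stays indexed by the logical i, exactly as in the hunk above. Condensed for emphasis (a restatement of the patch, not new driver code):

	/* Logical index i selects the buffer slot; GET_INST() selects the
	 * physical instance whose registers are read.
	 */
	inst_id  = GET_INST(VCN, i);
	inst_off = i * reg_count;
	adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
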
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 8d75061f9f38..b1fd226b7efb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -52,6 +52,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_5[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -97,6 +133,8 @@ static int vcn_v4_0_5_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -168,6 +206,14 @@ static int vcn_v4_0_5_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return 0;
}
@@ -207,6 +253,8 @@ static int vcn_v4_0_5_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1733,6 +1781,67 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_5_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
+ i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1751,8 +1860,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_5_dump_ip_state,
+ .print_ip_state = vcn_v4_0_5_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
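
Editor's note on the buffer lifecycle repeated in each file: sw_init allocates num_vcn_inst * reg_count u32s with kcalloc() and, on failure, only logs an error and leaves ip_dump NULL so both dump paths bail out early; sw_fini frees it unconditionally (kfree(NULL) is a no-op). A condensed sketch of that allocate-or-degrade pattern, written as a hypothetical helper:

	/* Minimal sketch: a failed allocation disables dumping but does not
	 * fail driver initialization.
	 */
	static void example_vcn_alloc_ip_dump(struct amdgpu_device *adev,
					      uint32_t reg_count)
	{
		uint32_t *ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count,
					sizeof(uint32_t), GFP_KERNEL);

		if (!ptr)
			DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");

		adev->vcn.ip_dump = ptr;	/* NULL simply skips dump/print */
	}
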
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 68c97fcd539b..c305386358b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -37,6 +37,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -83,6 +117,8 @@ static int vcn_v5_0_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -137,6 +173,14 @@ static int vcn_v5_0_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return 0;
}
@@ -173,6 +217,8 @@ static int vcn_v5_0_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1297,6 +1343,66 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v5_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1315,8 +1421,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v5_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 80ce42aacc0c..b61f6b838ec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -246,6 +246,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
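
Editor's note on the vid.h change: EOP_EXEC defines bit 28, next to the DATA_SEL field (bits 29 and up), and the comment marks it for trailing fences. A hedged example of composing such a control word; which packet dword this ends up in is an assumption, only the two macros come from the header.

	/* Illustrative only: tag a fence as a trailing (execute) fence while
	 * requesting that the low 32 bits of fence data be written.
	 */
	uint32_t eop_ctrl = DATA_SEL(1) |	/* 1 - send low 32bit data */
			    EOP_EXEC;		/* trailing fence execute bit */
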