Diffstat (limited to 'drivers/gpu/drm/amd/pm')
42 files changed, 7310 insertions, 1393 deletions
diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile
index f01e86030cd1..8cf6eff1ea93 100644
--- a/drivers/gpu/drm/amd/pm/Makefile
+++ b/drivers/gpu/drm/amd/pm/Makefile
@@ -27,6 +27,7 @@ subdir-ccflags-y += \
 		-I$(FULL_AMD_PATH)/pm/swsmu \
 		-I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
 		-I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
+		-I$(FULL_AMD_PATH)/pm/swsmu/smu13 \
 		-I$(FULL_AMD_PATH)/pm/powerplay \
 		-I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
 		-I$(FULL_AMD_PATH)/pm/powerplay/hwmgr
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8fb12afe3c96..03581d5b1836 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -911,50 +911,27 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
 
 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
-	uint32_t clk_freq;
-	int ret = 0;
-	if (is_support_sw_smu(adev)) {
-		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
-					     low ? &clk_freq : NULL,
-					     !low ? &clk_freq : NULL);
-		if (ret)
-			return 0;
-		return clk_freq * 100;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-	} else {
-		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
-	}
+	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 }
 
 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 {
-	uint32_t clk_freq;
-	int ret = 0;
-	if (is_support_sw_smu(adev)) {
-		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
-					     low ? &clk_freq : NULL,
-					     !low ? &clk_freq : NULL);
-		if (ret)
-			return 0;
-		return clk_freq * 100;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-	} else {
-		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
-	}
+	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 }
 
 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
 {
 	int ret = 0;
-	bool swsmu = is_support_sw_smu(adev);
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
 	switch (block_type) {
 	case AMD_IP_BLOCK_TYPE_UVD:
 	case AMD_IP_BLOCK_TYPE_VCE:
-		if (swsmu) {
-			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-		} else if (adev->powerplay.pp_funcs &&
-			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
+		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
 			/*
 			 * TODO: need a better lock mechanism
 			 *
@@ -982,7 +959,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
 			 */
 			mutex_lock(&adev->pm.mutex);
-			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+			ret = (pp_funcs->set_powergating_by_smu(
 				(adev)->powerplay.pp_handle, block_type, gate));
 			mutex_unlock(&adev->pm.mutex);
 		}
@@ -990,23 +967,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 	case AMD_IP_BLOCK_TYPE_GFX:
 	case AMD_IP_BLOCK_TYPE_VCN:
 	case AMD_IP_BLOCK_TYPE_SDMA:
-		if (swsmu)
-			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-		else if (adev->powerplay.pp_funcs &&
-			 adev->powerplay.pp_funcs->set_powergating_by_smu)
-			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
-				(adev)->powerplay.pp_handle, block_type, gate));
-		break;
 	case AMD_IP_BLOCK_TYPE_JPEG:
-		if (swsmu)
-			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-		break;
 	case AMD_IP_BLOCK_TYPE_GMC:
 	case AMD_IP_BLOCK_TYPE_ACP:
-		if (adev->powerplay.pp_funcs &&
-		    adev->powerplay.pp_funcs->set_powergating_by_smu)
-			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
+		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
+			ret = (pp_funcs->set_powergating_by_smu(
 				(adev)->powerplay.pp_handle, block_type, gate));
+		}
 		break;
 	default:
 		break;
@@ -1019,18 +986,13 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	struct smu_context *smu = &adev->smu;
 	int ret = 0;
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_baco_enter(smu);
-	} else {
-		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
-			return -ENOENT;
+	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+		return -ENOENT;
 
-		/* enter BACO state */
-		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
-	}
+	/* enter BACO state */
+	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 
 	return ret;
 }
@@ -1039,18 +1001,13 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	struct smu_context *smu = &adev->smu;
 	int ret = 0;
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_baco_exit(smu);
-	} else {
-		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
-			return -ENOENT;
+	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+		return -ENOENT;
 
-		/* exit BACO state */
-		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
-	}
+	/* exit BACO state */
+	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 
 	return ret;
 }
@@ -1059,12 +1016,10 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
 			     enum pp_mp1_state mp1_state)
 {
 	int ret = 0;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_set_mp1_state(&adev->smu, mp1_state);
-	} else if (adev->powerplay.pp_funcs &&
-		   adev->powerplay.pp_funcs->set_mp1_state) {
-		ret = adev->powerplay.pp_funcs->set_mp1_state(
+	if (pp_funcs && pp_funcs->set_mp1_state) {
+		ret = pp_funcs->set_mp1_state(
 			adev->powerplay.pp_handle,
 			mp1_state);
 	}
@@ -1076,68 +1031,46 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	struct smu_context *smu = &adev->smu;
 	bool baco_cap;
 
-	if (is_support_sw_smu(adev)) {
-		return smu_baco_is_support(smu);
-	} else {
-		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
-			return false;
+	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+		return false;
 
-		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
-			return false;
+	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+		return false;
 
-		return baco_cap ? true : false;
-	}
+	return baco_cap;
 }
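Every hunk above makes the same conversion: drop the is_support_sw_smu() branch and call straight through the amd_pm_funcs vtable, on the assumption that the swsmu backend now registers its own amd_pm_funcs table behind adev->powerplay.pp_funcs. A minimal sketch of the dispatch shape the series converges on (the wrapper name, the example_op member, and the -EOPNOTSUPP fallback are illustrative, not taken from the patch):

	/* Hypothetical wrapper showing the unified dispatch pattern:
	 * one vtable, one NULL check, no per-backend branching. */
	static int amdgpu_dpm_example_op(struct amdgpu_device *adev, bool enable)
	{
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

		if (!pp_funcs || !pp_funcs->example_op)	/* example_op is illustrative */
			return -EOPNOTSUPP;

		return pp_funcs->example_op(adev->powerplay.pp_handle, enable);
	}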
 
 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	struct smu_context *smu = &adev->smu;
 
-	if (is_support_sw_smu(adev)) {
-		return smu_mode2_reset(smu);
-	} else {
-		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
-			return -ENOENT;
+	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
+		return -ENOENT;
 
-		return pp_funcs->asic_reset_mode_2(pp_handle);
-	}
+	return pp_funcs->asic_reset_mode_2(pp_handle);
 }
 
 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
 {
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	struct smu_context *smu = &adev->smu;
 	int ret = 0;
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_baco_enter(smu);
-		if (ret)
-			return ret;
-
-		ret = smu_baco_exit(smu);
-		if (ret)
-			return ret;
-	} else {
-		if (!pp_funcs
-		    || !pp_funcs->set_asic_baco_state)
-			return -ENOENT;
+	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
+		return -ENOENT;
 
-		/* enter BACO state */
-		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
-		if (ret)
-			return ret;
+	/* enter BACO state */
+	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+	if (ret)
+		return ret;
 
-		/* exit BACO state */
-		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
-		if (ret)
-			return ret;
-	}
+	/* exit BACO state */
+	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -1166,16 +1099,14 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 				    enum PP_SMC_POWER_PROFILE type,
 				    bool en)
 {
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (is_support_sw_smu(adev))
-		ret = smu_switch_power_profile(&adev->smu, type, en);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->switch_power_profile)
-		ret = adev->powerplay.pp_funcs->switch_power_profile(
+	if (pp_funcs && pp_funcs->switch_power_profile)
+		ret = pp_funcs->switch_power_profile(
 			adev->powerplay.pp_handle, type, en);
 
 	return ret;
@@ -1184,13 +1115,11 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 			       uint32_t pstate)
 {
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
-	if (is_support_sw_smu(adev))
-		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->set_xgmi_pstate)
-		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+	if (pp_funcs && pp_funcs->set_xgmi_pstate)
+		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
 								pstate);
 
 	return ret;
@@ -1202,12 +1131,8 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
 	int ret = 0;
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	void *pp_handle = adev->powerplay.pp_handle;
-	struct smu_context *smu = &adev->smu;
 
-	if (is_support_sw_smu(adev))
-		ret = smu_set_df_cstate(smu, cstate);
-	else if (pp_funcs &&
-		 pp_funcs->set_df_cstate)
+	if (pp_funcs && pp_funcs->set_df_cstate)
 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
 
 	return ret;
@@ -1228,12 +1153,9 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
 	void *pp_handle = adev->powerplay.pp_handle;
 	const struct amd_pm_funcs *pp_funcs =
 			adev->powerplay.pp_funcs;
-	struct smu_context *smu = &adev->smu;
 	int ret = 0;
 
-	if (is_support_sw_smu(adev))
-		ret = smu_enable_mgpu_fan_boost(smu);
-	else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
+	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
 
 	return ret;
@@ -1290,20 +1212,17 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
 			   void *data, uint32_t *size)
 {
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	int ret = 0;
 
 	if (!data || !size)
 		return -EINVAL;
 
-	if (is_support_sw_smu(adev))
-		ret = smu_read_sensor(&adev->smu, sensor, data, size);
-	else {
-		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
-			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
+	if (pp_funcs && pp_funcs->read_sensor)
+		ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
 								    sensor, data, size);
-		else
-			ret = -EINVAL;
-	}
+	else
+		ret = -EINVAL;
 
 	return ret;
 }
@@ -1560,36 +1479,30 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			amdgpu_fence_wait_empty(ring);
 	}
 
-	if (is_support_sw_smu(adev)) {
-		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
-		smu_handle_task(&adev->smu,
-				smu_dpm->dpm_level,
-				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
-				true);
-	} else {
-		if (adev->powerplay.pp_funcs->dispatch_tasks) {
-			if (!amdgpu_device_has_dc_support(adev)) {
-				mutex_lock(&adev->pm.mutex);
-				amdgpu_dpm_get_active_displays(adev);
-				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
-				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
-				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
-				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
-				if (adev->pm.pm_display_cfg.vrefresh > 120)
-					adev->pm.pm_display_cfg.min_vblank_time = 0;
-				if (adev->powerplay.pp_funcs->display_configuration_change)
-					adev->powerplay.pp_funcs->display_configuration_change(
-								adev->powerplay.pp_handle,
-								&adev->pm.pm_display_cfg);
-				mutex_unlock(&adev->pm.mutex);
-			}
-			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
-		} else {
+	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+		if (!amdgpu_device_has_dc_support(adev)) {
 			mutex_lock(&adev->pm.mutex);
 			amdgpu_dpm_get_active_displays(adev);
-			amdgpu_dpm_change_power_state_locked(adev);
+			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+			/* we have issues with mclk switching with
+			 * refresh rates over 120 hz on the non-DC code.
+			 */
+			if (adev->pm.pm_display_cfg.vrefresh > 120)
+				adev->pm.pm_display_cfg.min_vblank_time = 0;
+			if (adev->powerplay.pp_funcs->display_configuration_change)
+				adev->powerplay.pp_funcs->display_configuration_change(
+							adev->powerplay.pp_handle,
+							&adev->pm.pm_display_cfg);
 			mutex_unlock(&adev->pm.mutex);
 		}
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+	} else {
+		mutex_lock(&adev->pm.mutex);
+		amdgpu_dpm_get_active_displays(adev);
+		amdgpu_dpm_change_power_state_locked(adev);
+		mutex_unlock(&adev->pm.mutex);
 	}
 }
 
@@ -1684,7 +1597,10 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 			pr_err("smu firmware loading failed\n");
 			return r;
 		}
-		*smu_version = adev->pm.fw_version;
+
+		if (smu_version)
+			*smu_version = adev->pm.fw_version;
 	}
+
 	return 0;
 }
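The amdgpu_pm_load_smu_firmware() change makes the version out-parameter optional. A sketch of the two call shapes this now supports (the call sites themselves are hypothetical, not from the patch):

	uint32_t smu_version;
	int r;

	/* caller wants the loaded firmware version back */
	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	/* caller only wants the firmware loaded; NULL is now safe */
	r = amdgpu_pm_load_smu_firmware(adev, NULL);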
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5fa65f191a37..9a54066ec0af 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -23,13 +23,10 @@
  * Alex Deucher <alexdeucher@gmail.com>
  */
 
-#include <drm/drm_debugfs.h>
-
 #include "amdgpu.h"
 #include "amdgpu_drv.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
-#include "amdgpu_smu.h"
 #include "atom.h"
 #include <linux/pci.h>
 #include <linux/hwmon.h>
@@ -125,11 +122,14 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	enum amd_pm_state_type pm;
 	int ret;
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -137,12 +137,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		if (adev->smu.ppt_funcs->get_current_power_state)
-			pm = smu_get_current_power_state(&adev->smu);
-		else
-			pm = adev->pm.dpm.user_state;
-	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
+	if (pp_funcs->get_current_power_state) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
 	} else {
 		pm = adev->pm.dpm.user_state;
@@ -151,9 +146,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
-			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+	return sysfs_emit(buf, "%s\n",
+			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
@@ -168,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		state = POWER_STATE_TYPE_BATTERY;
@@ -274,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -281,9 +280,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		level = smu_get_performance_level(&adev->smu);
-	else if (adev->powerplay.pp_funcs->get_performance_level)
+	if (adev->powerplay.pp_funcs->get_performance_level)
 		level = amdgpu_dpm_get_performance_level(adev);
 	else
 		level = adev->pm.dpm.forced_level;
@@ -291,16 +288,17 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
-			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
-			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
-			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
-			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
-			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
-			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
-			"unknown");
+	return sysfs_emit(buf, "%s\n",
+			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
+			  "unknown");
 }
 
 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
@@ -310,12 +308,15 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	enum amd_dpm_forced_level level;
 	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -335,6 +336,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
 	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
 	} else {
 		return -EINVAL;
 	}
@@ -345,9 +348,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		current_level = smu_get_performance_level(&adev->smu);
-	else if (adev->powerplay.pp_funcs->get_performance_level)
+	if (pp_funcs->get_performance_level)
 		current_level = amdgpu_dpm_get_performance_level(adev);
 
 	if (current_level == level) {
@@ -377,14 +378,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 		return -EINVAL;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_force_performance_level(&adev->smu, level);
-		if (ret) {
-			pm_runtime_mark_last_busy(ddev->dev);
-			pm_runtime_put_autosuspend(ddev->dev);
-			return -EINVAL;
-		}
-	} else if (adev->powerplay.pp_funcs->force_performance_level) {
+	if (pp_funcs->force_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
 			mutex_unlock(&adev->pm.mutex);
@@ -415,11 +409,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 	struct pp_states_info data;
 	int i, buf_len, ret;
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -427,11 +424,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_get_power_num_states(&adev->smu, &data);
-		if (ret)
-			return ret;
-	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
+	if (pp_funcs->get_pp_num_states) {
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 	} else {
 		memset(&data, 0, sizeof(data));
@@ -457,13 +450,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
-	struct pp_states_info data;
-	struct smu_context *smu = &adev->smu;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	struct pp_states_info data = {0};
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -471,13 +466,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		pm = smu_get_current_power_state(smu);
-		ret = smu_get_power_num_states(smu, &data);
-		if (ret)
-			return ret;
-	} else if (adev->powerplay.pp_funcs->get_current_power_state
-		 && adev->powerplay.pp_funcs->get_pp_num_states) {
+	if (pp_funcs->get_current_power_state
+		 && pp_funcs->get_pp_num_states) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 	}
@@ -493,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	if (i == data.nums)
 		i = -EINVAL;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", i);
+	return sysfs_emit(buf, "%d\n", i);
 }
 
 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
@@ -505,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (adev->pp_force_state_enabled)
 		return amdgpu_get_pp_cur_state(dev, attr, buf);
 	else
-		return snprintf(buf, PAGE_SIZE, "\n");
+		return sysfs_emit(buf, "\n");
 }
 
 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
@@ -525,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
@@ -585,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -592,13 +588,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
-		pm_runtime_mark_last_busy(ddev->dev);
-		pm_runtime_put_autosuspend(ddev->dev);
-		if (size < 0)
-			return size;
-	} else if (adev->powerplay.pp_funcs->get_pp_table) {
+	if (adev->powerplay.pp_funcs->get_pp_table) {
 		size = amdgpu_dpm_get_pp_table(adev, &table);
 		pm_runtime_mark_last_busy(ddev->dev);
 		pm_runtime_put_autosuspend(ddev->dev);
@@ -629,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -636,15 +628,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
-		if (ret) {
-			pm_runtime_mark_last_busy(ddev->dev);
-			pm_runtime_put_autosuspend(ddev->dev);
-			return ret;
-		}
-	} else if (adev->powerplay.pp_funcs->set_pp_table)
-		amdgpu_dpm_set_pp_table(adev, buf, count);
+	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
+	if (ret) {
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
@@ -794,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (count > 127)
 		return -EINVAL;
@@ -842,53 +833,42 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_od_edit_dpm_table(&adev->smu, type,
-					    parameter, parameter_size);
-
+	if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+		ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+							parameter,
+							parameter_size);
 		if (ret) {
 			pm_runtime_mark_last_busy(ddev->dev);
 			pm_runtime_put_autosuspend(ddev->dev);
 			return -EINVAL;
 		}
-	} else {
-
-		if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
-			ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
-								parameter,
-								parameter_size);
-			if (ret) {
-				pm_runtime_mark_last_busy(ddev->dev);
-				pm_runtime_put_autosuspend(ddev->dev);
-				return -EINVAL;
-			}
-		}
+	}
 
-		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
-			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
-							    parameter, parameter_size);
-			if (ret) {
-				pm_runtime_mark_last_busy(ddev->dev);
-				pm_runtime_put_autosuspend(ddev->dev);
-				return -EINVAL;
-			}
+	if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
+		ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+						    parameter, parameter_size);
+		if (ret) {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
+			return -EINVAL;
 		}
+	}
 
-		if (type == PP_OD_COMMIT_DPM_TABLE) {
-			if (adev->powerplay.pp_funcs->dispatch_tasks) {
-				amdgpu_dpm_dispatch_task(adev,
-							 AMD_PP_TASK_READJUST_POWER_STATE,
-							 NULL);
-				pm_runtime_mark_last_busy(ddev->dev);
-				pm_runtime_put_autosuspend(ddev->dev);
-				return count;
-			} else {
-				pm_runtime_mark_last_busy(ddev->dev);
-				pm_runtime_put_autosuspend(ddev->dev);
-				return -EINVAL;
-			}
+	if (type == PP_OD_COMMIT_DPM_TABLE) {
+		if (adev->powerplay.pp_funcs->dispatch_tasks) {
+			amdgpu_dpm_dispatch_task(adev,
+						 AMD_PP_TASK_READJUST_POWER_STATE,
+						 NULL);
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
+			return count;
+		} else {
+			pm_runtime_mark_last_busy(ddev->dev);
+			pm_runtime_put_autosuspend(ddev->dev);
+			return -EINVAL;
 		}
 	}
+
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
@@ -906,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -913,18 +895,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
-		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
-		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
-		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
-		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
-		size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
-	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
+	if (adev->powerplay.pp_funcs->print_clock_levels) {
 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
 	} else {
 		size = snprintf(buf, PAGE_SIZE, "\n");
 	}
@@ -962,6 +939,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
@@ -973,14 +952,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
-		if (ret) {
-			pm_runtime_mark_last_busy(ddev->dev);
-			pm_runtime_put_autosuspend(ddev->dev);
-			return -EINVAL;
-		}
-	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+	if (adev->powerplay.pp_funcs->set_ppfeature_status) {
 		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
 		if (ret) {
 			pm_runtime_mark_last_busy(ddev->dev);
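The same two-line suspend guard is inserted at the top of every sysfs handler in this patch. Its intent, expressed as a hypothetical predicate (not a helper the patch actually adds):

	/* Reject sysfs access while the device is suspended, unless the
	 * access itself is what is waking the device through runtime PM. */
	static inline bool amdgpu_pm_access_blocked(struct amdgpu_device *adev)
	{
		return amdgpu_in_reset(adev) ||
		       (adev->in_suspend && !adev->in_runpm);
	}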
@@ -1005,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1012,9 +986,7 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
-	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+	if (adev->powerplay.pp_funcs->get_ppfeature_status)
 		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
 	else
 		size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1055,8 +1027,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
  * NOTE: change to the dcefclk max dpm level is not supported now
  */
 
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
-		struct device_attribute *attr,
+static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
+		enum pp_clock_type type,
 		char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
@@ -1066,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1073,10 +1047,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
-	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
 	else
 		size = snprintf(buf, PAGE_SIZE, "\n");
 
@@ -1121,8 +1093,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 	return 0;
 }
 
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
-		struct device_attribute *attr,
+static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
+		enum pp_clock_type type,
 		const char *buf,
 		size_t count)
 {
@@ -1133,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1144,10 +1118,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
-	else if (adev->powerplay.pp_funcs->force_clock_level)
-		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+	if (adev->powerplay.pp_funcs->force_clock_level)
+		ret = amdgpu_dpm_force_clock_level(adev, type, mask);
+	else
+		ret = 0;
 
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
@@ -1158,35 +1132,26 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	return count;
 }
 
-static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
-	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
+	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
+}
 
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
+}
 
-	return size;
+static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -1194,67 +1159,14 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	uint32_t mask = 0;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
-	else if (adev->powerplay.pp_funcs->force_clock_level)
-		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
-	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	return size;
+	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -1262,69 +1174,14 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int ret;
-	uint32_t mask = 0;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
-	else if (adev->powerplay.pp_funcs->force_clock_level)
-		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
-	else
-		ret = 0;
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
-	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	return size;
+	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1332,67 +1189,14 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int ret;
-	uint32_t mask = 0;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
-	else if (adev->powerplay.pp_funcs->force_clock_level)
-		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
-	else
-		ret = 0;
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	return size;
+	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
@@ -1400,65 +1204,14 @@ static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int ret;
-	uint32_t mask = 0;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
-	else
-		ret = 0;
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
 }
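With amdgpu_get_pp_dpm_clock()/amdgpu_set_pp_dpm_clock() factored out, each per-domain sysfs handler collapses to a two-line wrapper keyed by pp_clock_type. Adding a further domain would look like this (PP_EXAMPLECLK is a placeholder, not a real enumerator):

	static ssize_t amdgpu_get_pp_dpm_exampleclk(struct device *dev,
			struct device_attribute *attr,
			char *buf)
	{
		return amdgpu_get_pp_dpm_clock(dev, PP_EXAMPLECLK, buf);
	}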
 
 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	return size;
+	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
@@ -1466,67 +1219,14 @@ static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int ret;
-	uint32_t mask = 0;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
-	else
-		ret = 0;
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
-	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	return size;
+	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1534,69 +1234,14 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int ret;
-	uint32_t mask = 0;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
-	else if (adev->powerplay.pp_funcs->force_clock_level)
-		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
-	else
-		ret = 0;
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	ssize_t size;
-	int ret;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
-	else if (adev->powerplay.pp_funcs->print_clock_levels)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
-	else
-		size = snprintf(buf, PAGE_SIZE, "\n");
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	return size;
+	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -1604,38 +1249,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		const char *buf,
 		size_t count)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = drm_to_adev(ddev);
-	int ret;
-	uint32_t mask = 0;
-
-	if (amdgpu_in_reset(adev))
-		return -EPERM;
-
-	ret = amdgpu_read_mask(buf, count, &mask);
-	if (ret)
-		return ret;
-
-	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0) {
-		pm_runtime_put_autosuspend(ddev->dev);
-		return ret;
-	}
-
-	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
-	else if (adev->powerplay.pp_funcs->force_clock_level)
-		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-	else
-		ret = 0;
-
-	pm_runtime_mark_last_busy(ddev->dev);
-	pm_runtime_put_autosuspend(ddev->dev);
-
-	if (ret)
-		return -EINVAL;
-
-	return count;
+	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
@@ -1649,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1664,7 +1280,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
@@ -1679,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = kstrtol(buf, 0, &value);
 
@@ -1722,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1737,7 +1357,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
@@ -1752,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = kstrtol(buf, 0, &value);
 
@@ -1815,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1822,9 +1446,7 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		size = smu_get_power_profile_mode(&adev->smu, buf);
-	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
+	if (adev->powerplay.pp_funcs->get_power_profile_mode)
 		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
 	else
 		size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1855,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	tmp[0] = *(buf);
 	tmp[1] = '\0';
@@ -1888,9 +1512,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
-	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
+	if (adev->powerplay.pp_funcs->set_power_profile_mode)
 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 
 	pm_runtime_mark_last_busy(ddev->dev);
@@ -1920,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0) {
@@ -1937,7 +1561,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1958,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0) {
@@ -1975,7 +1601,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+	return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -2001,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (adev->flags & AMD_IS_APU)
 		return -ENODATA;
@@ -2019,8 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	pm_runtime_mark_last_busy(ddev->dev);
 	pm_runtime_put_autosuspend(ddev->dev);
 
-	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
-			count0, count1, pcie_get_mps(adev->pdev));
+	return sysfs_emit(buf, "%llu %llu %i\n",
+			  count0, count1, pcie_get_mps(adev->pdev));
 }
 
 /**
@@ -2042,9 +1670,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (adev->unique_id)
-		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 
 	return 0;
 }
@@ -2071,10 +1701,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 
-	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-			adev_to_drm(adev)->unique,
-			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
-			adev->throttling_logging_rs.interval / HZ + 1);
+	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
+			  adev_to_drm(adev)->unique,
+			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+			  adev->throttling_logging_rs.interval / HZ + 1);
 }
 
 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
@@ -2140,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -2147,9 +1779,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev))
-		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
-	else if (adev->powerplay.pp_funcs->get_gpu_metrics)
+	if (adev->powerplay.pp_funcs->get_gpu_metrics)
 		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
 
 	if (size <= 0)
@@ -2169,7 +1799,7 @@ out:
 
 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC),
+	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC),
@@ -2214,7 +1844,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		if (asic_type < CHIP_VEGA10)
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+		if (asic_type < CHIP_VEGA10 ||
+		    asic_type == CHIP_ARCTURUS ||
+		    asic_type == CHIP_ALDEBARAN)
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
 		if (asic_type < CHIP_VEGA20)
@@ -2261,6 +1893,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		}
 	}
 
+	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+		/* SMU MP1 does not support dcefclk level setting */
+		if (asic_type >= CHIP_NAVI10) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+	}
+
 #undef DEVICE_ATTR_IS
 
 	return 0;
@@ -2370,6 +2010,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
@@ -2407,7 +2049,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
@@ -2423,7 +2065,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
 	else
 		temp = adev->pm.dpm.thermal.max_temp;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
@@ -2439,7 +2081,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
 	else
 		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
@@ -2455,7 +2097,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
 	else
 		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
@@ -2467,7 +2109,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
@@ -2493,7 +2135,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
 		break;
 	}
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+	return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
@@ -2506,6 +2148,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
@@ -2513,22 +2157,18 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		pwm_mode = smu_get_fan_control_mode(&adev->smu);
-	} else {
-		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-			return -EINVAL;
-		}
-
-		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return -EINVAL;
 	}
 
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
-	return sprintf(buf, "%i\n", pwm_mode);
+	return sprintf(buf, "%u\n", pwm_mode);
 }
 
 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -2542,6 +2182,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = kstrtoint(buf, 10, &value);
 	if (err)
@@ -2553,18 +2195,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 		return ret;
 	}
 
-	if (is_support_sw_smu(adev)) {
-		smu_set_fan_control_mode(&adev->smu, value);
-	} else {
-		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-			return -EINVAL;
-		}
-
-		amdgpu_dpm_set_fan_control_mode(adev, value);
+	if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return -EINVAL;
 	}
 
+	amdgpu_dpm_set_fan_control_mode(adev, value);
+
 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
@@ -2596,6 +2234,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2603,11 +2243,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 		return err;
 	}
 
-	if (is_support_sw_smu(adev))
-		pwm_mode = smu_get_fan_control_mode(&adev->smu);
-	else
-		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
 		pr_info("manual fan speed control should be enabled first\n");
 		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -2624,9 +2260,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
 	value = (value * 100) / 255;
 
-	if (is_support_sw_smu(adev))
-		err = smu_set_fan_speed_percent(&adev->smu, value);
-	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
+	if (adev->powerplay.pp_funcs->set_fan_speed_percent)
 		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
 	else
 		err = -EINVAL;
@@ -2650,6 +2284,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2657,9 +2293,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 		return err;
 	}
 
-	if (is_support_sw_smu(adev))
-		err = smu_get_fan_speed_percent(&adev->smu, &speed);
-	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
+	if (adev->powerplay.pp_funcs->get_fan_speed_percent)
 		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
 	else
 		err = -EINVAL;
@@ -2685,6 +2319,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2692,9 +2328,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 		return err;
 	}
 
-	if (is_support_sw_smu(adev))
-		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
-	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+	if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
 		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
 	else
 		err = -EINVAL;
@@ -2719,6 +2353,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2735,7 +2371,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+	return sysfs_emit(buf, "%d\n", min_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
@@ -2749,6 +2385,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2765,7 +2403,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	if (r)
 		return r;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+	return sysfs_emit(buf, "%d\n", max_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
@@ -2778,6 +2416,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2785,9 +2425,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 		return err;
 	}
 
-	if (is_support_sw_smu(adev))
-		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
-	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+	if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
 		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
 	else
 		err = -EINVAL;
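Both the PWM- and RPM-based fan writes in this file share one precondition: the controller must already be in manual mode. Condensed from the surrounding handlers (error-path bookkeeping trimmed for brevity):

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	if (pwm_mode != AMD_FAN_CTRL_MANUAL)
		return -EINVAL;	/* write pwm1_enable = 1 first */

	if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);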
amdgpu_dpm_get_fan_control_mode(adev); if (pwm_mode != AMD_FAN_CTRL_MANUAL) { pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -2837,9 +2474,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, return err; } - if (is_support_sw_smu(adev)) - err = smu_set_fan_speed_rpm(&adev->smu, value); - else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) + if (adev->powerplay.pp_funcs->set_fan_speed_rpm) err = amdgpu_dpm_set_fan_speed_rpm(adev, value); else err = -EINVAL; @@ -2863,6 +2498,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { @@ -2870,18 +2507,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, return ret; } - if (is_support_sw_smu(adev)) { - pwm_mode = smu_get_fan_control_mode(&adev->smu); - } else { - if (!adev->powerplay.pp_funcs->get_fan_control_mode) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; - } - - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + if (!adev->powerplay.pp_funcs->get_fan_control_mode) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return -EINVAL; } + pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2900,6 +2533,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; err = kstrtoint(buf, 10, &value); if (err) @@ -2918,16 +2553,12 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, return err; } - if (is_support_sw_smu(adev)) { - smu_set_fan_control_mode(&adev->smu, pwm_mode); - } else { - if (!adev->powerplay.pp_funcs->set_fan_control_mode) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; - } - amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); + if (!adev->powerplay.pp_funcs->set_fan_control_mode) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return -EINVAL; } + amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2945,6 +2576,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { @@ -2962,14 +2595,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); + return sysfs_emit(buf, "%d\n", vddgfx); } static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "vddgfx\n"); + return sysfs_emit(buf, "vddgfx\n"); } static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, @@ -2982,6 +2615,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; /* only APUs have vddnb */ if (!(adev->flags & AMD_IS_APU)) @@ -3003,14 +2638,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, if (r) 
return r; - return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); + return sysfs_emit(buf, "%d\n", vddnb); } static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "vddnb\n"); + return sysfs_emit(buf, "vddnb\n"); } static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, @@ -3024,6 +2659,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { @@ -3044,7 +2681,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, /* convert to microwatts */ uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; - return snprintf(buf, PAGE_SIZE, "%u\n", uw); + return sysfs_emit(buf, "%u\n", uw); } static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, @@ -3059,13 +2696,17 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int limit_type = to_sensor_dev_attr(attr)->index; uint32_t limit = limit_type << 24; + uint32_t max_limit = 0; ssize_t size; int r; if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { @@ -3076,9 +2717,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, if (is_support_sw_smu(adev)) { smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX); size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { - adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); - size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); + } else if (pp_funcs && pp_funcs->get_power_limit) { + pp_funcs->get_power_limit(adev->powerplay.pp_handle, + &limit, &max_limit, true); + size = snprintf(buf, PAGE_SIZE, "%u\n", max_limit * 1000000); } else { size = snprintf(buf, PAGE_SIZE, "\n"); } @@ -3094,6 +2736,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, char *buf) { struct amdgpu_device *adev = dev_get_drvdata(dev); + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int limit_type = to_sensor_dev_attr(attr)->index; uint32_t limit = limit_type << 24; ssize_t size; @@ -3101,6 +2744,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { @@ -3111,8 +2756,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, if (is_support_sw_smu(adev)) { smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT); size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { - adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); + } else if (pp_funcs && pp_funcs->get_power_limit) { + pp_funcs->get_power_limit(adev->powerplay.pp_handle, + &limit, NULL, false); size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else { size = snprintf(buf, PAGE_SIZE, "\n"); @@ -3124,13 +2770,51 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, return size; } +static ssize_t amdgpu_hwmon_show_power_cap_default(struct device 
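The ->get_power_limit() signature change threaded through the power_cap handlers above adds a dedicated out-parameter for the maximum limit. A hedged sketch of a caller; the exact hook prototype lives in the amd_pm_funcs table, which this diff does not show:

	uint32_t limit = 0, max_limit = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->get_power_limit) {
		/* last argument: true = report the default limit in *limit,
		 * false = report the currently enforced one; max_limit may
		 * be NULL when the ceiling is not needed */
		pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					  &limit, &max_limit, true);
	}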
*dev, + struct device_attribute *attr, + char *buf) +{ + struct amdgpu_device *adev = dev_get_drvdata(dev); + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int limit_type = to_sensor_dev_attr(attr)->index; + uint32_t limit = limit_type << 24; + ssize_t size; + int r; + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + if (is_support_sw_smu(adev)) { + smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT); + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); + } else if (pp_funcs && pp_funcs->get_power_limit) { + pp_funcs->get_power_limit(adev->powerplay.pp_handle, + &limit, NULL, true); + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); + } else { + size = snprintf(buf, PAGE_SIZE, "\n"); + } + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return size; +} static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, struct device_attribute *attr, char *buf) { int limit_type = to_sensor_dev_attr(attr)->index; - return snprintf(buf, PAGE_SIZE, "%s\n", + return sysfs_emit(buf, "%s\n", limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT"); } @@ -3140,12 +2824,15 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, size_t count) { struct amdgpu_device *adev = dev_get_drvdata(dev); + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int limit_type = to_sensor_dev_attr(attr)->index; int err; u32 value; if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -3163,10 +2850,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return err; } - if (is_support_sw_smu(adev)) - err = smu_set_power_limit(&adev->smu, value); - else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) - err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); + if (pp_funcs && pp_funcs->set_power_limit) + err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); else err = -EINVAL; @@ -3189,6 +2874,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { @@ -3206,14 +2893,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); + return sysfs_emit(buf, "%u\n", sclk * 10 * 1000); } static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "sclk\n"); + return sysfs_emit(buf, "sclk\n"); } static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, @@ -3226,6 +2913,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { @@ -3243,14 +2932,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); + return sysfs_emit(buf, "%u\n", mclk * 10 * 1000); } static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, struct device_attribute *attr, char *buf) { - 
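The amdgpu_hwmon_show_power_cap_default() handler added above leans on the new SMU_PPT_LIMIT_DEFAULT level; on the swsmu path the three power1_cap* files map onto smu_get_power_limit() roughly as follows (sketch, reusing names from this diff):

	uint32_t limit = 0;

	/* SMU_PPT_LIMIT_CURRENT -> power1_cap         (enforced now)
	 * SMU_PPT_LIMIT_DEFAULT -> power1_cap_default (board default, new)
	 * SMU_PPT_LIMIT_MAX     -> power1_cap_max     (largest settable) */
	smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT);

	/* the SMU works in watts, hwmon reports microwatts */
	size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);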
return snprintf(buf, PAGE_SIZE, "mclk\n"); + return sysfs_emit(buf, "mclk\n"); } /** @@ -3315,9 +3004,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * - pwm1_max: pulse width modulation fan control maximum level (255) * - * - fan1_min: an minimum value Unit: revolution/min (RPM) + * - fan1_min: a minimum value Unit: revolution/min (RPM) * - * - fan1_max: an maxmum value Unit: revolution/max (RPM) + * - fan1_max: a maximum value Unit: revolution/max (RPM) * * - fan1_input: fan speed in RPM * @@ -3367,11 +3056,13 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); +static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0); static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0); static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1); static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1); +static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1); static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1); static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); @@ -3411,11 +3102,13 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_power1_cap_max.dev_attr.attr, &sensor_dev_attr_power1_cap_min.dev_attr.attr, &sensor_dev_attr_power1_cap.dev_attr.attr, + &sensor_dev_attr_power1_cap_default.dev_attr.attr, &sensor_dev_attr_power1_label.dev_attr.attr, &sensor_dev_attr_power2_average.dev_attr.attr, &sensor_dev_attr_power2_cap_max.dev_attr.attr, &sensor_dev_attr_power2_cap_min.dev_attr.attr, &sensor_dev_attr_power2_cap.dev_attr.attr, + &sensor_dev_attr_power2_cap_default.dev_attr.attr, &sensor_dev_attr_power2_label.dev_attr.attr, &sensor_dev_attr_freq1_input.dev_attr.attr, &sensor_dev_attr_freq1_label.dev_attr.attr, @@ -3514,7 +3207,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */ (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| - attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) + attr == &sensor_dev_attr_power1_cap.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr)) return 0; if (((adev->family == AMDGPU_FAMILY_SI) || @@ -3580,6 +3274,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || attr == &sensor_dev_attr_power2_cap.dev_attr.attr || + attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || attr == &sensor_dev_attr_power2_label.dev_attr.attr || attr == &sensor_dev_attr_power1_label.dev_attr.attr)) return 0; @@ -3784,16 +3479,17 @@ static void amdgpu_parse_cg_state(struct seq_file 
*m, u32 flags) (flags & clocks[i].flag) ? "On" : "Off"); } -static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) +static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct drm_device *dev = adev_to_drm(adev); u32 flags = 0; int r; if (amdgpu_in_reset(adev)) return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; r = pm_runtime_get_sync(dev->dev); if (r < 0) { @@ -3836,16 +3532,18 @@ out: return r; } -static const struct drm_info_list amdgpu_pm_info_list[] = { - {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info); + #endif -int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) +void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); -#else - return 0; + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_pm_info", 0444, root, adev, + &amdgpu_debugfs_pm_info_fops); + #endif } diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h new file mode 100644 index 000000000000..610266088ff1 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h @@ -0,0 +1,127 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
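The debugfs conversion earlier in this hunk trades the drm_info_list indirection for the generic seq_file helper: DEFINE_SHOW_ATTRIBUTE(name) generates name_open() and name_fops from a name_show() function, so registration shrinks to one debugfs_create_file() call. A self-contained sketch with a hypothetical node:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_info_show(struct seq_file *m, void *unused)
{
	/* m->private carries the pointer handed to debugfs_create_file() */
	seq_puts(m, "example\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_info);	/* emits example_info_fops */

/* registration, e.g. from an init hook:
 *	debugfs_create_file("example_info", 0444, root, data,
 *			    &example_info_fops);
 */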
+ * + */ + +#ifndef ALDEBARAN_PP_SMC_H +#define ALDEBARAN_PP_SMC_H + +#pragma pack(push, 1) + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GfxDriverReset 0x3 +#define PPSMC_MSG_GetDriverIfVersion 0x4 +#define PPSMC_MSG_spare1 0x5 +#define PPSMC_MSG_spare2 0x6 +#define PPSMC_MSG_EnableAllSmuFeatures 0x7 +#define PPSMC_MSG_DisableAllSmuFeatures 0x8 +#define PPSMC_MSG_spare3 0x9 +#define PPSMC_MSG_spare4 0xA +#define PPSMC_MSG_spare5 0xB +#define PPSMC_MSG_spare6 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xD +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrHigh 0xF +#define PPSMC_MSG_SetDriverDramAddrLow 0x10 +#define PPSMC_MSG_SetToolsDramAddrHigh 0x11 +#define PPSMC_MSG_SetToolsDramAddrLow 0x12 +#define PPSMC_MSG_TransferTableSmu2Dram 0x13 +#define PPSMC_MSG_TransferTableDram2Smu 0x14 +#define PPSMC_MSG_UseDefaultPPTable 0x15 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 +#define PPSMC_MSG_SetSoftMinByFreq 0x18 +#define PPSMC_MSG_SetSoftMaxByFreq 0x19 +#define PPSMC_MSG_SetHardMinByFreq 0x1A +#define PPSMC_MSG_SetHardMaxByFreq 0x1B +#define PPSMC_MSG_GetMinDpmFreq 0x1C +#define PPSMC_MSG_GetMaxDpmFreq 0x1D +#define PPSMC_MSG_GetDpmFreqByIndex 0x1E +#define PPSMC_MSG_SetWorkloadMask 0x1F +#define PPSMC_MSG_GetVoltageByDpm 0x20 +#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x21 +#define PPSMC_MSG_SetPptLimit 0x22 +#define PPSMC_MSG_GetPptLimit 0x23 +#define PPSMC_MSG_PrepareMp1ForUnload 0x24 +#define PPSMC_MSG_PrepareMp1ForReset 0x25 //retired in 68.07 +#define PPSMC_MSG_SoftReset 0x26 //retired in 68.07 +#define PPSMC_MSG_RunDcBtc 0x27 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x28 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x29 +#define PPSMC_MSG_DramLogSetDramSize 0x2A +#define PPSMC_MSG_GetDebugData 0x2B +#define PPSMC_MSG_WaflTest 0x2C +#define PPSMC_MSG_spare7 0x2D +#define PPSMC_MSG_SetMemoryChannelEnable 0x2E +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x2F +#define PPSMC_MSG_DFCstateControl 0x32 +#define PPSMC_MSG_GetGmiPwrDnHyst 0x33 +#define PPSMC_MSG_SetGmiPwrDnHyst 0x34 +#define PPSMC_MSG_GmiPwrDnControl 0x35 +#define PPSMC_MSG_EnterGfxoff 0x36 +#define PPSMC_MSG_ExitGfxoff 0x37 +#define PPSMC_MSG_SetExecuteDMATest 0x38 +#define PPSMC_MSG_EnableDeterminism 0x39 +#define PPSMC_MSG_DisableDeterminism 0x3A +#define PPSMC_MSG_SetUclkDpmMode 0x3B + +//STB to dram log +#define PPSMC_MSG_DumpSTBtoDram 0x3C +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3D +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3E +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x3F +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x40 +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41 + +#define PPSMC_MSG_GfxDriverResetRecovery 0x42 +#define PPSMC_Message_Count 0x43 + +//PPSMC Reset Types +#define PPSMC_RESET_TYPE_WARM_RESET 0x00 +#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x01 //driver msg argument should be 1 for mode-1 +#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x02 //and 2 for mode-2 +#define PPSMC_RESET_TYPE_PCIE_LINK_RESET 0x03 +#define PPSMC_RESET_TYPE_BIF_LINK_RESET 0x04 +#define PPSMC_RESET_TYPE_PF0_FLR_RESET 0x05 + + +typedef enum { + GFXOFF_ERROR_NO_ERROR, + GFXOFF_ERROR_DISALLOWED, + GFXOFF_ERROR_GFX_BUSY, + 
GFXOFF_ERROR_GFX_OFF, + GFXOFF_ERROR_GFX_ON, +} GFXOFF_ERROR_e; + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index 45a22e101d15..a920515e2274 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -84,6 +84,6 @@ int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev); void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev); -int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); +void amdgpu_debugfs_pm_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 10b0624ade65..8bb224f6c762 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -171,6 +171,7 @@ enum smu_ppt_limit_level { SMU_PPT_LIMIT_MIN = -1, SMU_PPT_LIMIT_CURRENT, + SMU_PPT_LIMIT_DEFAULT, SMU_PPT_LIMIT_MAX, }; @@ -194,6 +195,11 @@ struct smu_user_dpm_profile { uint32_t clk_dependency; }; +enum smu_event_type { + + SMU_EVENT_RESET_COMPLETE = 0, +}; + #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ do { \ tables[table_id].size = s; \ @@ -337,7 +343,6 @@ struct smu_power_context { struct smu_power_gate power_gate; }; - #define SMU_FEATURE_MAX (64) struct smu_feature { @@ -439,9 +444,6 @@ struct smu_context struct smu_baco_context smu_baco; struct smu_temperature_range thermal_range; void *od_settings; -#if defined(CONFIG_DEBUG_FS) - struct dentry *debugfs_sclk; -#endif struct smu_umd_pstate_table pstate_table; uint32_t pstate_sclk; @@ -449,6 +451,7 @@ struct smu_context bool od_enabled; uint32_t current_power_limit; + uint32_t default_power_limit; uint32_t max_power_limit; /* soft pptable */ @@ -808,6 +811,13 @@ struct pptable_funcs { int (*check_fw_status)(struct smu_context *smu); /** + * @set_mp1_state: put SMU into a correct state for coming + * resume from runpm or gpu reset. + */ + int (*set_mp1_state)(struct smu_context *smu, + enum pp_mp1_state mp1_state); + + /** * @setup_pptable: Initialize the power play table and populate it with * default values. */ @@ -1047,6 +1057,10 @@ struct pptable_funcs { * @mode1_reset_is_support: Check if GPU supports mode1 reset. */ bool (*mode1_reset_is_support)(struct smu_context *smu); + /** + * @mode2_reset_is_support: Check if GPU supports mode2 reset. + */ + bool (*mode2_reset_is_support)(struct smu_context *smu); /** * @mode1_reset: Perform mode1 reset. @@ -1152,6 +1166,17 @@ struct pptable_funcs { * parameters to defaults. */ int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu); + + /** + * @set_light_sbr: Set light sbr mode for the SMU. + */ + int (*set_light_sbr)(struct smu_context *smu, bool enable); + + /** + * @wait_for_event: Wait for events from SMU.
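The @wait_for_event hook documented above pairs with the PPSMC_MSG_GfxDriverResetRecovery message from the new Aldebaran header: after triggering a mode-2 reset, the driver can block until the firmware reports completion. A sketch of the driver-side call, using the smu_wait_for_event() wrapper declared further down in this header (SMU_EVENT_RESET_COMPLETE is the only event type defined so far):

	/* event_arg is event-specific; unused for reset-complete */
	int r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
	if (r)
		dev_err(adev->dev, "SMU reset recovery did not complete\n");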
+ */ + int (*wait_for_event)(struct smu_context *smu, + enum smu_event_type event, uint64_t event_arg); }; typedef enum { @@ -1227,130 +1252,40 @@ enum smu_cmn2asic_mapping_type { [profile] = {1, (workload)} #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) -int smu_load_microcode(struct smu_context *smu); - -int smu_check_fw_status(struct smu_context *smu); - -int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); - -int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); - int smu_get_power_limit(struct smu_context *smu, uint32_t *limit, enum smu_ppt_limit_level limit_level); -int smu_set_power_limit(struct smu_context *smu, uint32_t limit); -int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); - -int smu_od_edit_dpm_table(struct smu_context *smu, - enum PP_OD_DPM_TABLE_COMMAND type, - long *input, uint32_t size); - -int smu_read_sensor(struct smu_context *smu, - enum amd_pp_sensors sensor, - void *data, uint32_t *size); -int smu_get_power_profile_mode(struct smu_context *smu, char *buf); - -int smu_set_power_profile_mode(struct smu_context *smu, - long *param, - uint32_t param_size, - bool lock_needed); -int smu_get_fan_control_mode(struct smu_context *smu); -int smu_set_fan_control_mode(struct smu_context *smu, int value); -int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed); -int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); -int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed); - -int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk); - -int smu_get_clock_by_type_with_latency(struct smu_context *smu, - enum smu_clk_type clk_type, - struct pp_clock_levels_with_latency *clocks); - -int smu_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request *clock_req); -int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch); - -int smu_set_xgmi_pstate(struct smu_context *smu, - uint32_t pstate); - -int smu_set_azalia_d3_pme(struct smu_context *smu); - -bool smu_baco_is_support(struct smu_context *smu); - -int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state); - -int smu_baco_enter(struct smu_context *smu); -int smu_baco_exit(struct smu_context *smu); - bool smu_mode1_reset_is_support(struct smu_context *smu); +bool smu_mode2_reset_is_support(struct smu_context *smu); int smu_mode1_reset(struct smu_context *smu); -int smu_mode2_reset(struct smu_context *smu); extern const struct amd_ip_funcs smu_ip_funcs; extern const struct amdgpu_ip_block_version smu_v11_0_ip_block; extern const struct amdgpu_ip_block_version smu_v12_0_ip_block; +extern const struct amdgpu_ip_block_version smu_v13_0_ip_block; bool is_support_sw_smu(struct amdgpu_device *adev); bool is_support_cclk_dpm(struct amdgpu_device *adev); -int smu_reset(struct smu_context *smu); -int smu_sys_get_pp_table(struct smu_context *smu, void **table); -int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size); -int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info); -enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu); int smu_write_watermarks_table(struct smu_context *smu); -int smu_set_watermarks_for_clock_ranges( - struct smu_context *smu, - struct pp_smu_wm_range_sets *clock_ranges); - -/* smu to display interface */ -extern int smu_display_configuration_change(struct smu_context *smu, const - 
struct amd_pp_display_configuration - *display_config); -extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate); -extern int smu_handle_task(struct smu_context *smu, - enum amd_dpm_forced_level level, - enum amd_pp_task task_id, - bool lock_needed); -int smu_switch_power_profile(struct smu_context *smu, - enum PP_SMC_POWER_PROFILE type, - bool en); + int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); + int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); -enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); -int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); -int smu_set_display_count(struct smu_context *smu, uint32_t count); -int smu_set_ac_dc(struct smu_context *smu); -size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf); -int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask); -int smu_force_clk_levels(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t mask); -int smu_set_mp1_state(struct smu_context *smu, - enum pp_mp1_state mp1_state); -int smu_set_df_cstate(struct smu_context *smu, - enum pp_df_cstate state); -int smu_allow_xgmi_power_down(struct smu_context *smu, bool en); - -int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, - struct pp_smu_nv_clock_table *max_clocks); -int smu_get_uclk_dpm_states(struct smu_context *smu, - unsigned int *clock_values_in_khz, - unsigned int *num_states); +int smu_set_ac_dc(struct smu_context *smu); -int smu_get_dpm_clock_table(struct smu_context *smu, - struct dpm_clocks *clock_table); +int smu_allow_xgmi_power_down(struct smu_context *smu, bool en); int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value); -ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, void **table); +int smu_set_light_sbr(struct smu_context *smu, bool enable); -int smu_enable_mgpu_fan_boost(struct smu_context *smu); -int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state); +int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, + uint64_t event_arg); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h index 79afb132164e..45f5d29bc705 100644 --- a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h @@ -120,6 +120,13 @@ #define PPSMC_MSG_ReadSerialNumTop32 0x40 #define PPSMC_MSG_ReadSerialNumBottom32 0x41 +/* parameter for MSG_LightSBR * 1 -- Enable light secondary bus reset, only do nbio respond without further handling, + * leave driver to handle the real reset + * 0 -- Disable LightSBR, default behavior, SMU will pass the reset to PSP + */ +#define PPSMC_MSG_LightSBR 0x42 + typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h index 4c7e08ba5fa4..171f12b82716 100644 --- a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h @@ -84,6 +84,7 @@ #define PPSMC_MSG_PowerGateMmHub 0x35 #define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 #define PPSMC_MSG_GpuChangeState 0x37 +#define PPSMC_MSG_GetGfxBusy 0x3D #define PPSMC_Message_Count 0x42 typedef uint16_t PPSMC_Result; diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h index
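PPSMC_MSG_LightSBR and the smu_set_light_sbr() wrapper above control how the Arcturus firmware reacts to a secondary bus reset: per the comment, 1 means NBIO merely responds and the driver handles the real reset, while 0 restores the default of forwarding the reset to PSP. A sketch of the intended toggle:

	/* switch to light SBR handling before issuing the reset,
	 * restore the default afterwards */
	smu_set_light_sbr(&adev->smu, true);
	/* ... perform the secondary bus reset ... */
	smu_set_light_sbr(&adev->smu, false);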
246d3951a78a..04752ade1016 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h @@ -843,11 +843,15 @@ typedef struct { uint16_t FanMaximumRpm; uint16_t FanMinimumPwm; uint16_t FanTargetTemperature; // Degree Celcius + uint16_t FanMode; + uint16_t FanMaxPwm; + uint16_t FanMinPwm; + uint16_t FanMaxTemp; // Degree Celcius + uint16_t FanMinTemp; // Degree Celcius uint16_t MaxOpTemp; // Degree Celcius uint16_t FanZeroRpmEnable; - uint16_t Padding; - uint32_t MmHubPadding[8]; // SMU internal use + uint32_t MmHubPadding[6]; // SMU internal use } OverDriveTable_t; @@ -882,6 +886,45 @@ typedef struct { // Padding - ignore uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_legacy_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequencyPostDs; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageUclkFrequencyPreDs; + uint8_t PcieRate; + uint8_t PcieWidth; + uint8_t Padding8_3[2]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use } SmuMetrics_t; typedef struct { @@ -921,8 +964,59 @@ typedef struct { // Padding - ignore uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_NV12_legacy_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequencyPostDs; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + uint16_t AverageVclkFrequency ; + uint16_t AverageDclkFrequency ; + uint16_t VcnActivityPercentage ; + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageUclkFrequencyPreDs; + uint8_t PcieRate; + uint8_t PcieWidth; + + uint32_t Padding32_1; + uint64_t EnergyAccumulator; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use } SmuMetrics_NV12_t; +typedef union SmuMetrics { + SmuMetrics_legacy_t nv10_legacy_metrics; + SmuMetrics_t nv10_metrics; + SmuMetrics_NV12_legacy_t nv12_legacy_metrics; + SmuMetrics_NV12_t nv12_metrics; +} SmuMetrics_NV1X_t; + typedef struct { uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) diff --git 
a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h index 6e23a3f803a7..8361ebd8d876 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h @@ -26,7 +26,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU13_DRIVER_IF_VERSION 2 +#define SMU13_DRIVER_IF_VERSION 3 typedef struct { int32_t value; @@ -191,6 +191,44 @@ typedef struct { uint16_t SocTemperature; //[centi-Celsius] uint16_t EdgeTemperature; uint16_t ThrottlerStatus; +} SmuMetrics_legacy_t; + +typedef struct { + uint16_t GfxclkFrequency; //[MHz] + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] + uint16_t spare; + + uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] + uint16_t C0Residency[4]; //percentage + + uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t CurrentSocketPower; //[mW] + + //3rd party tools in Windows need info in the case of APUs + uint16_t CoreFrequency[4]; //[MHz] + uint16_t CorePower[4]; //[mW] + uint16_t CoreTemperature[4]; //[centi-Celsius] + uint16_t L3Frequency[1]; //[MHz] + uint16_t L3Temperature[1]; //[centi-Celsius] + + uint16_t GfxTemperature; //[centi-Celsius] + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; +} SmuMetricsTable_t; + +typedef struct { + SmuMetricsTable_t Current; + SmuMetricsTable_t Average; + //uint32_t AccCnt; + uint32_t SampleStartTime; + uint32_t SampleStopTime; } SmuMetrics_t; diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h new file mode 100644 index 000000000000..d23533bda002 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h @@ -0,0 +1,519 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
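The Van Gogh metrics rework above nests the old flat table twice: SmuMetrics_t now carries an instantaneous snapshot and a firmware-averaged copy side by side, plus the sampling window timestamps, so consumers choose explicitly. A sketch, assuming the table has already been fetched from the SMU into `metrics`:

	SmuMetrics_t metrics;

	/* ... transfer the SMU metrics table into &metrics ... */

	/* instantaneous sample vs. firmware-averaged value, both in MHz */
	uint16_t gfxclk_now = metrics.Current.GfxclkFrequency;
	uint16_t gfxclk_avg = metrics.Average.GfxclkFrequency;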
+ * + */ + +#ifndef SMU13_DRIVER_IF_ALDEBARAN_H +#define SMU13_DRIVER_IF_ALDEBARAN_H + +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_LCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_XGMI_DPM_LEVELS 4 + +// Feature Control Defines +#define FEATURE_DATA_CALCULATIONS 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_LCLK_BIT 5 +#define FEATURE_DPM_XGMI_BIT 6 +#define FEATURE_DS_GFXCLK_BIT 7 +#define FEATURE_DS_SOCCLK_BIT 8 +#define FEATURE_DS_LCLK_BIT 9 +#define FEATURE_DS_FCLK_BIT 10 +#define FEATURE_DS_UCLK_BIT 11 +#define FEATURE_GFX_SS_BIT 12 +#define FEATURE_DPM_VCN_BIT 13 +#define FEATURE_RSMU_SMN_CG_BIT 14 +#define FEATURE_WAFL_CG_BIT 15 +#define FEATURE_PPT_BIT 16 +#define FEATURE_TDC_BIT 17 +#define FEATURE_APCC_PLUS_BIT 18 +#define FEATURE_APCC_DFLL_BIT 19 +#define FEATURE_FW_CTF_BIT 20 +#define FEATURE_THERMAL_BIT 21 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 22 +#define FEATURE_SPARE_23_BIT 23 +#define FEATURE_XGMI_PER_LINK_PWR_DWN 24 +#define FEATURE_DF_CSTATE 25 +#define FEATURE_FUSE_CG_BIT 26 +#define FEATURE_MP1_CG_BIT 27 +#define FEATURE_SMUIO_CG_BIT 28 +#define FEATURE_THM_CG_BIT 29 +#define FEATURE_CLK_CG_BIT 30 +#define FEATURE_SPARE_31_BIT 31 +#define FEATURE_SPARE_32_BIT 32 +#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_SPARE_34_BIT 34 +#define FEATURE_SPARE_35_BIT 35 +#define FEATURE_SPARE_36_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + +// I2C Config Bit Defines +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +// Throttler Status Bits. 
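The FEATURE_*_BIT values above are bit positions in a single 64-bit enable mask that the driver reads in two halves (PPSMC_MSG_GetEnabledSmuFeaturesLow/High in the message list earlier). A sketch of reassembling and testing it:

	uint32_t lo = 0, hi = 0;

	/* ... read the two halves from the SMU ... */
	uint64_t mask = ((uint64_t)hi << 32) | lo;

	if (mask & (1ULL << FEATURE_DPM_GFXCLK_BIT))
		; /* GFXCLK DPM is enabled */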
+// These are aligned with the out of band monitor alarm bits for common throttlers +#define THROTTLER_PPT0_BIT 0 +#define THROTTLER_PPT1_BIT 1 +#define THROTTLER_TDC_GFX_BIT 2 +#define THROTTLER_TDC_SOC_BIT 3 +#define THROTTLER_TDC_HBM_BIT 4 +#define THROTTLER_SPARE_5 5 +#define THROTTLER_TEMP_GPU_BIT 6 +#define THROTTLER_TEMP_MEM_BIT 7 +#define THORTTLER_SPARE_8 8 +#define THORTTLER_SPARE_9 9 +#define THORTTLER_SPARE_10 10 +#define THROTTLER_TEMP_VR_GFX_BIT 11 +#define THROTTLER_TEMP_VR_SOC_BIT 12 +#define THROTTLER_TEMP_VR_MEM_BIT 13 +#define THORTTLER_SPARE_14 14 +#define THORTTLER_SPARE_15 15 +#define THORTTLER_SPARE_16 16 +#define THORTTLER_SPARE_17 17 +#define THORTTLER_SPARE_18 18 +#define THROTTLER_APCC_BIT 19 + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE, + I2C_CONTROLLER_THROTTLER_VR_GFX0, + I2C_CONTROLLER_THROTTLER_VR_GFX1, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_MEM, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_MP2855, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig[2]; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. 
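ThrottlerStatus in the metrics table later in this header is a bitmask over the THROTTLER_*_BIT positions defined above; decoding is plain bit testing (sketch, with `metrics` assumed already read from the SMU):

	uint32_t status = metrics.ThrottlerStatus;

	if (status & (1U << THROTTLER_PPT0_BIT))
		; /* throttling on the primary package power limit */
	if (status & (1U << THROTTLER_TEMP_GPU_BIT))
		; /* throttling on GPU temperature */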
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL, + GFXCLK_SOURCE_DFLL, + GFXCLK_SOURCE_COUNT, +} GfxclkSrc_e; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_LCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +//PPSMC_MSG_SetUclkDpmMode +typedef enum { + UCLK_DPM_MODE_BANDWIDTH, + UCLK_DPM_MODE_LATENCY, +} UCLK_DPM_MODE_e; + +typedef struct { + uint8_t StartupLevel; + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[2]; + + // SECTION: Infrastructure Limits + uint16_t PptLimit; // Watts + uint16_t TdcLimitGfx; // Amps + uint16_t TdcLimitSoc; // Amps + uint16_t TdcLimitHbm; // Amps + uint16_t ThotspotLimit; // Celcius + uint16_t TmemLimit; // Celcius + uint16_t Tvr_gfxLimit; // Celcius + uint16_t Tvr_memLimit; // Celcius + uint16_t Tvr_socLimit; // Celcius + uint16_t PaddingLimit; + + // SECTION: Voltage Control Parameters + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint8_t DidTableVclk[NUM_VCLK_DPM_LEVELS]; //PPCLK_VCLK + uint8_t DidTableDclk[NUM_DCLK_DPM_LEVELS]; //PPCLK_DCLK + uint8_t DidTableSocclk[NUM_SOCCLK_DPM_LEVELS]; //PPCLK_SOCCLK + uint8_t DidTableLclk[NUM_LCLK_DPM_LEVELS]; //PPCLK_LCLK + uint32_t FidTableFclk[NUM_FCLK_DPM_LEVELS]; //PPCLK_FCLK + uint8_t DidTableFclk[NUM_FCLK_DPM_LEVELS]; //PPCLK_FCLK + uint32_t FidTableUclk[NUM_UCLK_DPM_LEVELS]; //PPCLK_UCLK + uint8_t DidTableUclk[NUM_UCLK_DPM_LEVELS]; //PPCLK_UCLK + + uint32_t StartupFidPll0; //GFXAVFSCLK, SOCCLK, MP0CLK, MPIOCLK, DXIOCLK + uint32_t StartupFidPll4; //VCLK, DCLK, WAFLCLK + uint32_t StartupFidPll5; //SMNCLK, MP1CLK, LCLK + + uint8_t StartupSmnclkDid; + uint8_t StartupMp0clkDid; + uint8_t StartupMp1clkDid; + uint8_t StartupWaflclkDid; + uint8_t StartupGfxavfsclkDid; + uint8_t StartupMpioclkDid; + uint8_t StartupDxioclkDid; + uint8_t spare123; + + uint8_t 
StartupVidGpu0Svi0Plane0; //VDDCR_GFX0 + uint8_t StartupVidGpu0Svi0Plane1; //VDDCR_SOC + uint8_t StartupVidGpu0Svi1Plane0; //VDDCR_HBM + uint8_t StartupVidGpu0Svi1Plane1; //UNUSED [0 = plane is not used and should not be programmed] + + uint8_t StartupVidGpu1Svi0Plane0; //VDDCR_GFX1 + uint8_t StartupVidGpu1Svi0Plane1; //UNUSED [0 = plane is not used and should not be programmed] + uint8_t StartupVidGpu1Svi1Plane0; //UNUSED [0 = plane is not used and should not be programmed] + uint8_t StartupVidGpu1Svi1Plane1; //UNUSED [0 = plane is not used and should not be programmed] + + // GFXCLK DPM + uint16_t GfxclkFmax; // In MHz + uint16_t GfxclkFmin; // In MHz + uint16_t GfxclkFidle; // In MHz + uint16_t GfxclkFinit; // In MHz + uint8_t GfxclkSource; // GfxclkSrc_e [0 = PLL, 1 = DFLL] + uint8_t spare1[2]; + uint8_t StartupGfxclkDid; + uint32_t StartupGfxclkFid; + + // SECTION: AVFS + uint16_t GFX_Guardband_Freq[8]; // MHz [unsigned] + int16_t GFX_Guardband_Voltage_Cold[8]; // mV [signed] + int16_t GFX_Guardband_Voltage_Mid[8]; // mV [signed] + int16_t GFX_Guardband_Voltage_Hot[8]; // mV [signed] + + uint16_t SOC_Guardband_Freq[8]; // MHz [unsigned] + int16_t SOC_Guardband_Voltage_Cold[8]; // mV [signed] + int16_t SOC_Guardband_Voltage_Mid[8]; // mV [signed] + int16_t SOC_Guardband_Voltage_Hot[8]; // mV [signed] + + // VDDCR_GFX BTC + uint16_t DcBtcEnabled; + int16_t DcBtcMin; // mV [signed] + int16_t DcBtcMax; // mV [signed] + int16_t DcBtcGb; // mV [signed] + + // SECTION: XGMI + uint8_t XgmiLinkSpeed[NUM_XGMI_DPM_LEVELS]; //Gbps [EX: 32 = 32Gbps] + uint8_t XgmiLinkWidth[NUM_XGMI_DPM_LEVELS]; //Width [EX: 16 = x16] + uint8_t XgmiStartupLevel; + uint8_t spare12[3]; + + // GFX Vmin + uint16_t GFX_PPVmin_Enabled; + uint16_t GFX_Vmin_Plat_Offset_Hot; // mV + uint16_t GFX_Vmin_Plat_Offset_Cold; // mV + uint16_t GFX_Vmin_Hot_T0; // mV + uint16_t GFX_Vmin_Cold_T0; // mV + uint16_t GFX_Vmin_Hot_Eol; // mV + uint16_t GFX_Vmin_Cold_Eol; // mV + uint16_t GFX_Vmin_Aging_Offset; // mV + uint16_t GFX_Vmin_Temperature_Hot; // 'C + uint16_t GFX_Vmin_Temperature_Cold; // 'C + + // SOC Vmin + uint16_t SOC_PPVmin_Enabled; + uint16_t SOC_Vmin_Plat_Offset_Hot; // mV + uint16_t SOC_Vmin_Plat_Offset_Cold; // mV + uint16_t SOC_Vmin_Hot_T0; // mV + uint16_t SOC_Vmin_Cold_T0; // mV + uint16_t SOC_Vmin_Hot_Eol; // mV + uint16_t SOC_Vmin_Cold_Eol; // mV + uint16_t SOC_Vmin_Aging_Offset; // mV + uint16_t SOC_Vmin_Temperature_Hot; // 'C + uint16_t SOC_Vmin_Temperature_Cold; // 'C + + // APCC Settings + uint32_t ApccPlusResidencyLimit; //PCC residency % (0-100) + + // Determinism + uint16_t DeterminismVoltageOffset; //mV + uint16_t spare22; + + // reserved + uint32_t spare3[14]; + + // SECTION: BOARD PARAMETERS + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t MemMaxCurrent; // in Amps + int8_t MemOffset; // in Amps + uint8_t Padding_TelemetryMem; + + uint16_t BoardMaxCurrent; // in Amps + int8_t BoardOffset; // in Amps + uint8_t Padding_TelemetryBoardInput; + + // Platform input telemetry voltage coefficient + uint32_t BoardVoltageCoeffA; // decode by /1000 + uint32_t BoardVoltageCoeffB; // decode by /1000 + + // GPIO Settings + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // 
GPIO polarity for VR1 HOT event + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // I2C Controller Structure + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence + uint8_t GpioI2cScl; // Serial Clock + uint8_t GpioI2cSda; // Serial Data + uint16_t spare5; + + uint16_t XgmiMaxCurrent; // in Amps + int8_t XgmiOffset; // in Amps + uint8_t Padding_TelemetryXgmi; + + //reserved + uint32_t reserved[15]; + +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + + uint16_t SocketPowerLpfTau; + + uint32_t Spare[8]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfig_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t Padding1 ; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; // Max + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureVrMem ; + uint32_t ThrottlerStatus ; + + uint32_t PublicSerialNumLower32; + uint32_t PublicSerialNumUpper32; + uint16_t TemperatureAllHBM[4] ; + uint32_t GfxBusyAcc ; + uint32_t DramBusyAcc ; + uint32_t EnergyAcc64bitLow ; //15.259uJ resolution + uint32_t EnergyAcc64bitHigh ; + uint32_t TimeStampLow ; //10ns resolution + uint32_t TimeStampHigh ; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_t; + + +typedef struct { + uint16_t avgPsmCount[76]; + uint16_t minPsmCount[76]; + float avgPsmVoltage[76]; + float minPsmVoltage[76]; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTable_t; + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram +#define TABLE_PPTABLE 0 +#define TABLE_AVFS_PSM_DEBUG 1 +#define TABLE_AVFS_FUSE_OVERRIDE 2 +#define TABLE_PMSTATUSLOG 3 +#define TABLE_SMU_METRICS 4 +#define TABLE_DRIVER_SMU_CONFIG 5 +#define TABLE_I2C_COMMANDS 6 +#define TABLE_COUNT 7 + +#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h index aa4822202587..89a16dcd0fff 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h @@ -168,9 +168,16 @@ __SMU_DUMMY_MAP(PowerGateAtHub), \ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ - __SMU_DUMMY_MAP(DFCstateControl), \ - __SMU_DUMMY_MAP(GmiPwrDnControl), \ - __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE),\ + __SMU_DUMMY_MAP(DFCstateControl), \ + __SMU_DUMMY_MAP(GmiPwrDnControl), \ + __SMU_DUMMY_MAP(spare), \ + __SMU_DUMMY_MAP(SetNumBadHbmPagesRetired), \ + __SMU_DUMMY_MAP(GetGmiPwrDnHyst), \ + __SMU_DUMMY_MAP(SetGmiPwrDnHyst), \ + __SMU_DUMMY_MAP(EnterGfxoff), \ + __SMU_DUMMY_MAP(ExitGfxoff), \ + __SMU_DUMMY_MAP(SetExecuteDMATest), \ + 
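EnergyAcc64bitLow/High in the SmuMetrics_t above split a 64-bit energy accumulator with 15.259 uJ per count (2^-16 J). A sketch of recombining it; note the two halves should come from one snapshot of the table, or the low word may wrap between reads:

	uint64_t counts = ((uint64_t)metrics.EnergyAcc64bitHigh << 32) |
			  metrics.EnergyAcc64bitLow;
	/* 15.259 uJ per count, approximated as 15259 nJ */
	uint64_t energy_uj = counts * 15259 / 1000;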
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \ __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \ __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH), \ __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \ @@ -214,6 +221,11 @@ __SMU_DUMMY_MAP(SetSlowPPTLimit), \ __SMU_DUMMY_MAP(GetFastPPTLimit), \ __SMU_DUMMY_MAP(GetSlowPPTLimit), \ + __SMU_DUMMY_MAP(EnableDeterminism), \ + __SMU_DUMMY_MAP(DisableDeterminism), \ + __SMU_DUMMY_MAP(SetUclkDpmMode), \ + __SMU_DUMMY_MAP(LightSBR), \ + __SMU_DUMMY_MAP(GfxDriverResetRecovery), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -239,6 +251,7 @@ enum smu_clk_type { SMU_SCLK, SMU_MCLK, SMU_PCIE, + SMU_LCLK, SMU_OD_CCLK, SMU_OD_SCLK, SMU_OD_MCLK, @@ -255,6 +268,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(DPM_SOCCLK), \ __SMU_DUMMY_MAP(DPM_UVD), \ __SMU_DUMMY_MAP(DPM_VCE), \ + __SMU_DUMMY_MAP(DPM_LCLK), \ __SMU_DUMMY_MAP(ULV), \ __SMU_DUMMY_MAP(DPM_MP0CLK), \ __SMU_DUMMY_MAP(DPM_LINK), \ @@ -283,6 +297,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(DS_MP1CLK), \ __SMU_DUMMY_MAP(DS_MP0CLK), \ __SMU_DUMMY_MAP(XGMI), \ + __SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \ __SMU_DUMMY_MAP(DPM_GFX_PACE), \ __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \ __SMU_DUMMY_MAP(MEM_MVDD_SCALING), \ @@ -304,6 +319,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(MMHUB_PG), \ __SMU_DUMMY_MAP(ATHUB_PG), \ __SMU_DUMMY_MAP(APCC_DFLL), \ + __SMU_DUMMY_MAP(DF_CSTATE), \ __SMU_DUMMY_MAP(DPM_GFX_GPO), \ __SMU_DUMMY_MAP(WAFL_CG), \ __SMU_DUMMY_MAP(CCLK_DPM), \ @@ -335,7 +351,12 @@ enum smu_clk_type { __SMU_DUMMY_MAP(ISP_DPM), \ __SMU_DUMMY_MAP(A55_DPM), \ __SMU_DUMMY_MAP(CVIP_DSP_DPM), \ - __SMU_DUMMY_MAP(MSMU_LOW_POWER), + __SMU_DUMMY_MAP(MSMU_LOW_POWER), \ + __SMU_DUMMY_MAP(FUSE_CG), \ + __SMU_DUMMY_MAP(MP1_CG), \ + __SMU_DUMMY_MAP(SMUIO_CG), \ + __SMU_DUMMY_MAP(THM_CG), \ + __SMU_DUMMY_MAP(CLK_CG), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index d4cddd2390a2..bb55a96f98e9 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -27,12 +27,12 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_ARCT 0x17 -#define SMU11_DRIVER_IF_VERSION_NV10 0x36 -#define SMU11_DRIVER_IF_VERSION_NV12 0x36 -#define SMU11_DRIVER_IF_VERSION_NV14 0x36 +#define SMU11_DRIVER_IF_VERSION_NV10 0x37 +#define SMU11_DRIVER_IF_VERSION_NV12 0x38 +#define SMU11_DRIVER_IF_VERSION_NV14 0x38 #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE -#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02 +#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF /* MP Apertures */ @@ -58,6 +58,12 @@ #define CTF_OFFSET_HOTSPOT 5 #define CTF_OFFSET_MEM 5 +#define LINK_WIDTH_MAX 6 +#define LINK_SPEED_MAX 3 + +static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160}; + static const struct smu_temperature_range __maybe_unused smu11_thermal_policy[] = { @@ -135,6 +141,7 @@ struct smu_11_5_power_context { enum smu_11_0_power_state power_state; uint32_t current_fast_ppt_limit; + uint32_t default_fast_ppt_limit; uint32_t max_fast_ppt_limit; }; @@ -275,11 +282,11 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu, int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu); -int smu_v11_0_get_current_pcie_link_width(struct 
smu_context *smu); +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu); int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu); -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu); +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu); int smu_v11_0_gfx_ulv_control(struct smu_context *smu, bool enablement); @@ -289,5 +296,7 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu, void smu_v11_0_interrupt_work(struct smu_context *smu); +int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h index 02de3b6199e5..1ad2dff71090 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h @@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ int smu_v12_0_set_driver_table_location(struct smu_context *smu); +int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h new file mode 100644 index 000000000000..8145e1cbf181 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -0,0 +1,275 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
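The smu_v11_0 hunk above narrows the PCIe link queries to uint16_t and adds the link_width[]/link_speed[] lookup tables: the *_level helpers return an index, and the tables translate it into lanes and into the 2.5/5/8/16 GT/s generations (stored as 25/50/80/160). A sketch of the width case, assuming the level query succeeds:

	int level = smu_v11_0_get_current_pcie_link_width_level(smu);

	if (level >= 0 && level <= LINK_WIDTH_MAX)
		return link_width[level];	/* e.g. level 4 -> 8 lanes */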
+ * + */ +#ifndef __SMU_V13_0_H__ +#define __SMU_V13_0_H__ + +#include "amdgpu_smu.h" + +#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU13_DRIVER_IF_VERSION_ALDE 0x6 + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +/* address block */ +#define smnMP1_FIRMWARE_FLAGS 0x3010024 +#define smnMP0_FW_INTF 0x30101c0 +#define smnMP1_PUB_CTRL 0x3010b14 + +#define TEMP_RANGE_MIN (0) +#define TEMP_RANGE_MAX (80 * 1000) + +#define SMU13_TOOL_SIZE 0x19000 + +#define MAX_DPM_LEVELS 16 +#define MAX_PCIE_CONF 2 + +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_MEM 5 + +static const struct smu_temperature_range smu13_thermal_policy[] = +{ + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +struct smu_13_0_max_sustainable_clocks { + uint32_t display_clock; + uint32_t phy_clock; + uint32_t pixel_clock; + uint32_t uclock; + uint32_t dcef_clock; + uint32_t soc_clock; +}; + +struct smu_13_0_dpm_clk_level { + bool enabled; + uint32_t value; +}; + +struct smu_13_0_dpm_table { + uint32_t min; /* MHz */ + uint32_t max; /* MHz */ + uint32_t count; + struct smu_13_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS]; +}; + +struct smu_13_0_pcie_table { + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; +}; + +struct smu_13_0_dpm_tables { + struct smu_13_0_dpm_table soc_table; + struct smu_13_0_dpm_table gfx_table; + struct smu_13_0_dpm_table uclk_table; + struct smu_13_0_dpm_table eclk_table; + struct smu_13_0_dpm_table vclk_table; + struct smu_13_0_dpm_table dclk_table; + struct smu_13_0_dpm_table dcef_table; + struct smu_13_0_dpm_table pixel_table; + struct smu_13_0_dpm_table display_table; + struct smu_13_0_dpm_table phy_table; + struct smu_13_0_dpm_table fclk_table; + struct smu_13_0_pcie_table pcie_table; +}; + +struct smu_13_0_dpm_context { + struct smu_13_0_dpm_tables dpm_tables; + uint32_t workload_policy_mask; + uint32_t dcef_min_ds_clk; +}; + +enum smu_13_0_power_state { + SMU_13_0_POWER_STATE__D0 = 0, + SMU_13_0_POWER_STATE__D1, + SMU_13_0_POWER_STATE__D3, /* Sleep*/ + SMU_13_0_POWER_STATE__D4, /* Hibernate*/ + SMU_13_0_POWER_STATE__D5, /* Power off*/ +}; + +struct smu_13_0_power_context { + uint32_t power_source; + uint8_t in_power_limit_boost_mode; + enum smu_13_0_power_state power_state; +}; + +enum smu_v13_0_baco_seq { + BACO_SEQ_BACO = 0, + BACO_SEQ_MSR, + BACO_SEQ_BAMACO, + BACO_SEQ_ULPS, + BACO_SEQ_COUNT, +}; + +#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) + +int smu_v13_0_init_microcode(struct smu_context *smu); + +void smu_v13_0_fini_microcode(struct smu_context *smu); + +int smu_v13_0_load_microcode(struct smu_context *smu); + +int smu_v13_0_init_smc_tables(struct smu_context *smu); + +int smu_v13_0_fini_smc_tables(struct smu_context *smu); + +int smu_v13_0_init_power(struct smu_context *smu); + +int smu_v13_0_fini_power(struct smu_context *smu); + +int smu_v13_0_check_fw_status(struct smu_context *smu); + +int smu_v13_0_setup_pptable(struct smu_context *smu); + +int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu); + +int smu_v13_0_check_fw_version(struct smu_context *smu); + +int smu_v13_0_set_driver_table_location(struct smu_context *smu); + +int smu_v13_0_set_tool_table_location(struct smu_context *smu); + +int smu_v13_0_notify_memory_pool_location(struct smu_context *smu); + +int 
smu_v13_0_system_features_control(struct smu_context *smu, + bool en); + +int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count); + +int smu_v13_0_set_allowed_mask(struct smu_context *smu); + +int smu_v13_0_notify_display_change(struct smu_context *smu); + +int smu_v13_0_get_current_power_limit(struct smu_context *smu, + uint32_t *power_limit); + +int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n); + +int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu); + +int smu_v13_0_enable_thermal_alert(struct smu_context *smu); + +int smu_v13_0_disable_thermal_alert(struct smu_context *smu); + +int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value); + +int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); + +int +smu_v13_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req); + +uint32_t +smu_v13_0_get_fan_control_mode(struct smu_context *smu); + +int +smu_v13_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode); + +int +smu_v13_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + +int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed); + +int smu_v13_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v13_0_register_irq_handler(struct smu_context *smu); + +int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); + +int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +bool smu_v13_0_baco_is_support(struct smu_context *smu); + +enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu); + +int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); + +int smu_v13_0_baco_enter(struct smu_context *smu); +int smu_v13_0_baco_exit(struct smu_context *smu); + +int smu_v13_0_mode1_reset(struct smu_context *smu); +int smu_v13_0_mode2_reset(struct smu_context *smu); + +int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max); + +int smu_v13_0_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level); + +int smu_v13_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src); + +int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint16_t level, + uint32_t *value); + +int smu_v13_0_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value); + +int smu_v13_0_set_single_dpm_table(struct smu_context *smu, + enum smu_clk_type clk_type, + struct smu_13_0_dpm_table *single_dpm_table); + +int smu_v13_0_get_dpm_level_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min_value, + uint32_t *max_value); + +int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu); + +int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu); + +int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu); + +int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu); + +int smu_v13_0_gfx_ulv_control(struct smu_context *smu, + bool enablement); + +int 
smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, + uint64_t event_arg); + +#endif +#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h new file mode 100644 index 000000000000..1f311396b706 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h @@ -0,0 +1,165 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef SMU_13_0_PPTABLE_H +#define SMU_13_0_PPTABLE_H + +#define SMU_13_0_TABLE_FORMAT_REVISION 1 + +//// POWERPLAYTABLE::ulPlatformCaps +#define SMU_13_0_PP_PLATFORM_CAP_POWERPLAY 0x1 +#define SMU_13_0_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 +#define SMU_13_0_PP_PLATFORM_CAP_HARDWAREDC 0x4 +#define SMU_13_0_PP_PLATFORM_CAP_BACO 0x8 +#define SMU_13_0_PP_PLATFORM_CAP_MACO 0x10 +#define SMU_13_0_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 + +// SMU_13_0_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_13_0_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_13_0_PP_OVERDRIVE_VERSION 0x0800 +#define SMU_13_0_PP_POWERSAVINGCLOCK_VERSION 0x0100 + +enum SMU_13_0_ODFEATURE_CAP { + SMU_13_0_ODCAP_GFXCLK_LIMITS = 0, + SMU_13_0_ODCAP_GFXCLK_CURVE, + SMU_13_0_ODCAP_UCLK_MAX, + SMU_13_0_ODCAP_POWER_LIMIT, + SMU_13_0_ODCAP_FAN_ACOUSTIC_LIMIT, + SMU_13_0_ODCAP_FAN_SPEED_MIN, + SMU_13_0_ODCAP_TEMPERATURE_FAN, + SMU_13_0_ODCAP_TEMPERATURE_SYSTEM, + SMU_13_0_ODCAP_MEMORY_TIMING_TUNE, + SMU_13_0_ODCAP_FAN_ZERO_RPM_CONTROL, + SMU_13_0_ODCAP_AUTO_UV_ENGINE, + SMU_13_0_ODCAP_AUTO_OC_ENGINE, + SMU_13_0_ODCAP_AUTO_OC_MEMORY, + SMU_13_0_ODCAP_FAN_CURVE, + SMU_13_0_ODCAP_COUNT, +}; + +enum SMU_13_0_ODFEATURE_ID { + SMU_13_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature + SMU_13_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_13_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature + SMU_13_0_ODFEATURE_UCLK_MAX = 1 << SMU_13_0_ODCAP_UCLK_MAX, //UCLK Limit feature + SMU_13_0_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_ODCAP_POWER_LIMIT, //Power Limit feature + SMU_13_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature + SMU_13_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_13_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature + SMU_13_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_13_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature + SMU_13_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_13_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature + 
SMU_13_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_13_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
+	SMU_13_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_13_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
+	SMU_13_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_13_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
+	SMU_13_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_13_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
+	SMU_13_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_13_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
+	SMU_13_0_ODFEATURE_FAN_CURVE = 1 << SMU_13_0_ODCAP_FAN_CURVE, //Fan Curve feature
+	SMU_13_0_ODFEATURE_COUNT = 14,
+};
+
+#define SMU_13_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
+
+enum SMU_13_0_ODSETTING_ID {
+	SMU_13_0_ODSETTING_GFXCLKFMAX = 0,
+	SMU_13_0_ODSETTING_GFXCLKFMIN,
+	SMU_13_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+	SMU_13_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+	SMU_13_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+	SMU_13_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+	SMU_13_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+	SMU_13_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+	SMU_13_0_ODSETTING_UCLKFMAX,
+	SMU_13_0_ODSETTING_POWERPERCENTAGE,
+	SMU_13_0_ODSETTING_FANRPMMIN,
+	SMU_13_0_ODSETTING_FANRPMACOUSTICLIMIT,
+	SMU_13_0_ODSETTING_FANTARGETTEMPERATURE,
+	SMU_13_0_ODSETTING_OPERATINGTEMPMAX,
+	SMU_13_0_ODSETTING_ACTIMING,
+	SMU_13_0_ODSETTING_FAN_ZERO_RPM_CONTROL,
+	SMU_13_0_ODSETTING_AUTOUVENGINE,
+	SMU_13_0_ODSETTING_AUTOOCENGINE,
+	SMU_13_0_ODSETTING_AUTOOCMEMORY,
+	SMU_13_0_ODSETTING_COUNT,
+};
+
+#define SMU_13_0_MAX_ODSETTING 32 //Maximum Number of ODSettings
+
+struct smu_13_0_overdrive_table {
+	uint8_t revision; //Revision = SMU_13_0_PP_OVERDRIVE_VERSION
+	uint8_t reserve[3]; //Zero filled field reserved for future use
+	uint32_t feature_count; //Total number of supported features
+	uint32_t setting_count; //Total number of supported settings
+	uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags
+	uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings
+	uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings
+} __attribute__((packed));
+
+enum SMU_13_0_PPCLOCK_ID {
+	SMU_13_0_PPCLOCK_GFXCLK = 0,
+	SMU_13_0_PPCLOCK_VCLK,
+	SMU_13_0_PPCLOCK_DCLK,
+	SMU_13_0_PPCLOCK_ECLK,
+	SMU_13_0_PPCLOCK_SOCCLK,
+	SMU_13_0_PPCLOCK_UCLK,
+	SMU_13_0_PPCLOCK_DCEFCLK,
+	SMU_13_0_PPCLOCK_DISPCLK,
+	SMU_13_0_PPCLOCK_PIXCLK,
+	SMU_13_0_PPCLOCK_PHYCLK,
+	SMU_13_0_PPCLOCK_COUNT,
+};
+#define SMU_13_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
+
+struct smu_13_0_power_saving_clock_table {
+	uint8_t revision; //Revision = SMU_13_0_PP_POWERSAVINGCLOCK_VERSION
+	uint8_t reserve[3]; //Zero filled field reserved for future use
+	uint32_t count; //power_saving_clock_count = SMU_13_0_PPCLOCK_COUNT
+	uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
+	uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
+} __attribute__((packed));
+
+struct smu_13_0_powerplay_table {
+	struct atom_common_table_header header;
+	uint8_t table_revision;
+	uint16_t table_size; //Driver portion table size. The offset to smc_pptable including header size
+	uint32_t golden_pp_id;
+	uint32_t golden_revision;
+	uint16_t format_id;
+	uint32_t platform_caps; //POWERPLAYTABLE::ulPlatformCaps
+
+	uint8_t thermal_controller_type; //one of SMU_13_0_PP_THERMALCONTROLLER
+
+	uint16_t small_power_limit1;
+	uint16_t small_power_limit2;
+	uint16_t boost_power_limit;
+	uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning.
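+	/*
+	 * Illustration (a hypothetical helper, not part of this table
+	 * layout): each SMU_13_0_ODFEATURE_* ID defined above equals
+	 * 1 << its SMU_13_0_ODCAP_* index, so overdrive support can be
+	 * gated either by a packed ODFEATURE mask or by the per-feature
+	 * cap[] array carried in the overdrive_table member below:
+	 *
+	 *	static bool od13_capable(const struct smu_13_0_overdrive_table *od,
+	 *				 enum SMU_13_0_ODFEATURE_CAP cap)
+	 *	{
+	 *		return cap < SMU_13_0_ODCAP_COUNT && od->cap[cap];
+	 *	}
+	 */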
+ uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning. + uint16_t software_shutdown_temp; + + uint16_t reserve[6]; //Zero filled field reserved for future use + + struct smu_13_0_power_saving_clock_table power_saving_clock; + struct smu_13_0_overdrive_table overdrive_table; + +#ifndef SMU_13_0_PARTIAL_PPTABLE + PPTable_t smc_pptable; //PPTable_t in driver_if.h +#endif +} __attribute__((packed)); + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index e0d288208220..ee6340c6f921 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1034,7 +1034,8 @@ static int pp_set_power_limit(void *handle, uint32_t limit) return 0; } -static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) +static int pp_get_power_limit(void *handle, uint32_t *limit, + uint32_t *max_limit, bool default_limit) { struct pp_hwmgr *hwmgr = handle; @@ -1045,9 +1046,12 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) if (default_limit) { *limit = hwmgr->default_power_limit; - if (hwmgr->od_enabled) { - *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit); - *limit /= 100; + if (max_limit) { + *max_limit = *limit; + if (hwmgr->od_enabled) { + *max_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit); + *max_limit /= 100; + } } } else diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index ed05a30d1139..f5fe540cd536 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1261,9 +1261,21 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - uint32_t sclk, mclk; + struct amdgpu_device *adev = hwmgr->adev; + uint32_t sclk, mclk, activity_percent; + bool has_gfx_busy; int ret = 0; + /* GetGfxBusy support was added on RV SMU FW 30.85.00 and PCO 4.30.59 */ + if ((adev->apu_flags & AMD_APU_IS_PICASSO) && + (hwmgr->smu_version >= 0x41e3b)) + has_gfx_busy = true; + else if ((adev->apu_flags & AMD_APU_IS_RAVEN) && + (hwmgr->smu_version >= 0x1e5500)) + has_gfx_busy = true; + else + has_gfx_busy = false; + switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk); @@ -1284,8 +1296,21 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, *(uint32_t *)value = smu10_data->vcn_power_gated ? 
0 : 1; *size = 4; break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + if (!has_gfx_busy) + ret = -EOPNOTSUPP; + else { + ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetGfxBusy, + &activity_percent); + if (!ret) + *((uint32_t *)value) = min(activity_percent, (u32)100); + else + ret = -EIO; + } + break; default: - ret = -EINVAL; + ret = -EOPNOTSUPP; break; } @@ -1487,7 +1512,7 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, } if (!smu10_data->fine_grain_enabled) { - pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n"); + pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); return -EINVAL; } @@ -1526,20 +1551,6 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr, smu10_data->gfx_actual_soft_min_freq = min_freq; smu10_data->gfx_actual_soft_max_freq = max_freq; - - ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetHardMinGfxClk, - min_freq, - NULL); - if (ret) - return ret; - - ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMaxGfxClk, - max_freq, - NULL); - if (ret) - return ret; } else if (type == PP_OD_COMMIT_DPM_TABLE) { if (size != 0) { pr_err("Input parameter number not correct\n"); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index c0565a932a12..0541bfc81c1b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -1301,7 +1301,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_Disable, NULL)), - "Failed to disble pcie DPM during DPM Start Function!", + "Failed to disable pcie DPM during DPM Start Function!", return -EINVAL); } @@ -4001,7 +4001,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid); return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index ed9b89980184..d425b02b1418 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1788,11 +1788,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity, &activity_percent); - if (0 == result) { + if (0 == result) activity_percent = activity_percent > 100 ? 
100 : activity_percent; - } else { - activity_percent = 50; - } + else + return -EIO; *((uint32_t *)value) = activity_percent; return 0; case AMDGPU_PP_SENSOR_UVD_POWER: @@ -1805,7 +1804,7 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr); return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 599ec9726601..31c61ac3bd5e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -3955,7 +3955,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 8; break; default: - ret = -EINVAL; + ret = -EOPNOTSUPP; break; } @@ -5160,7 +5160,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui out: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, - 1 << power_profile_mode, + (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1), NULL); hwmgr->power_profile_mode = power_profile_mode; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h index c934e9612c1b..9c479bd9a786 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h @@ -161,9 +161,9 @@ typedef struct _ATOM_Vega10_MCLK_Dependency_Record { } ATOM_Vega10_MCLK_Dependency_Record; typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table { - UCHAR ucRevId; - UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_GFXCLK_Dependency_Record entries[]; /* Dynamically allocate entries. 
*/ } ATOM_Vega10_GFXCLK_Dependency_Table; typedef struct _ATOM_Vega10_MCLK_Dependency_Table { diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 4f6da11e8f10..1a097e608808 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -52,8 +52,8 @@ #define LINK_WIDTH_MAX 6 #define LINK_SPEED_MAX 3 -static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; -static int link_speed[] = {25, 50, 80, 160}; +static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static const int link_speed[] = {25, 50, 80, 160}; static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); @@ -1519,7 +1519,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 8; break; default: - ret = -EINVAL; + ret = -EOPNOTSUPP; break; } return ret; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index b6ee3a285c9d..d3177a534fdf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -57,8 +57,8 @@ #define LINK_WIDTH_MAX 6 #define LINK_SPEED_MAX 3 -static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; -static int link_speed[] = {25, 50, 80, 160}; +static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static const int link_speed[] = {25, 50, 80, 160}; static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) { @@ -2277,7 +2277,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 8; break; default: - ret = -EINVAL; + ret = -EOPNOTSUPP; break; } return ret; diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c index 66daabebee35..bcae42cef374 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c @@ -3305,7 +3305,7 @@ static int kv_dpm_read_sensor(void *handle, int idx, *size = 4; return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index afa1711c9620..26a5321e621b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -5715,11 +5715,9 @@ static int si_upload_sw_state(struct amdgpu_device *adev, int ret; u32 address = si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, driverState); - u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) + - ((new_state->performance_level_count - 1) * - sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL)); SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState; - + size_t state_size = struct_size(smc_state, levels, + new_state->performance_level_count); memset(smc_state, 0, state_size); ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state); @@ -8016,7 +8014,7 @@ static int si_dpm_read_sensor(void *handle, int idx, *size = 4; return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h index d2930eceaf3c..0f7554052c90 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h +++ b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h @@ -182,11 +182,11 @@ typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEV struct SISLANDS_SMC_SWSTATE { - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t 
padding3; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1]; + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; }; typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; diff --git a/drivers/gpu/drm/amd/pm/swsmu/Makefile b/drivers/gpu/drm/amd/pm/swsmu/Makefile index 6f281990b7b4..7987c6cf849d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/Makefile @@ -22,7 +22,7 @@ AMD_SWSMU_PATH = ../pm/swsmu -SWSMU_LIBS = smu11 smu12 +SWSMU_LIBS = smu11 smu12 smu13 AMD_SWSMU = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/swsmu/,$(SWSMU_LIBS))) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index cd905e41080e..c29d8b3131b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -34,6 +34,7 @@ #include "sienna_cichlid_ppt.h" #include "renoir_ppt.h" #include "vangogh_ppt.h" +#include "aldebaran_ppt.h" #include "amd_pcie.h" /* @@ -46,9 +47,26 @@ #undef pr_info #undef pr_debug -size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) -{ - size_t size = 0; +static const struct amd_pm_funcs swsmu_pm_funcs; +static int smu_force_smuclk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask); +static int smu_handle_task(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum amd_pp_task task_id, + bool lock_needed); +static int smu_reset(struct smu_context *smu); +static int smu_set_fan_speed_percent(void *handle, u32 speed); +static int smu_set_fan_control_mode(struct smu_context *smu, int value); +static int smu_set_power_limit(void *handle, uint32_t limit); +static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); +static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); + +static int smu_sys_get_pp_feature_mask(void *handle, + char *buf) +{ + struct smu_context *smu = handle; + int size = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -62,8 +80,10 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) return size; } -int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) +static int smu_sys_set_pp_feature_mask(void *handle, + uint64_t new_mask) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -134,6 +154,34 @@ int smu_get_dpm_freq_range(struct smu_context *smu, return ret; } +static u32 smu_get_mclk(void *handle, bool low) +{ + struct smu_context *smu = handle; + uint32_t clk_freq; + int ret = 0; + + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, + low ? &clk_freq : NULL, + !low ? &clk_freq : NULL); + if (ret) + return 0; + return clk_freq * 100; +} + +static u32 smu_get_sclk(void *handle, bool low) +{ + struct smu_context *smu = handle; + uint32_t clk_freq; + int ret = 0; + + ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, + low ? &clk_freq : NULL, + !low ? 
&clk_freq : NULL); + if (ret) + return 0; + return clk_freq * 100; +} + static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, bool enable) { @@ -209,7 +257,7 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu, /** * smu_dpm_set_power_gate - power gate/ungate the specific IP block * - * @smu: smu_context pointer + * @handle: smu_context pointer * @block_type: the IP block to power gate/ungate * @gate: to power gate if true, ungate otherwise * @@ -220,9 +268,11 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu, * Under this case, the smu->mutex lock protection is already enforced on * the parent API smu_force_performance_level of the call path. */ -int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, - bool gate) +static int smu_dpm_set_power_gate(void *handle, + uint32_t block_type, + bool gate) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -279,35 +329,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_ if (smu->adev->in_suspend) return; - /* - * mclk, fclk and socclk are interdependent - * on each other - */ if (clk == SMU_MCLK) { - /* reset clock dependency */ smu->user_dpm_profile.clk_dependency = 0; - /* set mclk dependent clocks(fclk and socclk) */ smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); } else if (clk == SMU_FCLK) { - /* give priority to mclk, if mclk dependent clocks are set */ + /* MCLK takes precedence over FCLK */ if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) return; - /* reset clock dependency */ smu->user_dpm_profile.clk_dependency = 0; - /* set fclk dependent clocks(mclk and socclk) */ smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); } else if (clk == SMU_SOCCLK) { - /* give priority to mclk, if mclk dependent clocks are set */ + /* MCLK takes precedence over SOCCLK */ if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) return; - /* reset clock dependency */ smu->user_dpm_profile.clk_dependency = 0; - /* set socclk dependent clocks(mclk and fclk) */ smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); } else - /* add clk dependencies here, if any */ + /* Add clk dependencies here, if any */ return; } @@ -331,7 +371,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) return; /* Enable restore flag */ - smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE; + smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; /* set the user dpm power limit */ if (smu->user_dpm_profile.power_limit) { @@ -351,11 +391,11 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) */ if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && smu->user_dpm_profile.clk_mask[clk_type]) { - ret = smu_force_clk_levels(smu, clk_type, + ret = smu_force_smuclk_levels(smu, clk_type, smu->user_dpm_profile.clk_mask[clk_type]); if (ret) - dev_err(smu->adev->dev, "Failed to set clock type = %d\n", - clk_type); + dev_err(smu->adev->dev, + "Failed to set clock type = %d\n", clk_type); } } } @@ -379,8 +419,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; } -int smu_get_power_num_states(struct smu_context *smu, - struct pp_states_info *state_info) +static int smu_get_power_num_states(void *handle, + struct pp_states_info *state_info) { if (!state_info) return -EINVAL; @@ -415,8 +455,10 @@ bool 
is_support_cclk_dpm(struct amdgpu_device *adev) } -int smu_sys_get_pp_table(struct smu_context *smu, void **table) +static int smu_sys_get_pp_table(void *handle, + char **table) { + struct smu_context *smu = handle; struct smu_table_context *smu_table = &smu->smu_table; uint32_t powerplay_table_size; @@ -440,8 +482,11 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table) return powerplay_table_size; } -int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) +static int smu_sys_set_pp_table(void *handle, + const char *buf, + size_t size) { + struct smu_context *smu = handle; struct smu_table_context *smu_table = &smu->smu_table; ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; int ret = 0; @@ -527,6 +572,11 @@ static int smu_set_funcs(struct amdgpu_device *adev) case CHIP_DIMGREY_CAVEFISH: sienna_cichlid_set_ppt_funcs(smu); break; + case CHIP_ALDEBARAN: + aldebaran_set_ppt_funcs(smu); + /* Enable pp_od_clk_voltage node */ + smu->od_enabled = true; + break; case CHIP_RENOIR: renoir_set_ppt_funcs(smu); break; @@ -553,6 +603,9 @@ static int smu_early_init(void *handle) smu->smu_baco.state = SMU_BACO_STATE_EXIT; smu->smu_baco.platform_support = false; + adev->powerplay.pp_handle = smu; + adev->powerplay.pp_funcs = &swsmu_pm_funcs; + return smu_set_funcs(adev); } @@ -595,6 +648,7 @@ err0_out: return ret; } + static int smu_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -612,10 +666,12 @@ static int smu_late_init(void *handle) return ret; } - ret = smu_set_default_od_settings(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default OD settings!\n"); - return ret; + if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { + ret = smu_set_default_od_settings(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default OD settings!\n"); + return ret; + } } ret = smu_populate_umd_state_clk(smu); @@ -989,6 +1045,10 @@ static int smu_sw_init(void *handle) return ret; } + /* If there is no way to query fan control mode, fan control is not supported */ + if (!smu->ppt_funcs->get_fan_control_mode) + smu->adev->pm.no_fan = true; + return 0; } @@ -1387,7 +1447,7 @@ static int smu_hw_fini(void *handle) return smu_smc_hw_cleanup(smu); } -int smu_reset(struct smu_context *smu) +static int smu_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret; @@ -1476,9 +1536,10 @@ static int smu_resume(void *handle) return 0; } -int smu_display_configuration_change(struct smu_context *smu, - const struct amd_pp_display_configuration *display_config) +static int smu_display_configuration_change(void *handle, + const struct amd_pp_display_configuration *display_config) { + struct smu_context *smu = handle; int index = 0; int num_of_active_display = 0; @@ -1567,6 +1628,18 @@ static int smu_enable_umd_pstate(void *handle, return 0; } +static int smu_bump_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size) +{ + int ret = 0; + + if (smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + + return ret; +} + static int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings) @@ -1609,22 +1682,23 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, smu_dpm_ctx->dpm_level = level; } - if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != 
AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload = smu->workload_setting[index]; if (smu->power_profile_mode != workload) - smu_set_power_profile_mode(smu, &workload, 0, false); + smu_bump_power_profile_mode(smu, &workload, 0); } return ret; } -int smu_handle_task(struct smu_context *smu, - enum amd_dpm_forced_level level, - enum amd_pp_task task_id, - bool lock_needed) +static int smu_handle_task(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum amd_pp_task task_id, + bool lock_needed) { int ret = 0; @@ -1656,10 +1730,22 @@ out: return ret; } -int smu_switch_power_profile(struct smu_context *smu, - enum PP_SMC_POWER_PROFILE type, - bool en) +static int smu_handle_dpm_task(void *handle, + enum amd_pp_task task_id, + enum amd_pm_state_type *user_state) +{ + struct smu_context *smu = handle; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true); + +} + +static int smu_switch_power_profile(void *handle, + enum PP_SMC_POWER_PROFILE type, + bool en) { + struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); long workload; uint32_t index; @@ -1684,16 +1770,18 @@ int smu_switch_power_profile(struct smu_context *smu, workload = smu->workload_setting[index]; } - if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - smu_set_power_profile_mode(smu, &workload, 0, false); + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + smu_bump_power_profile_mode(smu, &workload, 0); mutex_unlock(&smu->mutex); return 0; } -enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) +static enum amd_dpm_forced_level smu_get_performance_level(void *handle) { + struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); enum amd_dpm_forced_level level; @@ -1710,8 +1798,10 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) return level; } -int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +static int smu_force_performance_level(void *handle, + enum amd_dpm_forced_level level) { + struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int ret = 0; @@ -1744,8 +1834,9 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev return ret; } -int smu_set_display_count(struct smu_context *smu, uint32_t count) +static int smu_set_display_count(void *handle, uint32_t count) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -1758,7 +1849,7 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count) return ret; } -int smu_force_clk_levels(struct smu_context *smu, +static int smu_force_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) { @@ -1777,7 +1868,7 @@ int smu_force_clk_levels(struct smu_context *smu, if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); - if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) { + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { smu->user_dpm_profile.clk_mask[clk_type] = mask; smu_set_user_clk_dependencies(smu, clk_type); } @@ -1788,6 +1879,45 @@ int smu_force_clk_levels(struct smu_context *smu, return ret; } +static 
int smu_force_ppclk_levels(void *handle, + enum pp_clock_type type, + uint32_t mask) +{ + struct smu_context *smu = handle; + enum smu_clk_type clk_type; + + switch (type) { + case PP_SCLK: + clk_type = SMU_SCLK; break; + case PP_MCLK: + clk_type = SMU_MCLK; break; + case PP_PCIE: + clk_type = SMU_PCIE; break; + case PP_SOCCLK: + clk_type = SMU_SOCCLK; break; + case PP_FCLK: + clk_type = SMU_FCLK; break; + case PP_DCEFCLK: + clk_type = SMU_DCEFCLK; break; + case PP_VCLK: + clk_type = SMU_VCLK; break; + case PP_DCLK: + clk_type = SMU_DCLK; break; + case OD_SCLK: + clk_type = SMU_OD_SCLK; break; + case OD_MCLK: + clk_type = SMU_OD_MCLK; break; + case OD_VDDC_CURVE: + clk_type = SMU_OD_VDDC_CURVE; break; + case OD_RANGE: + clk_type = SMU_OD_RANGE; break; + default: + return -EINVAL; + } + + return smu_force_smuclk_levels(smu, clk_type, mask); +} + /* * On system suspending or resetting, the dpm_enabled * flag will be cleared. So that those SMU services which @@ -1795,48 +1925,30 @@ int smu_force_clk_levels(struct smu_context *smu, * However, the mp1 state setting should still be granted * even if the dpm_enabled cleared. */ -int smu_set_mp1_state(struct smu_context *smu, - enum pp_mp1_state mp1_state) +static int smu_set_mp1_state(void *handle, + enum pp_mp1_state mp1_state) { - uint16_t msg; - int ret; + struct smu_context *smu = handle; + int ret = 0; if (!smu->pm_enabled) return -EOPNOTSUPP; mutex_lock(&smu->mutex); - switch (mp1_state) { - case PP_MP1_STATE_SHUTDOWN: - msg = SMU_MSG_PrepareMp1ForShutdown; - break; - case PP_MP1_STATE_UNLOAD: - msg = SMU_MSG_PrepareMp1ForUnload; - break; - case PP_MP1_STATE_RESET: - msg = SMU_MSG_PrepareMp1ForReset; - break; - case PP_MP1_STATE_NONE: - default: - mutex_unlock(&smu->mutex); - return 0; - } - - ret = smu_send_smc_msg(smu, msg, NULL); - /* some asics may not support those messages */ - if (ret == -EINVAL) - ret = 0; - if (ret) - dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); + if (smu->ppt_funcs && + smu->ppt_funcs->set_mp1_state) + ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); mutex_unlock(&smu->mutex); return ret; } -int smu_set_df_cstate(struct smu_context *smu, - enum pp_df_cstate state) +static int smu_set_df_cstate(void *handle, + enum pp_df_cstate state) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -1893,9 +2005,10 @@ int smu_write_watermarks_table(struct smu_context *smu) return ret; } -int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, - struct pp_smu_wm_range_sets *clock_ranges) +static int smu_set_watermarks_for_clock_ranges(void *handle, + struct pp_smu_wm_range_sets *clock_ranges) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -1973,41 +2086,48 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = .funcs = &smu_ip_funcs, }; -int smu_load_microcode(struct smu_context *smu) +const struct amdgpu_ip_block_version smu_v13_0_ip_block = { - int ret = 0; - - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) - return -EOPNOTSUPP; - - mutex_lock(&smu->mutex); - - if (smu->ppt_funcs->load_microcode) - ret = smu->ppt_funcs->load_microcode(smu); - - mutex_unlock(&smu->mutex); - - return ret; -} + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 13, + .minor = 0, + .rev = 0, + .funcs = &smu_ip_funcs, +}; -int smu_check_fw_status(struct smu_context *smu) +static int smu_load_microcode(void *handle) { + struct smu_context *smu = handle; + struct amdgpu_device *adev = smu->adev; int ret = 0; - if 
(!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); + /* This should be used for non PSP loading */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) + return 0; - if (smu->ppt_funcs->check_fw_status) - ret = smu->ppt_funcs->check_fw_status(smu); + if (smu->ppt_funcs->load_microcode) { + ret = smu->ppt_funcs->load_microcode(smu); + if (ret) { + dev_err(adev->dev, "Load microcode failed\n"); + return ret; + } + } - mutex_unlock(&smu->mutex); + if (smu->ppt_funcs->check_fw_status) { + ret = smu->ppt_funcs->check_fw_status(smu); + if (ret) { + dev_err(adev->dev, "SMC is not ready\n"); + return ret; + } + } return ret; } -int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) +static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) { int ret = 0; @@ -2021,8 +2141,9 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) return ret; } -int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) +static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) { + struct smu_context *smu = handle; u32 percent; int ret = 0; @@ -2034,7 +2155,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) if (smu->ppt_funcs->set_fan_speed_percent) { percent = speed * 100 / smu->fan_max_rpm; ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent); - if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.fan_speed_percent = percent; } @@ -2063,6 +2184,9 @@ int smu_get_power_limit(struct smu_context *smu, case SMU_PPT_LIMIT_CURRENT: *limit = smu->current_power_limit; break; + case SMU_PPT_LIMIT_DEFAULT: + *limit = smu->default_power_limit; + break; case SMU_PPT_LIMIT_MAX: *limit = smu->max_power_limit; break; @@ -2076,8 +2200,9 @@ int smu_get_power_limit(struct smu_context *smu, return ret; } -int smu_set_power_limit(struct smu_context *smu, uint32_t limit) +static int smu_set_power_limit(void *handle, uint32_t limit) { + struct smu_context *smu = handle; uint32_t limit_type = limit >> 24; int ret = 0; @@ -2096,6 +2221,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit) dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", limit, smu->max_power_limit); + ret = -EINVAL; goto out; } @@ -2104,7 +2230,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit) if (smu->ppt_funcs->set_power_limit) { ret = smu->ppt_funcs->set_power_limit(smu, limit); - if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.power_limit = limit; } @@ -2114,7 +2240,7 @@ out: return ret; } -int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) +static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { int ret = 0; @@ -2131,10 +2257,54 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch return ret; } -int smu_od_edit_dpm_table(struct smu_context *smu, - enum PP_OD_DPM_TABLE_COMMAND type, - long *input, uint32_t size) +static int smu_print_ppclk_levels(void *handle, + enum pp_clock_type type, + char *buf) +{ + struct smu_context *smu = handle; + enum smu_clk_type clk_type; + + switch (type) { + case PP_SCLK: + clk_type = SMU_SCLK; break; + case PP_MCLK: + clk_type = SMU_MCLK; break; + case PP_PCIE: + clk_type = SMU_PCIE; break; + case 
PP_SOCCLK: + clk_type = SMU_SOCCLK; break; + case PP_FCLK: + clk_type = SMU_FCLK; break; + case PP_DCEFCLK: + clk_type = SMU_DCEFCLK; break; + case PP_VCLK: + clk_type = SMU_VCLK; break; + case PP_DCLK: + clk_type = SMU_DCLK; break; + case OD_SCLK: + clk_type = SMU_OD_SCLK; break; + case OD_MCLK: + clk_type = SMU_OD_MCLK; break; + case OD_VDDC_CURVE: + clk_type = SMU_OD_VDDC_CURVE; break; + case OD_RANGE: + clk_type = SMU_OD_RANGE; break; + case OD_VDDGFX_OFFSET: + clk_type = SMU_OD_VDDGFX_OFFSET; break; + case OD_CCLK: + clk_type = SMU_OD_CCLK; break; + default: + return -EINVAL; + } + + return smu_print_smuclk_levels(smu, clk_type, buf); +} + +static int smu_od_edit_dpm_table(void *handle, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2144,11 +2314,6 @@ int smu_od_edit_dpm_table(struct smu_context *smu, if (smu->ppt_funcs->od_edit_dpm_table) { ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); - if (!ret && (type == PP_OD_COMMIT_DPM_TABLE)) - ret = smu_handle_task(smu, - smu->smu_dpm.dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE, - false); } mutex_unlock(&smu->mutex); @@ -2156,20 +2321,26 @@ int smu_od_edit_dpm_table(struct smu_context *smu, return ret; } -int smu_read_sensor(struct smu_context *smu, - enum amd_pp_sensors sensor, - void *data, uint32_t *size) +static int smu_read_sensor(void *handle, + int sensor, + void *data, + int *size_arg) { + struct smu_context *smu = handle; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; int ret = 0; + uint32_t *size, size_val; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - if (!data || !size) + if (!data || !size_arg) return -EINVAL; + size_val = *size_arg; + size = &size_val; + mutex_lock(&smu->mutex); if (smu->ppt_funcs->read_sensor) @@ -2214,11 +2385,15 @@ int smu_read_sensor(struct smu_context *smu, unlock: mutex_unlock(&smu->mutex); + // assign uint32_t to int + *size_arg = size_val; + return ret; } -int smu_get_power_profile_mode(struct smu_context *smu, char *buf) +static int smu_get_power_profile_mode(void *handle, char *buf) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2234,35 +2409,33 @@ int smu_get_power_profile_mode(struct smu_context *smu, char *buf) return ret; } -int smu_set_power_profile_mode(struct smu_context *smu, - long *param, - uint32_t param_size, - bool lock_needed) +static int smu_set_power_profile_mode(void *handle, + long *param, + uint32_t param_size) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - if (lock_needed) - mutex_lock(&smu->mutex); + mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_power_profile_mode) - ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + smu_bump_power_profile_mode(smu, param, param_size); - if (lock_needed) - mutex_unlock(&smu->mutex); + mutex_unlock(&smu->mutex); return ret; } -int smu_get_fan_control_mode(struct smu_context *smu) +static u32 smu_get_fan_control_mode(void *handle) { - int ret = 0; + struct smu_context *smu = handle; + u32 ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) - return -EOPNOTSUPP; + return AMD_FAN_CTRL_NONE; mutex_lock(&smu->mutex); @@ -2274,18 +2447,18 @@ int smu_get_fan_control_mode(struct smu_context *smu) return ret; } -int smu_set_fan_control_mode(struct smu_context *smu, int value) +static int 
smu_set_fan_control_mode(struct smu_context *smu, int value) { int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) - return -EOPNOTSUPP; + return -EOPNOTSUPP; mutex_lock(&smu->mutex); if (smu->ppt_funcs->set_fan_control_mode) { ret = smu->ppt_funcs->set_fan_control_mode(smu, value); - if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.fan_mode = value; } @@ -2293,14 +2466,23 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value) /* reset user dpm fan speed */ if (!ret && value != AMD_FAN_CTRL_MANUAL && - smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) + !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.fan_speed_percent = 0; return ret; } -int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) +static void smu_pp_set_fan_control_mode(void *handle, u32 value) { + struct smu_context *smu = handle; + + smu_set_fan_control_mode(smu, value); +} + + +static int smu_get_fan_speed_percent(void *handle, u32 *speed) +{ + struct smu_context *smu = handle; int ret = 0; uint32_t percent; @@ -2322,8 +2504,9 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) return ret; } -int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +static int smu_set_fan_speed_percent(void *handle, u32 speed) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2335,7 +2518,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) if (speed > 100) speed = 100; ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); - if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.fan_speed_percent = speed; } @@ -2344,8 +2527,9 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) return ret; } -int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) +static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) { + struct smu_context *smu = handle; int ret = 0; u32 percent; @@ -2364,8 +2548,9 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) return ret; } -int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) +static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2380,10 +2565,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) return ret; } -int smu_get_clock_by_type_with_latency(struct smu_context *smu, - enum smu_clk_type clk_type, - struct pp_clock_levels_with_latency *clocks) +static int smu_get_clock_by_type_with_latency(void *handle, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) { + struct smu_context *smu = handle; + enum smu_clk_type clk_type; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2391,17 +2578,38 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu, mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_clock_by_type_with_latency) + if (smu->ppt_funcs->get_clock_by_type_with_latency) { + switch (type) { + case amd_pp_sys_clock: + clk_type = SMU_GFXCLK; + break; + case amd_pp_mem_clock: + clk_type = SMU_MCLK; + break; + case amd_pp_dcef_clock: + clk_type = SMU_DCEFCLK; + break; + case amd_pp_disp_clock: + clk_type = 
SMU_DISPCLK; + break; + default: + dev_err(smu->adev->dev, "Invalid clock type!\n"); + mutex_unlock(&smu->mutex); + return -EINVAL; + } + ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); + } mutex_unlock(&smu->mutex); return ret; } -int smu_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request *clock_req) +static int smu_display_clock_voltage_request(void *handle, + struct pp_display_clock_request *clock_req) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2418,8 +2626,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu, } -int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) +static int smu_display_disable_memory_clock_switch(void *handle, + bool disable_memory_clock_switch) { + struct smu_context *smu = handle; int ret = -EINVAL; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2435,9 +2645,10 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl return ret; } -int smu_set_xgmi_pstate(struct smu_context *smu, - uint32_t pstate) +static int smu_set_xgmi_pstate(void *handle, + uint32_t pstate) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2456,101 +2667,78 @@ int smu_set_xgmi_pstate(struct smu_context *smu, return ret; } -int smu_set_azalia_d3_pme(struct smu_context *smu) +static int smu_get_baco_capability(void *handle, bool *cap) { + struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) - return -EOPNOTSUPP; - - mutex_lock(&smu->mutex); - - if (smu->ppt_funcs->set_azalia_d3_pme) - ret = smu->ppt_funcs->set_azalia_d3_pme(smu); - - mutex_unlock(&smu->mutex); - - return ret; -} - -/* - * On system suspending or resetting, the dpm_enabled - * flag will be cleared. So that those SMU services which - * are not supported will be gated. - * - * However, the baco/mode1 reset should still be granted - * as they are still supported and necessary. 
- */ -bool smu_baco_is_support(struct smu_context *smu) -{ - bool ret = false; + *cap = false; if (!smu->pm_enabled) - return false; + return 0; mutex_lock(&smu->mutex); if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) - ret = smu->ppt_funcs->baco_is_support(smu); + *cap = smu->ppt_funcs->baco_is_support(smu); mutex_unlock(&smu->mutex); return ret; } -int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) -{ - if (smu->ppt_funcs->baco_get_state) - return -EINVAL; - - mutex_lock(&smu->mutex); - *state = smu->ppt_funcs->baco_get_state(smu); - mutex_unlock(&smu->mutex); - - return 0; -} - -int smu_baco_enter(struct smu_context *smu) +static int smu_baco_set_state(void *handle, int state) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); + if (state == 0) { + mutex_lock(&smu->mutex); - if (smu->ppt_funcs->baco_enter) - ret = smu->ppt_funcs->baco_enter(smu); + if (smu->ppt_funcs->baco_exit) + ret = smu->ppt_funcs->baco_exit(smu); - mutex_unlock(&smu->mutex); + mutex_unlock(&smu->mutex); + } else if (state == 1) { + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_enter) + ret = smu->ppt_funcs->baco_enter(smu); + + mutex_unlock(&smu->mutex); + + } else { + return -EINVAL; + } if (ret) - dev_err(smu->adev->dev, "Failed to enter BACO state!\n"); + dev_err(smu->adev->dev, "Failed to %s BACO state!\n", + (state)?"enter":"exit"); return ret; } -int smu_baco_exit(struct smu_context *smu) +bool smu_mode1_reset_is_support(struct smu_context *smu) { - int ret = 0; + bool ret = false; if (!smu->pm_enabled) - return -EOPNOTSUPP; + return false; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->baco_exit) - ret = smu->ppt_funcs->baco_exit(smu); + if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) + ret = smu->ppt_funcs->mode1_reset_is_support(smu); mutex_unlock(&smu->mutex); - if (ret) - dev_err(smu->adev->dev, "Failed to exit BACO state!\n"); - return ret; } -bool smu_mode1_reset_is_support(struct smu_context *smu) +bool smu_mode2_reset_is_support(struct smu_context *smu) { bool ret = false; @@ -2559,8 +2747,8 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) - ret = smu->ppt_funcs->mode1_reset_is_support(smu); + if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) + ret = smu->ppt_funcs->mode2_reset_is_support(smu); mutex_unlock(&smu->mutex); @@ -2584,8 +2772,9 @@ int smu_mode1_reset(struct smu_context *smu) return ret; } -int smu_mode2_reset(struct smu_context *smu) +static int smu_mode2_reset(void *handle) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled) @@ -2604,9 +2793,10 @@ int smu_mode2_reset(struct smu_context *smu) return ret; } -int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, - struct pp_smu_nv_clock_table *max_clocks) +static int smu_get_max_sustainable_clocks_by_dc(void *handle, + struct pp_smu_nv_clock_table *max_clocks) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2622,10 +2812,11 @@ int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, return ret; } -int smu_get_uclk_dpm_states(struct smu_context *smu, - unsigned int *clock_values_in_khz, - unsigned int *num_states) +static int smu_get_uclk_dpm_states(void *handle, + unsigned int *clock_values_in_khz, + unsigned int *num_states) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || 
!smu->adev->pm.dpm_enabled) @@ -2641,8 +2832,9 @@ int smu_get_uclk_dpm_states(struct smu_context *smu, return ret; } -enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) +static enum amd_pm_state_type smu_get_current_power_state(void *handle) { + struct smu_context *smu = handle; enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2658,9 +2850,10 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) return pm_state; } -int smu_get_dpm_clock_table(struct smu_context *smu, - struct dpm_clocks *clock_table) +static int smu_get_dpm_clock_table(void *handle, + struct dpm_clocks *clock_table) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2676,9 +2869,9 @@ int smu_get_dpm_clock_table(struct smu_context *smu, return ret; } -ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, - void **table) +static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) { + struct smu_context *smu = handle; ssize_t size; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2696,8 +2889,9 @@ ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, return size; } -int smu_enable_mgpu_fan_boost(struct smu_context *smu) +static int smu_enable_mgpu_fan_boost(void *handle) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2713,8 +2907,10 @@ int smu_enable_mgpu_fan_boost(struct smu_context *smu) return ret; } -int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state) +static int smu_gfx_state_change_set(void *handle, + uint32_t state) { + struct smu_context *smu = handle; int ret = 0; mutex_lock(&smu->mutex); @@ -2724,3 +2920,83 @@ int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state) return ret; } + +int smu_set_light_sbr(struct smu_context *smu, bool enable) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + if (smu->ppt_funcs->set_light_sbr) + ret = smu->ppt_funcs->set_light_sbr(smu, enable); + mutex_unlock(&smu->mutex); + + return ret; +} + + +static const struct amd_pm_funcs swsmu_pm_funcs = { + /* export for sysfs */ + .set_fan_control_mode = smu_pp_set_fan_control_mode, + .get_fan_control_mode = smu_get_fan_control_mode, + .set_fan_speed_percent = smu_set_fan_speed_percent, + .get_fan_speed_percent = smu_get_fan_speed_percent, + .force_performance_level = smu_force_performance_level, + .read_sensor = smu_read_sensor, + .get_performance_level = smu_get_performance_level, + .get_current_power_state = smu_get_current_power_state, + .get_fan_speed_rpm = smu_get_fan_speed_rpm, + .set_fan_speed_rpm = smu_set_fan_speed_rpm, + .get_pp_num_states = smu_get_power_num_states, + .get_pp_table = smu_sys_get_pp_table, + .set_pp_table = smu_sys_set_pp_table, + .switch_power_profile = smu_switch_power_profile, + /* export to amdgpu */ + .dispatch_tasks = smu_handle_dpm_task, + .set_powergating_by_smu = smu_dpm_set_power_gate, + .set_power_limit = smu_set_power_limit, + .odn_edit_dpm_table = smu_od_edit_dpm_table, + .set_mp1_state = smu_set_mp1_state, + /* export to DC */ + .get_sclk = smu_get_sclk, + .get_mclk = smu_get_mclk, + .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, + .get_asic_baco_capability = smu_get_baco_capability, + .set_asic_baco_state = smu_baco_set_state, + .get_ppfeature_status = smu_sys_get_pp_feature_mask, + .set_ppfeature_status = smu_sys_set_pp_feature_mask, + .asic_reset_mode_2 = smu_mode2_reset, + .set_df_cstate = smu_set_df_cstate, + 
.set_xgmi_pstate = smu_set_xgmi_pstate, + .get_gpu_metrics = smu_sys_get_gpu_metrics, + .set_power_profile_mode = smu_set_power_profile_mode, + .get_power_profile_mode = smu_get_power_profile_mode, + .force_clock_level = smu_force_ppclk_levels, + .print_clock_levels = smu_print_ppclk_levels, + .get_uclk_dpm_states = smu_get_uclk_dpm_states, + .get_dpm_clock_table = smu_get_dpm_clock_table, + .display_configuration_change = smu_display_configuration_change, + .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, + .display_clock_voltage_request = smu_display_clock_voltage_request, + .set_active_display_count = smu_set_display_count, + .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, + .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, + .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, + .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, + .load_firmware = smu_load_microcode, + .gfx_state_change_set = smu_gfx_state_change_set, +}; + +int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, + uint64_t event_arg) +{ + int ret = -EINVAL; + struct smu_context *smu = &adev->smu; + + if (smu->ppt_funcs->wait_for_event) { + mutex_lock(&smu->mutex); + ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); + mutex_unlock(&smu->mutex); + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 9f0d03ae3109..77693bf0840c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -142,6 +142,7 @@ static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0), MSG_MAP(ReadSerialNumTop32, PPSMC_MSG_ReadSerialNumTop32, 1), MSG_MAP(ReadSerialNumBottom32, PPSMC_MSG_ReadSerialNumBottom32, 1), + MSG_MAP(LightSBR, PPSMC_MSG_LightSBR, 0), }; static const struct cmn2asic_mapping arcturus_clk_map[SMU_CLK_COUNT] = { @@ -236,7 +237,7 @@ static int arcturus_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { kfree(smu_table->metrics_table); @@ -1128,7 +1129,7 @@ static int arcturus_get_power_limit(struct smu_context *smu) power_limit = pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } - smu->current_power_limit = power_limit; + smu->current_power_limit = smu->default_power_limit = power_limit; if (smu->od_enabled) { od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); @@ -2211,7 +2212,7 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu) kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status); } -static int arcturus_get_current_pcie_link_speed(struct smu_context *smu) +static uint16_t arcturus_get_current_pcie_link_speed(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t esm_ctrl; @@ -2219,7 +2220,7 @@ static int arcturus_get_current_pcie_link_speed(struct smu_context *smu) /* TODO: confirm this on real target */ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); if ((esm_ctrl >> 15) & 0x1FFFF) - return (((esm_ctrl >> 8) & 0x3F) + 128); + return (uint16_t)(((esm_ctrl >> 8) & 0x3F) + 128); 
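/*
 * Editor's note -- illustrative sketch, not part of the patch. Once the
 * swsmu_pm_funcs table above is installed as adev->powerplay.pp_funcs
 * (every wrapper casts its void *handle back to struct smu_context *,
 * so the opaque pp_handle is the smu context itself), common code can
 * dispatch through one vtable and drop its is_support_sw_smu()
 * branches. A generic helper (name hypothetical) reduces to:
 */
static uint32_t example_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_sclk)
		return 0;

	/* Resolves to smu_get_sclk() via the table above on swsmu. */
	return pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
}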
return smu_v11_0_get_current_pcie_link_speed(smu); } @@ -2228,8 +2229,8 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_0 *gpu_metrics = - (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; SmuMetrics_t metrics; int ret = 0; @@ -2239,7 +2240,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu, if (ret) return ret; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); gpu_metrics->temperature_edge = metrics.TemperatureEdge; gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; @@ -2280,7 +2281,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu, *table = (void *)gpu_metrics; - return sizeof(struct gpu_metrics_v1_0); + return sizeof(struct gpu_metrics_v1_1); } static const struct pptable_funcs arcturus_ppt_funcs = { @@ -2363,6 +2364,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .deep_sleep_control = smu_v11_0_deep_sleep_control, .get_fan_parameters = arcturus_get_fan_parameters, .interrupt_work = smu_v11_0_interrupt_work, + .set_light_sbr = smu_v11_0_set_light_sbr, + .set_mp1_state = smu_cmn_set_mp1_state, }; void arcturus_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 6e641f1513d8..ac13042672ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -70,6 +70,8 @@ FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) +#define SMU_11_0_GFX_BUSY_THRESHOLD 15 + static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -429,6 +431,30 @@ static int navi10_store_powerplay_table(struct smu_context *smu) return 0; } +static int navi10_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t mp1_fw_flags; + int ret = 0; + + ret = smu_cmn_set_mp1_state(smu, mp1_state); + if (ret) + return ret; + + if (mp1_state == PP_MP1_STATE_UNLOAD) { + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + + mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK; + + WREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags); + } + + return 0; +} + static int navi10_setup_pptable(struct smu_context *smu) { int ret = 0; @@ -456,18 +482,13 @@ static int navi10_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - struct amdgpu_device *adev = smu->adev; SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - if (adev->asic_type == CHIP_NAVI12) - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - else - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV1X_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, 
sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), @@ -478,14 +499,13 @@ static int navi10_tables_init(struct smu_context *smu) sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ? - sizeof(SmuMetrics_NV12_t) : - sizeof(SmuMetrics_t), GFP_KERNEL); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t), + GFP_KERNEL); if (!smu_table->metrics_table) goto err0_out; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; @@ -504,17 +524,200 @@ err0_out: return -ENOMEM; } +static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table= &smu->smu_table; + SmuMetrics_legacy_t *metrics = + (SmuMetrics_legacy_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCEFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + *value = metrics->AverageGfxclkFrequency; + break; + case METRICS_AVERAGE_SOCCLK: + *value = metrics->AverageSocclkFrequency; + break; + case METRICS_AVERAGE_UCLK: + *value = metrics->AverageUclkFrequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->TemperatureHotspot * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->TemperatureMem * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->TemperatureVrGfx * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->TemperatureVrSoc * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->ThrottlerStatus; + break; + case METRICS_CURR_FANSPEED: + *value = metrics->CurrFanSpeed; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + static int navi10_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { struct smu_table_context *smu_table= &smu->smu_table; - /* - * This works for NV12 also. 
As although NV12 uses a different - * SmuMetrics structure from other NV1X ASICs, they share the - * same offsets for the heading parts(those members used here). - */ - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + SmuMetrics_t *metrics = + (SmuMetrics_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCEFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPreDs; + else + *value = metrics->AverageGfxclkFrequencyPostDs; + break; + case METRICS_AVERAGE_SOCCLK: + *value = metrics->AverageSocclkFrequency; + break; + case METRICS_AVERAGE_UCLK: + *value = metrics->AverageUclkFrequencyPostDs; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->TemperatureHotspot * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->TemperatureMem * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->TemperatureVrGfx * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->TemperatureVrSoc * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->ThrottlerStatus; + break; + case METRICS_CURR_FANSPEED: + *value = metrics->CurrFanSpeed; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table= &smu->smu_table; + SmuMetrics_NV12_legacy_t *metrics = + (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table; int ret = 0; mutex_lock(&smu->metrics_lock); @@ -600,6 +803,136 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu, return ret; } +static int navi12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table= &smu->smu_table; + SmuMetrics_NV12_t *metrics = + (SmuMetrics_NV12_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case 
METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCEFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + if (metrics->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPreDs; + else + *value = metrics->AverageGfxclkFrequencyPostDs; + break; + case METRICS_AVERAGE_SOCCLK: + *value = metrics->AverageSocclkFrequency; + break; + case METRICS_AVERAGE_UCLK: + *value = metrics->AverageUclkFrequencyPostDs; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->TemperatureHotspot * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->TemperatureMem * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->TemperatureVrGfx * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->TemperatureVrSoc * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->ThrottlerStatus; + break; + case METRICS_CURR_FANSPEED: + *value = metrics->CurrFanSpeed; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int navi1x_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t smu_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) { + dev_err(adev->dev, "Failed to get smu version!\n"); + return ret; + } + + switch (adev->asic_type) { + case CHIP_NAVI12: + if (smu_version > 0x00341C00) + ret = navi12_get_smu_metrics_data(smu, member, value); + else + ret = navi12_get_legacy_smu_metrics_data(smu, member, value); + break; + case CHIP_NAVI10: + case CHIP_NAVI14: + default: + if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) || + ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) + ret = navi10_get_smu_metrics_data(smu, member, value); + else + ret = navi10_get_legacy_smu_metrics_data(smu, member, value); + break; + } + + return ret; +} + static int navi10_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -880,7 +1213,7 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, return -EINVAL; } - return navi10_get_smu_metrics_data(smu, + return navi1x_get_smu_metrics_data(smu, member_type, value); } @@ -897,7 +1230,7 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu dpm_desc = &pptable->DpmDescriptor[clk_index]; /* 0 - Fine grained DPM, 1 - Discrete DPM */ - return dpm_desc->SnapToDiscrete == 0 ? 
true : false; + return dpm_desc->SnapToDiscrete == 0; } static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap) @@ -1110,7 +1443,6 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_SOCCLK: case SMU_MCLK: case SMU_UCLK: - case SMU_DCEFCLK: case SMU_FCLK: /* There is only 2 levels for fine grained DPM */ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { @@ -1130,6 +1462,10 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return size; break; + case SMU_DCEFCLK: + dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); + break; + default: break; } @@ -1328,7 +1664,7 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu, switch (smu_v11_0_get_fan_control_mode(smu)) { case AMD_FAN_CTRL_AUTO: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_CURR_FANSPEED, &rpm); if (!ret && smu->fan_max_rpm) @@ -1644,37 +1980,37 @@ static int navi10_read_sensor(struct smu_context *smu, *size = 4; break; case AMDGPU_PP_SENSOR_MEM_LOAD: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_MEMACTIVITY, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GPU_LOAD: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXACTIVITY, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_TEMPERATURE_HOTSPOT, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_EDGE_TEMP: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_TEMPERATURE_EDGE, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_MEM_TEMP: - ret = navi10_get_smu_metrics_data(smu, + ret = navi1x_get_smu_metrics_data(smu, METRICS_TEMPERATURE_MEM, (uint32_t *)data); *size = 4; @@ -1685,7 +2021,7 @@ static int navi10_read_sensor(struct smu_context *smu, *size = 4; break; case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = navi10_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data); + ret = navi1x_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXCLK, (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; @@ -1802,7 +2138,7 @@ static int navi10_get_power_limit(struct smu_context *smu) power_limit = pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } - smu->current_power_limit = power_limit; + smu->current_power_limit = smu->default_power_limit = power_limit; if (smu->od_enabled && navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { @@ -2287,14 +2623,75 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu) return 0; } +static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; + SmuMetrics_legacy_t metrics; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + true); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t)); + + mutex_unlock(&smu->metrics_lock); + + 
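/*
 * Editor's note -- illustrative sketch, not part of the patch. The
 * navi1x_* wrappers select the metrics layout from the running SMU
 * firmware version, because newer firmware changed the SmuMetrics
 * layout mid-ASIC. The cutovers used in this patch are 0x00341C00
 * (Navi12), 0x00351F00 (Navi14) and 0x002A3B00 (Navi10); condensed,
 * the check looks like this (helper name hypothetical):
 */
static bool example_navi10_has_new_metrics(struct smu_context *smu)
{
	uint32_t smu_version;

	/* On failure, fall back to the legacy layout conservatively. */
	if (smu_cmn_get_smc_version(smu, NULL, &smu_version))
		return false;

	return smu_version > 0x002A3B00;
}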
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureMem; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; + + gpu_metrics->pcie_link_width = + smu_v11_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + smu_v11_0_get_current_pcie_link_speed(smu); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_1); +} + static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_0 *gpu_metrics = - (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; - struct amdgpu_device *adev = smu->adev; - SmuMetrics_NV12_t nv12_metrics = { 0 }; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; SmuMetrics_t metrics; int ret = 0; @@ -2309,12 +2706,75 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, } memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t)); - if (adev->asic_type == CHIP_NAVI12) - memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t)); mutex_unlock(&smu->metrics_lock); - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureMem; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + + if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD) + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs; + else + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs; + + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + 
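/*
 * Editor's note -- illustrative sketch, not part of the patch. Every
 * get_gpu_metrics variant in this patch follows the same pattern:
 * refresh the cached table under metrics_lock, memcpy a private
 * snapshot, drop the lock, then translate the fields unlocked.
 * Condensed (function name hypothetical):
 */
static int example_snapshot_metrics(struct smu_context *smu,
				    SmuMetrics_t *out)
{
	int ret;

	mutex_lock(&smu->metrics_lock);
	/* Third argument true bypasses the cache so reads are fresh. */
	ret = smu_cmn_get_metrics_table_locked(smu, NULL, true);
	if (!ret)
		memcpy(out, smu->smu_table.metrics_table, sizeof(*out));
	mutex_unlock(&smu->metrics_lock);

	return ret;
}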
gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; + + gpu_metrics->pcie_link_width = metrics.PcieWidth; + gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate]; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_1); +} + +static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; + SmuMetrics_NV12_legacy_t metrics; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + true); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t)); + + mutex_unlock(&smu->metrics_lock); + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); gpu_metrics->temperature_edge = metrics.TemperatureEdge; gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; @@ -2332,12 +2792,10 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; - if (adev->asic_type == CHIP_NAVI12) { - gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator; - gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency; - gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency; - gpu_metrics->average_mm_activity = nv12_metrics.VcnActivityPercentage; - } + gpu_metrics->energy_accumulator = metrics.EnergyAccumulator; + gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency; + gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency; + gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage; gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; @@ -2358,7 +2816,111 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, *table = (void *)gpu_metrics; - return sizeof(struct gpu_metrics_v1_0); + return sizeof(struct gpu_metrics_v1_1); +} + +static ssize_t navi12_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; + SmuMetrics_NV12_t metrics; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + true); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t)); + + mutex_unlock(&smu->metrics_lock); + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureMem; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + 
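/*
 * Editor's note -- illustrative sketch, not part of the patch. Newer
 * firmware reports the average GFXCLK both before and after deep-sleep
 * filtering, and this patch picks between them by activity: above
 * SMU_11_0_GFX_BUSY_THRESHOLD (15%) the pre-deep-sleep average is
 * representative, while an idle GPU reports the post-deep-sleep value
 * so sysfs does not show a misleadingly high clock. As a helper
 * (name hypothetical):
 */
static uint32_t example_effective_gfxclk(const SmuMetrics_t *m)
{
	if (m->AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD)
		return m->AverageGfxclkFrequencyPreDs;	/* busy */

	return m->AverageGfxclkFrequencyPostDs;		/* idle */
}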
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + + if (metrics.AverageGfxActivity > SMU_11_0_GFX_BUSY_THRESHOLD) + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs; + else + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs; + + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs; + + gpu_metrics->energy_accumulator = metrics.EnergyAccumulator; + gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency; + gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency; + gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; + + gpu_metrics->pcie_link_width = metrics.PcieWidth; + gpu_metrics->pcie_link_speed = link_speed[metrics.PcieRate]; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_1); +} + +static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t smu_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) { + dev_err(adev->dev, "Failed to get smu version!\n"); + return ret; + } + + switch (adev->asic_type) { + case CHIP_NAVI12: + if (smu_version > 0x00341C00) + ret = navi12_get_gpu_metrics(smu, table); + else + ret = navi12_get_legacy_gpu_metrics(smu, table); + break; + case CHIP_NAVI10: + case CHIP_NAVI14: + default: + if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) || + ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) + ret = navi10_get_gpu_metrics(smu, table); + else + ret =navi10_get_legacy_gpu_metrics(smu, table); + break; + } + + return ret; } static int navi10_enable_mgpu_fan_boost(struct smu_context *smu) @@ -2489,13 +3051,14 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_power_source = smu_v11_0_set_power_source, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, - .get_gpu_metrics = navi10_get_gpu_metrics, + .get_gpu_metrics = navi1x_get_gpu_metrics, .enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost, .gfx_ulv_control = smu_v11_0_gfx_ulv_control, .deep_sleep_control = smu_v11_0_deep_sleep_control, .get_fan_parameters = navi10_get_fan_parameters, .post_init = navi10_post_smu_init, .interrupt_work = smu_v11_0_interrupt_work, + .set_mp1_state = navi10_set_mp1_state, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index af73e1430af5..d2fd44b903ca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -89,17 +89,17 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), 
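/*
 * Editor's note -- illustrative sketch, not part of the patch. In these
 * cmn2asic message tables the third MSG_MAP argument is the message's
 * valid-in-VF flag, and the 0 -> 1 flips just below (SetDriverDramAddr
 * High/Low, TransferTableSmu2Dram, SetSoftMin/MaxByFreq) whitelist
 * those messages for SR-IOV virtual functions. Expanded, one row is
 * roughly:
 *
 *	[SMU_MSG_SetSoftMinByFreq] = {
 *		.valid_mapping = 1,
 *		.map_to        = PPSMC_MSG_SetSoftMinByFreq,
 *		.valid_in_vf   = 1,	<-- now accepted from a VF
 *	},
 */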
MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), - MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), - MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), - MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), - MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), @@ -416,7 +416,7 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) goto err0_out; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; @@ -921,7 +921,7 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu, dpm_desc = &pptable->DpmDescriptor[clk_index]; /* 0 - Fine grained DPM, 1 - Discrete DPM */ - return dpm_desc->SnapToDiscrete == 0 ? 
true : false; + return dpm_desc->SnapToDiscrete == 0; } static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table, @@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, case SMU_SOCCLK: case SMU_MCLK: case SMU_UCLK: - case SMU_DCEFCLK: case SMU_FCLK: /* There is only 2 levels for fine grained DPM */ if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) { @@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, if (ret) goto forec_level_out; break; + case SMU_DCEFCLK: + dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n"); + break; default: break; } @@ -1736,7 +1738,7 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu) power_limit = pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } - smu->current_power_limit = power_limit; + smu->current_power_limit = smu->default_power_limit = power_limit; if (smu->od_enabled) { od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); @@ -2948,11 +2950,13 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_0 *gpu_metrics = - (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; SmuMetricsExternal_t metrics_external; SmuMetrics_t *metrics = &(metrics_external.SmuMetrics); + struct amdgpu_device *adev = smu->adev; + uint32_t smu_version; int ret = 0; ret = smu_cmn_get_metrics_table(smu, @@ -2961,7 +2965,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, if (ret) return ret; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 0); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); gpu_metrics->temperature_edge = metrics->TemperatureEdge; gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot; @@ -2999,16 +3003,26 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_fan_speed = metrics->CurrFanSpeed; - gpu_metrics->pcie_link_width = - smu_v11_0_get_current_pcie_link_width(smu); - gpu_metrics->pcie_link_speed = - smu_v11_0_get_current_pcie_link_speed(smu); + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu_version > 0x003A1E00) || + ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu_version > 0x00410400)) { + gpu_metrics->pcie_link_width = metrics->PcieWidth; + gpu_metrics->pcie_link_speed = link_speed[metrics->PcieRate]; + } else { + gpu_metrics->pcie_link_width = + smu_v11_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + smu_v11_0_get_current_pcie_link_speed(smu); + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); *table = (void *)gpu_metrics; - return sizeof(struct gpu_metrics_v1_0); + return sizeof(struct gpu_metrics_v1_1); } static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) @@ -3098,6 +3112,23 @@ static int sienna_cichlid_system_features_control(struct smu_context *smu, return smu_v11_0_system_features_control(smu, en); } +static int sienna_cichlid_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + ret = smu_cmn_set_mp1_state(smu, mp1_state); + break; + default: + /* Ignore others */ + ret = 0; + } + + return ret; +} + static const struct 
pptable_funcs sienna_cichlid_ppt_funcs = { .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask, .set_default_dpm_table = sienna_cichlid_set_default_dpm_table, @@ -3183,6 +3214,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_fan_parameters = sienna_cichlid_get_fan_parameters, .interrupt_work = smu_v11_0_interrupt_work, .gpo_control = sienna_cichlid_gpo_control, + .set_mp1_state = sienna_cichlid_set_mp1_state, }; void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index a6211858ead4..6274cae4a065 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -68,9 +68,6 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin"); #define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms -#define LINK_WIDTH_MAX 6 -#define LINK_SPEED_MAX 3 - #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 @@ -81,9 +78,6 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin"); #define mmTHM_BACO_CNTL_ARCT 0xA7 #define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0 -static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; -static int link_speed[] = {25, 50, 80, 160}; - int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -567,6 +561,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu) smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; break; case 3: + case 4: default: v_3_3 = (struct atom_firmware_info_v3_3 *)header; smu->smu_table.boot_values.revision = v_3_3->firmware_revision; @@ -750,8 +745,10 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu) int ret = 0; uint32_t feature_mask[2]; - if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) + if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) { + ret = -EINVAL; goto failed; + } bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); @@ -1534,7 +1531,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) NULL); break; default: - if (!ras || !ras->supported) { + if (!ras || !ras->supported || adev->gmc.xgmi.pending_reset) { if (adev->asic_type == CHIP_ARCTURUS) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); data |= 0x80000000; @@ -1607,6 +1604,16 @@ int smu_v11_0_mode1_reset(struct smu_context *smu) return ret; } +int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 
1 : 0, NULL); + + return ret; +} + + int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { @@ -2001,7 +2008,7 @@ int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; } -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu) +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu) { uint32_t width_level; @@ -2021,7 +2028,7 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu) >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; } -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu) +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu) { uint32_t speed_level; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 101eaa20db9b..77f532a49e37 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -194,23 +194,39 @@ static int vangogh_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + uint32_t ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + goto err0_out; + } SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + + if (if_version < 0x3) { + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); + } else { + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + } if (!smu_table->metrics_table) goto err0_out; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; @@ -235,13 +251,12 @@ err0_out: return -ENOMEM; } -static int vangogh_get_smu_metrics_data(struct smu_context *smu, +static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { struct smu_table_context *smu_table = &smu->smu_table; - - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; int ret = 0; mutex_lock(&smu->metrics_lock); @@ -255,7 +270,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, } switch (member) { - case METRICS_AVERAGE_GFXCLK: + case METRICS_CURR_GFXCLK: *value = metrics->GfxclkFrequency; break; case 
METRICS_AVERAGE_SOCCLK: @@ -267,7 +282,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_DCLK: *value = metrics->DclkFrequency; break; - case METRICS_AVERAGE_UCLK: + case METRICS_CURR_UCLK: *value = metrics->MemclkFrequency; break; case METRICS_AVERAGE_GFXACTIVITY: @@ -311,6 +326,103 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, return ret; } +static int vangogh_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->Current.GfxclkFrequency; + break; + case METRICS_AVERAGE_SOCCLK: + *value = metrics->Current.SocclkFrequency; + break; + case METRICS_AVERAGE_VCLK: + *value = metrics->Current.VclkFrequency; + break; + case METRICS_AVERAGE_DCLK: + *value = metrics->Current.DclkFrequency; + break; + case METRICS_CURR_UCLK: + *value = metrics->Current.MemclkFrequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->Current.GfxActivity; + break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = metrics->Current.UvdActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Current.CurrentSocketPower << 8) / + 1000; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->Current.GfxTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->Current.SocTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->Current.ThrottlerStatus; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->Current.Voltage[2]; + break; + case METRICS_VOLTAGE_VDDSOC: + *value = metrics->Current.Voltage[1]; + break; + case METRICS_AVERAGE_CPUCLK: + memcpy(value, &metrics->Current.CoreFrequency[0], + smu->cpu_core_num * sizeof(uint16_t)); + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + return ret; + } + + if (if_version < 0x3) + ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); + else + ret = vangogh_get_smu_metrics_data(smu, member, value); + + return ret; +} + static int vangogh_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -447,11 +559,11 @@ static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_typ return 0; } -static int vangogh_print_fine_grain_clk(struct smu_context *smu, +static int vangogh_print_legacy_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_t metrics; + SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; @@ -546,6 +658,126 @@ static int vangogh_print_fine_grain_clk(struct smu_context *smu, 
return size; } +static int vangogh_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, char *buf) +{ + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + SmuMetrics_t metrics; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int i, size = 0, ret = 0; + uint32_t cur_value = 0, value = 0, count = 0; + bool cur_value_match_level = false; + + memset(&metrics, 0, sizeof(metrics)); + + ret = smu_cmn_get_metrics_table(smu, &metrics, false); + if (ret) + return ret; + + switch (clk_type) { + case SMU_OD_SCLK: + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + size = sprintf(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sprintf(buf + size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); + } + break; + case SMU_OD_CCLK: + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sprintf(buf + size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sprintf(buf + size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); + } + break; + case SMU_OD_RANGE: + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + size = sprintf(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); + } + break; + case SMU_SOCCLK: + /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ + count = clk_table->NumSocClkLevelsEnabled; + cur_value = metrics.Current.SocclkFrequency; + break; + case SMU_VCLK: + count = clk_table->VcnClkLevelsEnabled; + cur_value = metrics.Current.VclkFrequency; + break; + case SMU_DCLK: + count = clk_table->VcnClkLevelsEnabled; + cur_value = metrics.Current.DclkFrequency; + break; + case SMU_MCLK: + count = clk_table->NumDfPstatesEnabled; + cur_value = metrics.Current.MemclkFrequency; + break; + case SMU_FCLK: + count = clk_table->NumDfPstatesEnabled; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); + if (ret) + return ret; + break; + default: + break; + } + + switch (clk_type) { + case SMU_SOCCLK: + case SMU_VCLK: + case SMU_DCLK: + case SMU_MCLK: + case SMU_FCLK: + for (i = 0; i < count; i++) { + ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + if (ret) + return ret; + if (!value) + continue; + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? 
"*" : ""); + if (cur_value == value) + cur_value_match_level = true; + } + + if (!cur_value_match_level) + size += sprintf(buf + size, " %uMhz *\n", cur_value); + break; + default: + break; + } + + return size; +} + +static int vangogh_common_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, char *buf) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + return ret; + } + + if (if_version < 0x3) + ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); + else + ret = vangogh_print_clk_levels(smu, clk_type, buf); + + return ret; +} + static int vangogh_get_profiling_clk_mask(struct smu_context *smu, enum amd_dpm_forced_level level, uint32_t *vclk_mask, @@ -860,7 +1092,6 @@ static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, return ret; break; case SMU_FCLK: - case SMU_MCLK: ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL); @@ -948,7 +1179,6 @@ static int vangogh_force_clk_levels(struct smu_context *smu, if (ret) return ret; break; - case SMU_MCLK: case SMU_FCLK: ret = vangogh_get_dpm_clk_limited(smu, clk_type, soft_min_level, &min_freq); @@ -1035,7 +1265,6 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) SMU_SOCCLK, SMU_VCLK, SMU_DCLK, - SMU_MCLK, SMU_FCLK, }; @@ -1064,7 +1293,6 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu) enum smu_clk_type clk_type; uint32_t feature; } clk_feature_map[] = { - {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT}, {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, @@ -1196,7 +1424,6 @@ static int vangogh_set_performance_level(struct smu_context *smu, if (ret) return ret; - vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); @@ -1236,7 +1463,6 @@ static int vangogh_set_performance_level(struct smu_context *smu, if (ret) return ret; - vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: @@ -1278,57 +1504,57 @@ static int vangogh_read_sensor(struct smu_context *smu, mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_GPU_LOAD: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXACTIVITY, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_EDGE_TEMP: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_TEMPERATURE_EDGE, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_TEMPERATURE_HOTSPOT, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = vangogh_get_smu_metrics_data(smu, - METRICS_AVERAGE_UCLK, + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = 
vangogh_get_smu_metrics_data(smu, - METRICS_AVERAGE_GFXCLK, + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_CURR_GFXCLK, (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; case AMDGPU_PP_SENSOR_VDDGFX: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_VDDNB: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDSOC, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_CPU_CLK: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_CPUCLK, (uint32_t *)data); *size = smu->cpu_core_num * sizeof(uint16_t); @@ -1402,28 +1628,27 @@ static int vangogh_set_watermarks_table(struct smu_context *smu, return 0; } -static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, +static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v2_0 *gpu_metrics = - (struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table; - SmuMetrics_t metrics; + struct gpu_metrics_v2_1 *gpu_metrics = + (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; + SmuMetrics_legacy_t metrics; int ret = 0; ret = smu_cmn_get_metrics_table(smu, &metrics, true); if (ret) return ret; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); gpu_metrics->temperature_gfx = metrics.GfxTemperature; gpu_metrics->temperature_soc = metrics.SocTemperature; memcpy(&gpu_metrics->temperature_core[0], &metrics.CoreTemperature[0], - sizeof(uint16_t) * 8); + sizeof(uint16_t) * 4); gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; - gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; gpu_metrics->average_gfx_activity = metrics.GfxActivity; gpu_metrics->average_mm_activity = metrics.UvdActivity; @@ -1434,7 +1659,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_power = metrics.Power[2]; memcpy(&gpu_metrics->average_core_power[0], &metrics.CorePower[0], - sizeof(uint16_t) * 8); + sizeof(uint16_t) * 4); gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; @@ -1445,9 +1670,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, memcpy(&gpu_metrics->current_coreclk[0], &metrics.CoreFrequency[0], - sizeof(uint16_t) * 8); + sizeof(uint16_t) * 4); gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; - gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1]; gpu_metrics->throttle_status = metrics.ThrottlerStatus; @@ -1455,19 +1679,100 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, *table = (void *)gpu_metrics; - return sizeof(struct gpu_metrics_v2_0); + return sizeof(struct gpu_metrics_v2_1); +} + +static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_1 *gpu_metrics = + (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + memcpy(&gpu_metrics->temperature_core[0], + 
&metrics.Current.CoreTemperature[0], + sizeof(uint16_t) * 4); + gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; + + gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; + gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; + + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_cpu_power = metrics.Current.Power[0]; + gpu_metrics->average_soc_power = metrics.Current.Power[1]; + gpu_metrics->average_gfx_power = metrics.Current.Power[2]; + memcpy(&gpu_metrics->average_core_power[0], + &metrics.Average.CorePower[0], + sizeof(uint16_t) * 4); + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + memcpy(&gpu_metrics->current_coreclk[0], + &metrics.Current.CoreFrequency[0], + sizeof(uint16_t) * 4); + gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_1); +} + +static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + return ret; + } + + if (if_version < 0x3) + ret = vangogh_get_legacy_gpu_metrics(smu, table); + else + ret = vangogh_get_gpu_metrics(smu, table); + + return ret; } static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { int ret = 0; - int i; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { dev_warn(smu->adev->dev, - "pp_od_clk_voltage is not accessible if power_dpm_force_perfomance_level is not in manual mode!\n"); + "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); return -EINVAL; } @@ -1535,43 +1840,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, - smu->gfx_actual_hard_min_freq, NULL); - if (ret) { - dev_err(smu->adev->dev, "Restore the default hard min sclk failed!"); - return ret; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, - smu->gfx_actual_soft_max_freq, NULL); - if (ret) { - dev_err(smu->adev->dev, "Restore the default soft max sclk failed!"); - return ret; - } - - 
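With this change, restoring the defaults in PP_OD_RESTORE_DEFAULT_TABLE only resets the cached frequencies: the SMC writes removed here, along with the per-core cclk loop removed just below, are left to the commit step (PP_OD_COMMIT_DPM_TABLE) to apply. The removed loop packs the CPU core index into the upper bits of the message argument; a minimal, self-contained sketch of that encoding (cclk_msg_arg() is a hypothetical name, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* SetSoftMinCclk/SetSoftMaxCclk take a single 32-bit argument:
 * core index in bits 20 and up, frequency (MHz here) in the low 20 bits. */
static uint32_t cclk_msg_arg(uint32_t core, uint32_t freq_mhz)
{
        return (core << 20) | freq_mhz;
}

int main(void)
{
        printf("core 3 @ 1400 MHz -> 0x%08x\n", (unsigned)cclk_msg_arg(3, 1400));
        return 0;
}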
if (smu->adev->pm.fw_version < 0x43f1b00) { - dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); - break; - } - - for (i = 0; i < smu->cpu_core_num; i++) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, - (i << 20) | smu->cpu_actual_soft_min_freq, - NULL); - if (ret) { - dev_err(smu->adev->dev, "Set hard min cclk failed!"); - return ret; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, - (i << 20) | smu->cpu_actual_soft_max_freq, - NULL); - if (ret) { - dev_err(smu->adev->dev, "Set soft max cclk failed!"); - return ret; - } - } } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1799,7 +2067,7 @@ static int vangogh_get_power_limit(struct smu_context *smu) return ret; } /* convert from milliwatt to watt */ - smu->current_power_limit = ppt_limit / 1000; + smu->current_power_limit = smu->default_power_limit = ppt_limit / 1000; smu->max_power_limit = 29; ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); @@ -1808,7 +2076,8 @@ static int vangogh_get_power_limit(struct smu_context *smu) return ret; } /* convert from milliwatt to watt */ - power_context->current_fast_ppt_limit = ppt_limit / 1000; + power_context->current_fast_ppt_limit = + power_context->default_fast_ppt_limit = ppt_limit / 1000; power_context->max_fast_ppt_limit = 30; return ret; @@ -1833,6 +2102,9 @@ static int vangogh_get_ppt_limit(struct smu_context *smu, case SMU_PPT_LIMIT_CURRENT: *ppt_limit = power_context->current_fast_ppt_limit; break; + case SMU_PPT_LIMIT_DEFAULT: + *ppt_limit = power_context->default_fast_ppt_limit; + break; default: break; } @@ -1910,9 +2182,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = { .set_watermarks_table = vangogh_set_watermarks_table, .set_driver_table_location = smu_v11_0_set_driver_table_location, .interrupt_work = smu_v11_0_interrupt_work, - .get_gpu_metrics = vangogh_get_gpu_metrics, + .get_gpu_metrics = vangogh_common_get_gpu_metrics, .od_edit_dpm_table = vangogh_od_edit_dpm_table, - .print_clk_levels = vangogh_print_fine_grain_clk, + .print_clk_levels = vangogh_common_print_clk_levels, .set_default_dpm_table = vangogh_set_default_dpm_tables, .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, .system_features_control = vangogh_system_features_control, @@ -1928,6 +2200,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = { .get_ppt_limit = vangogh_get_ppt_limit, .get_power_limit = vangogh_get_power_limit, .set_power_limit = vangogh_set_power_limit, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, }; void vangogh_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5493388fcb10..f43b4c623685 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -151,7 +151,7 @@ static int renoir_init_smc_tables(struct smu_context *smu) if (!smu_table->watermarks_table) goto err2_out; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err3_out; @@ -351,7 +351,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu, if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { dev_warn(smu->adev->dev, - "pp_od_clk_voltage is not accessible if 
power_dpm_force_perfomance_level is not in manual mode!\n"); + "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); return -EINVAL; } @@ -389,24 +389,6 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu, } smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetHardMinGfxClk, - smu->gfx_actual_hard_min_freq, - NULL); - if (ret) { - dev_err(smu->adev->dev, "Restore the default hard min sclk failed!"); - return ret; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetSoftMaxGfxClk, - smu->gfx_actual_soft_max_freq, - NULL); - if (ret) { - dev_err(smu->adev->dev, "Restore the default soft max sclk failed!"); - return ret; - } break; case PP_OD_COMMIT_DPM_TABLE: if (size != 0) { @@ -1249,8 +1231,8 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v2_0 *gpu_metrics = - (struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v2_1 *gpu_metrics = + (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; SmuMetrics_t metrics; int ret = 0; @@ -1258,7 +1240,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu, if (ret) return ret; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 0); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); gpu_metrics->temperature_gfx = metrics.GfxTemperature; gpu_metrics->temperature_soc = metrics.SocTemperature; @@ -1303,7 +1285,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu, *table = (void *)gpu_metrics; - return sizeof(struct gpu_metrics_v2_0); + return sizeof(struct gpu_metrics_v2_1); } static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state) @@ -1350,6 +1332,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .gfx_state_change_set = renoir_gfx_state_change_set, .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters, .od_edit_dpm_table = renoir_od_edit_dpm_table, + .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values, }; void renoir_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index 6cc4855c8a37..d60b8c5e8715 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -27,6 +27,7 @@ #include "amdgpu_smu.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" #include "smu_v12_0.h" #include "soc15_common.h" #include "atom.h" @@ -278,3 +279,125 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) return ret; } + +static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, + uint8_t clk_id, + uint8_t syspll_id, + uint32_t *clk_freq) +{ + struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; + struct atom_get_smu_clock_info_output_parameters_v3_1 *output; + int ret, index; + + input.clk_id = clk_id; + input.syspll_id = syspll_id; + input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; + index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, + getsmuclockinfo); + + ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, + (uint32_t *)&input); + if (ret) + return -EINVAL; + + output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; + *clk_freq = 
le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; + + return 0; +} + +int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu) +{ + int ret, index; + uint16_t size; + uint8_t frev, crev; + struct atom_common_table_header *header; + struct atom_firmware_info_v3_1 *v_3_1; + struct atom_firmware_info_v3_3 *v_3_3; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, + (uint8_t **)&header); + if (ret) + return ret; + + if (header->format_revision != 3) { + dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu12\n"); + return -EINVAL; + } + + switch (header->content_revision) { + case 0: + case 1: + case 2: + v_3_1 = (struct atom_firmware_info_v3_1 *)header; + smu->smu_table.boot_values.revision = v_3_1->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = 0; + smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; + break; + case 3: + case 4: + default: + v_3_3 = (struct atom_firmware_info_v3_3 *)header; + smu->smu_table.boot_values.revision = v_3_3->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; + smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability; + } + + smu->smu_table.boot_values.format_revision = header->format_revision; + smu->smu_table.boot_values.content_revision = header->content_revision; + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_SOCCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.socclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL1_DCFCLK_ID, + (uint8_t)SMU12_SYSPLL1_ID, + &smu->smu_table.boot_values.dcefclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_VCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.vclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_DCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.dclk); + + if ((smu->smu_table.boot_values.format_revision == 3) && + (smu->smu_table.boot_values.content_revision >= 2)) + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL3_0_FCLK_ID, + (uint8_t)SMU12_SYSPLL3_0_ID, + &smu->smu_table.boot_values.fclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_LCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.lclk); + + return 0; +} diff 
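smu_v12_0_get_vbios_bootup_values above parses atom_firmware_info by content revision (0-2 use the v3.1 layout; 3, 4 and anything newer fall through to v3.3) and then queries each syspll clock individually. GetSmuClockInfo reports the frequency in Hz while boot_values stores clocks in 10 kHz units, which is what the divide by 10000 implements; a minimal sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

/* VBIOS reports syspll clocks in Hz; the SMU boot values use 10 kHz units. */
static uint32_t hz_to_10khz(uint32_t freq_hz)
{
        return freq_hz / 10000;
}

int main(void)
{
        printf("%u\n", (unsigned)hz_to_10khz(600000000)); /* 600 MHz -> 60000 */
        return 0;
}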
--git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile new file mode 100644 index 000000000000..652b4e554378 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -0,0 +1,30 @@ +# +# Copyright 2020 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'smu manager' sub-component of powerplay. +# It provides the smu management services for the driver. + +SMU13_MGR = smu_v13_0.o aldebaran_ppt.o + +AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU13MGR) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c new file mode 100644 index 000000000000..dcbe3a72da09 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -0,0 +1,1859 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v13_0.h" +#include "smu13_driver_if_aldebaran.h" +#include "soc15_common.h" +#include "atom.h" +#include "power_state.h" +#include "aldebaran_ppt.h" +#include "smu_v13_0_pptable.h" +#include "aldebaran_ppsmc.h" +#include "nbio/nbio_7_4_offset.h" +#include "nbio/nbio_7_4_sh_mask.h" +#include "thm/thm_11_0_2_offset.h" +#include "thm/thm_11_0_2_sh_mask.h" +#include "amdgpu_xgmi.h" +#include <linux/pci.h> +#include "amdgpu_ras.h" +#include "smu_cmn.h" +#include "mp/mp_13_0_2_offset.h" + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. + */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) + +#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \ + [smu_feature] = {1, (aldebaran_feature)} + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \ + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_LCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_XGMI_BIT) | \ + FEATURE_MASK(FEATURE_DPM_VCN_BIT)) + +/* possible frequency drift (1Mhz) */ +#define EPSILON 1 + +#define smnPCIE_ESM_CTRL 0x111003D0 + +#define CLOCK_VALID (1 << 31) + +static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 0), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh, 0), + MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0), + MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), 
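/* The third MSG_MAP column is the valid_in_vf flag: only messages marked 1 may be issued while running as an SR-IOV virtual function. */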
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0), + MSG_MAP(WaflTest, PPSMC_MSG_WaflTest, 0), + MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable, 0), + MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0), + MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0), + MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0), + MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0), + MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0), + MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0), + MSG_MAP(SetExecuteDMATest, PPSMC_MSG_SetExecuteDMATest, 0), + MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0), + MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), + MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0), + MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), +}; + +static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(DCLK, PPCLK_DCLK), + CLK_MAP(VCLK, PPCLK_VCLK), + CLK_MAP(LCLK, PPCLK_LCLK), +}; + +static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = { + ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_PREFETCHER_BIT, FEATURE_DATA_CALCULATIONS), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_BIT, FEATURE_DPM_XGMI_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT, FEATURE_DS_UCLK_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT, FEATURE_GFX_SS_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT, FEATURE_RSMU_SMN_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT, FEATURE_WAFL_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_PLUS_BIT, FEATURE_APCC_PLUS_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_FUSE_CG_BIT, FEATURE_FUSE_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_MP1_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_SMUIO_CG_BIT, FEATURE_SMUIO_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_THM_CG_BIT, FEATURE_THM_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_CLK_CG_BIT, FEATURE_CLK_CG_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL_BIT), + 
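/* Each ALDEBARAN_FEA_MAP entry expands to {1, asic_bit}: the leading 1 marks the mapping valid, and the second field gives the firmware's bit number for the generic SMU feature. */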
ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, FEATURE_OUT_OF_BAND_MONITOR_BIT), + ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN), + ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), +}; + +static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(AVFS_FUSE_OVERRIDE), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(I2C_COMMANDS), +}; + +static int aldebaran_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) { + kfree(smu_table->metrics_table); + return -ENOMEM; + } + + return 0; +} + +static int aldebaran_allocate_dpm_context(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), + GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + + smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state), + GFP_KERNEL); + if (!smu_dpm->dpm_current_power_state) + return -ENOMEM; + + smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state), + GFP_KERNEL); + if (!smu_dpm->dpm_request_power_state) + return -ENOMEM; + + return 0; +} + +static int aldebaran_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = aldebaran_tables_init(smu); + if (ret) + return ret; + + ret = aldebaran_allocate_dpm_context(smu); + if (ret) + return ret; + + return smu_v13_0_init_smc_tables(smu); +} + +static int aldebaran_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + if (num > 2) + return -EINVAL; + + /* pptable will handle the features to enable */ + memset(feature_mask, 0xFF, sizeof(uint32_t) * num); + + return 0; +} + +static int aldebaran_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *dpm_table = NULL; + PPTable_t *pptable = smu->smu_table.driver_pptable; + int ret = 0; + + /* socclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.soc_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + ret = smu_v13_0_set_single_dpm_table(smu, + SMU_SOCCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* gfxclk dpm table setup */ + dpm_table = 
&dpm_context->dpm_tables.gfx_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { + /* in the case of gfxclk, only fine-grained dpm is honored */ + dpm_table->count = 2; + dpm_table->dpm_levels[0].value = pptable->GfxclkFmin; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->dpm_levels[1].value = pptable->GfxclkFmax; + dpm_table->dpm_levels[1].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[1].value; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* memclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_v13_0_set_single_dpm_table(smu, + SMU_UCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* fclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { + ret = smu_v13_0_set_single_dpm_table(smu, + SMU_FCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + return 0; +} + +static int aldebaran_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_13_0_powerplay_table *powerplay_table = + table_context->power_play_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; + + mutex_lock(&smu_baco->mutex); + if (powerplay_table->platform_caps & SMU_13_0_PP_PLATFORM_CAP_BACO || + powerplay_table->platform_caps & SMU_13_0_PP_PLATFORM_CAP_MACO) + smu_baco->platform_support = true; + mutex_unlock(&smu_baco->mutex); + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + + return 0; +} + +static int aldebaran_store_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_13_0_powerplay_table *powerplay_table = + table_context->power_play_table; + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + return 0; +} + +static int aldebaran_append_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + struct atom_smc_dpm_info_v4_10 *smc_dpm_table; + int index, ret; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smc_dpm_info); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table); + if (ret) + return ret; + + dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n", + smc_dpm_table->table_header.format_revision, + smc_dpm_table->table_header.content_revision); + + if ((smc_dpm_table->table_header.format_revision == 4) && + (smc_dpm_table->table_header.content_revision == 10)) + 
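/* Only the board-specific tail of the ATOM smc_dpm_info v4.10 table, everything from GfxMaxCurrent onward, is overlaid onto the driver pptable; the offsetof() arithmetic below sizes exactly that tail. */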
memcpy(&smc_pptable->GfxMaxCurrent, + &smc_dpm_table->GfxMaxCurrent, + sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent)); + return 0; +} + +static int aldebaran_setup_pptable(struct smu_context *smu) +{ + int ret = 0; + + /* VBIOS pptable is the first choice */ + smu->smu_table.boot_values.pp_table_id = 0; + + ret = smu_v13_0_setup_pptable(smu); + if (ret) + return ret; + + ret = aldebaran_store_powerplay_table(smu); + if (ret) + return ret; + + ret = aldebaran_append_powerplay_table(smu); + if (ret) + return ret; + + ret = aldebaran_check_powerplay_table(smu); + if (ret) + return ret; + + return ret; +} + +static int aldebaran_run_btc(struct smu_context *smu) +{ + int ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + if (ret) + dev_err(smu->adev->dev, "RunDcBtc failed!\n"); + + return ret; +} + +static int aldebaran_populate_umd_state_clk(struct smu_context *smu) +{ + struct smu_13_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *gfx_table = + &dpm_context->dpm_tables.gfx_table; + struct smu_13_0_dpm_table *mem_table = + &dpm_context->dpm_tables.uclk_table; + struct smu_13_0_dpm_table *soc_table = + &dpm_context->dpm_tables.soc_table; + struct smu_umd_pstate_table *pstate_table = + &smu->pstate_table; + + pstate_table->gfxclk_pstate.min = gfx_table->min; + pstate_table->gfxclk_pstate.peak = gfx_table->max; + + pstate_table->uclk_pstate.min = mem_table->min; + pstate_table->uclk_pstate.peak = mem_table->max; + + pstate_table->socclk_pstate.min = soc_table->min; + pstate_table->socclk_pstate.peak = soc_table->max; + + if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL && + mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL && + soc_table->count > ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL) { + pstate_table->gfxclk_pstate.standard = + gfx_table->dpm_levels[ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL].value; + pstate_table->uclk_pstate.standard = + mem_table->dpm_levels[ALDEBARAN_UMD_PSTATE_MCLK_LEVEL].value; + pstate_table->socclk_pstate.standard = + soc_table->dpm_levels[ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL].value; + } else { + pstate_table->gfxclk_pstate.standard = + pstate_table->gfxclk_pstate.min; + pstate_table->uclk_pstate.standard = + pstate_table->uclk_pstate.min; + pstate_table->socclk_pstate.standard = + pstate_table->socclk_pstate.min; + } + + return 0; +} + +static int aldebaran_get_clk_table(struct smu_context *smu, + struct pp_clock_levels_with_latency *clocks, + struct smu_13_0_dpm_table *dpm_table) +{ + int i, count; + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; + clocks->num_levels = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + dpm_table->dpm_levels[i].value * 1000; + clocks->data[i].latency_in_us = 0; + } + + return 0; +} + +static int aldebaran_freqs_in_same_level(int32_t frequency1, + int32_t frequency2) +{ + return (abs(frequency1 - frequency2) <= EPSILON); +} + +static int aldebaran_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table= &smu->smu_table; + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK]; + break; + case METRICS_CURR_FCLK: + *value = metrics->CurrClock[PPCLK_FCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + *value = metrics->AverageGfxclkFrequency; + break; + case METRICS_AVERAGE_SOCCLK: + *value = metrics->AverageSocclkFrequency; + break; + case METRICS_AVERAGE_UCLK: + *value = metrics->AverageUclkFrequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->TemperatureHotspot * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->TemperatureHBM * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->TemperatureVrGfx * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRSOC: + *value = metrics->TemperatureVrSoc * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRMEM: + *value = metrics->TemperatureVrMem * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->ThrottlerStatus; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + int clk_id = 0; + + if (!value) + return -EINVAL; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) + return -EINVAL; + + switch (clk_id) { + case PPCLK_GFXCLK: + /* + * CurrClock[clk_id] can provide accurate + * output only when the dpm feature is enabled. + * We can use Average_* for dpm disabled case. + * But this is available for gfxclk/uclk/socclk/vclk/dclk. 
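* (FCLK is the exception: no Average counterpart is reported, so the PPCLK_FCLK case below always reads CurrClock.)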
+ */ + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) + member_type = METRICS_CURR_GFXCLK; + else + member_type = METRICS_AVERAGE_GFXCLK; + break; + case PPCLK_UCLK: + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) + member_type = METRICS_CURR_UCLK; + else + member_type = METRICS_AVERAGE_UCLK; + break; + case PPCLK_SOCCLK: + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) + member_type = METRICS_CURR_SOCCLK; + else + member_type = METRICS_AVERAGE_SOCCLK; + break; + case PPCLK_VCLK: + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) + member_type = METRICS_CURR_VCLK; + else + member_type = METRICS_AVERAGE_VCLK; + break; + case PPCLK_DCLK: + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) + member_type = METRICS_CURR_DCLK; + else + member_type = METRICS_AVERAGE_DCLK; + break; + case PPCLK_FCLK: + member_type = METRICS_CURR_FCLK; + break; + default: + return -EINVAL; + } + + return aldebaran_get_smu_metrics_data(smu, + member_type, + value); +} + +static int aldebaran_print_clk_levels(struct smu_context *smu, + enum smu_clk_type type, char *buf) +{ + int i, now, size = 0; + int ret = 0; + struct pp_clock_levels_with_latency clocks; + struct smu_13_0_dpm_table *single_dpm_table; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_13_0_dpm_context *dpm_context = NULL; + uint32_t display_levels; + uint32_t freq_values[3] = {0}; + uint32_t min_clk, max_clk; + + if (amdgpu_ras_intr_triggered()) + return snprintf(buf, PAGE_SIZE, "unavailable\n"); + + dpm_context = smu_dpm->dpm_context; + + switch (type) { + + case SMU_OD_SCLK: + size = sprintf(buf, "%s:\n", "GFXCLK"); + fallthrough; + case SMU_SCLK: + ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!"); + return ret; + } + + display_levels = clocks.num_levels; + + min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ? + smu->gfx_actual_hard_min_freq & ~CLOCK_VALID : + single_dpm_table->dpm_levels[0].value; + max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ? + smu->gfx_actual_soft_max_freq & ~CLOCK_VALID : + single_dpm_table->dpm_levels[1].value; + + freq_values[0] = min_clk; + freq_values[1] = max_clk; + + /* fine-grained dpm has only 2 levels */ + if (now > min_clk && now < max_clk) { + display_levels = clocks.num_levels + 1; + freq_values[2] = max_clk; + freq_values[1] = now; + } + + /* + * For DPM disabled case, there will be only one clock level. + * And it's safe to assume that is always the current clock. + */ + if (display_levels == clocks.num_levels) { + for (i = 0; i < clocks.num_levels; i++) + size += sprintf( + buf + size, "%d: %uMhz %s\n", i, + freq_values[i], + (clocks.num_levels == 1) ? + "*" : + (aldebaran_freqs_in_same_level( + freq_values[i], now) ? + "*" : + "")); + } else { + for (i = 0; i < display_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", i, + freq_values[i], i == 1 ? 
"*" : ""); + } + + break; + + case SMU_OD_MCLK: + size = sprintf(buf, "%s:\n", "MCLK"); + fallthrough; + case SMU_MCLK: + ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get current mclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.num_levels == 1) ? "*" : + (aldebaran_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now) ? "*" : "")); + break; + + case SMU_SOCCLK: + ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get current socclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.num_levels == 1) ? "*" : + (aldebaran_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now) ? "*" : "")); + break; + + case SMU_FCLK: + ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get current fclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!"); + return ret; + } + + for (i = 0; i < single_dpm_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, single_dpm_table->dpm_levels[i].value, + (clocks.num_levels == 1) ? "*" : + (aldebaran_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now) ? "*" : "")); + break; + + default: + break; + } + + return size; +} + +static int aldebaran_upload_dpm_level(struct smu_context *smu, + bool max, + uint32_t feature_mask, + uint32_t level) +{ + struct smu_13_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + uint32_t freq; + int ret = 0; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && + (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT))) { + freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value; + ret = smu_cmn_send_smc_msg_with_param(smu, + (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n", + max ? "max" : "min"); + return ret; + } + } + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && + (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK_BIT))) { + freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value; + ret = smu_cmn_send_smc_msg_with_param(smu, + (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n", + max ? 
"max" : "min"); + return ret; + } + } + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) && + (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT))) { + freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value; + ret = smu_cmn_send_smc_msg_with_param(smu, + (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), + (PPCLK_SOCCLK << 16) | (freq & 0xffff), + NULL); + if (ret) { + dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n", + max ? "max" : "min"); + return ret; + } + } + + return ret; +} + +static int aldebaran_force_clk_levels(struct smu_context *smu, + enum smu_clk_type type, uint32_t mask) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *single_dpm_table = NULL; + uint32_t soft_min_level, soft_max_level; + int ret = 0; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (type) { + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + if (soft_max_level >= single_dpm_table->count) { + dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n", + soft_max_level, single_dpm_table->count - 1); + ret = -EINVAL; + break; + } + + ret = aldebaran_upload_dpm_level(smu, + false, + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT), + soft_min_level); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n"); + break; + } + + ret = aldebaran_upload_dpm_level(smu, + true, + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT), + soft_max_level); + if (ret) + dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n"); + + break; + + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_FCLK: + /* + * Should not arrive here since aldebaran does not + * support mclk/socclk/fclk softmin/softmax settings + */ + ret = -EINVAL; + break; + + default: + break; + } + + return ret; +} + +static int aldebaran_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_13_0_powerplay_table *powerplay_table = + table_context->power_play_table; + PPTable_t *pptable = smu->smu_table.driver_pptable; + + if (!range) + return -EINVAL; + + memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range)); + + range->hotspot_crit_max = pptable->ThotspotLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_crit_max = pptable->TmemLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)* + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->software_shutdown_temp = powerplay_table->software_shutdown_temp; + + return 0; +} + +static int aldebaran_get_current_activity_percent(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + + if (!value) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXACTIVITY, + value); + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_AVERAGE_MEMACTIVITY, + value); + break; + default: + dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n"); + return -EINVAL; + } + + return ret; +} + +static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value) +{ + if (!value) + return -EINVAL; + + return 
aldebaran_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + value); +} + +static int aldebaran_thermal_get_temperature(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + + if (!value) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + value); + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + value); + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_MEM, + value); + break; + default: + dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n"); + return -EINVAL; + } + + return ret; +} + +static int aldebaran_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + if (amdgpu_ras_intr_triggered()) + return 0; + + if (!data || !size) + return -EINVAL; + + mutex_lock(&smu->sensor_lock); + switch (sensor) { + case AMDGPU_PP_SENSOR_MEM_LOAD: + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = aldebaran_get_current_activity_percent(smu, + sensor, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = aldebaran_get_gpu_power(smu, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + case AMDGPU_PP_SENSOR_EDGE_TEMP: + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = aldebaran_thermal_get_temperature(smu, sensor, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data); + /* the output clock frequency in 10K unit */ + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + mutex_unlock(&smu->sensor_lock); + + return ret; +} + +static int aldebaran_get_power_limit(struct smu_context *smu) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t power_limit = 0; + int ret; + + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) + return -EINVAL; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit); + + if (ret) { + /* the last hope to figure out the ppt limit */ + if (!pptable) { + dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!"); + return -EINVAL; + } + power_limit = pptable->PptLimit; + } + + smu->current_power_limit = smu->default_power_limit = power_limit; + if (pptable) + smu->max_power_limit = pptable->PptLimit; + + return 0; +} + +static int aldebaran_system_features_control(struct smu_context *smu, bool enable) +{ + int ret; + + ret = smu_v13_0_system_features_control(smu, enable); + if (!ret && enable) + ret = aldebaran_run_btc(smu); + + return ret; +} + +static int aldebaran_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level) +{ + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + + /* Disable determinism if switching to another mode */ + if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) + && (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) + smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL); + + /* Reset user min/max gfx clock */ + smu->gfx_actual_hard_min_freq = 0; + 
smu->gfx_actual_soft_max_freq = 0; + + switch (level) { + + case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM: + return 0; + + case AMD_DPM_FORCED_LEVEL_HIGH: + case AMD_DPM_FORCED_LEVEL_LOW: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + default: + break; + } + + return smu_v13_0_set_performance_level(smu, level); +} + +static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max) +{ + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct amdgpu_device *adev = smu->adev; + uint32_t min_clk; + uint32_t max_clk; + int ret = 0; + + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) + return -EINVAL; + + if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + && (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) + return -EINVAL; + + if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + min_clk = max(min, dpm_context->dpm_tables.gfx_table.min); + max_clk = min(max, dpm_context->dpm_tables.gfx_table.max); + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, + min_clk, max_clk); + + if (!ret) { + smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID; + smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID; + } + return ret; + } + + if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + if (!max || (max < dpm_context->dpm_tables.gfx_table.min) || + (max > dpm_context->dpm_tables.gfx_table.max)) { + dev_warn(adev->dev, + "Invalid max frequency %d MHz specified for determinism\n", max); + return -EINVAL; + } + + /* Restore default min/max clocks and enable determinism */ + min_clk = dpm_context->dpm_tables.gfx_table.min; + max_clk = dpm_context->dpm_tables.gfx_table.max; + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + if (!ret) { + usleep_range(500, 1000); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_EnableDeterminism, + max, NULL); + if (ret) { + dev_err(adev->dev, + "Failed to enable determinism at GFX clock %d MHz\n", max); + } else { + smu->gfx_actual_hard_min_freq = + min_clk | CLOCK_VALID; + smu->gfx_actual_soft_max_freq = + max | CLOCK_VALID; + } + } + } + + return ret; +} + +static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, + long input[], uint32_t size) +{ + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + uint32_t min_clk; + uint32_t max_clk; + int ret = 0; + + /* Only allowed in manual or determinism mode */ + if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + && (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) + return -EINVAL; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (size != 2) { + dev_err(smu->adev->dev, "Input parameter number not correct\n"); + return -EINVAL; + } + + if (input[0] == 0) { + if (input[1] < dpm_context->dpm_tables.gfx_table.min) { + dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n", + input[1], dpm_context->dpm_tables.gfx_table.min); + return -EINVAL; + } + smu->gfx_actual_hard_min_freq = input[1]; + } else if (input[0] == 1) { + if (input[1] > dpm_context->dpm_tables.gfx_table.max) { + dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) 
MHz\n", + input[1], dpm_context->dpm_tables.gfx_table.max); + return -EINVAL; + } + smu->gfx_actual_soft_max_freq = input[1]; + } else { + return -EINVAL; + } + break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Input parameter number not correct\n"); + return -EINVAL; + } else { + /* Use the default frequencies for manual and determinism mode */ + min_clk = dpm_context->dpm_tables.gfx_table.min; + max_clk = dpm_context->dpm_tables.gfx_table.max; + + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + } + break; + case PP_OD_COMMIT_DPM_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Input parameter number not correct\n"); + return -EINVAL; + } else { + min_clk = smu->gfx_actual_hard_min_freq; + max_clk = smu->gfx_actual_soft_max_freq; + return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + } + break; + default: + return -ENOSYS; + } + + return ret; +} + +static bool aldebaran_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint32_t feature_mask[2]; + unsigned long feature_enabled; + ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2); + feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | + ((uint64_t)feature_mask[1] << 32)); + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static void aldebaran_fill_i2c_req(SwI2cRequest_t *req, bool write, + uint8_t address, uint32_t numbytes, + uint8_t *data) +{ + int i; + + req->I2CcontrollerPort = 0; + req->I2CSpeed = 2; + req->SlaveAddress = address; + req->NumCmds = numbytes; + + for (i = 0; i < numbytes; i++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[i]; + + /* First 2 bytes are always write for lower 2b EEPROM address */ + if (i < 2) + cmd->CmdConfig = CMDCONFIG_READWRITE_MASK; + else + cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0; + + + /* Add RESTART for read after address filled */ + cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0; + + /* Add STOP in the end */ + cmd->CmdConfig |= (i == (numbytes - 1)) ? 
CMDCONFIG_STOP_MASK : 0; + + /* Fill with data regardless if read or write to simplify code */ + cmd->ReadWriteData = data[i]; + } +} + +static int aldebaran_i2c_read_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t i, ret = 0; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_table *table = &smu_table->driver_table; + + if (numbytes > MAX_SW_I2C_COMMANDS) { + dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n", + numbytes, MAX_SW_I2C_COMMANDS); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + aldebaran_fill_i2c_req(&req, false, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + /* Now read data starting with that address */ + ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, + true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr; + + /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */ + for (i = 0; i < numbytes; i++) + data[i] = res->SwI2cCmds[i].ReadWriteData; + + dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + } else + dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret); + + return ret; +} + +static int aldebaran_i2c_write_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t ret; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + + if (numbytes > MAX_SW_I2C_COMMANDS) { + dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n", + numbytes, MAX_SW_I2C_COMMANDS); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + aldebaran_fill_i2c_req(&req, true, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + /* + * According to EEPROM spec there is a MAX of 10 ms required for + * EEPROM to flush internal RX buffer after STOP was issued at the + * end of write transaction. During this time the EEPROM will not be + * responsive to any more commands - so wait a bit more. 
+ */ + msleep(10); + + } else + dev_err(adev->dev, "aldebaran_i2c_write - error occurred: %x", ret); + + return ret; +} + +static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0; + uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 }; + + for (i = 0; i < num; i++) { + /* + * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at + * once and hence the data needs to be split into chunks, with each + * chunk sent separately + */ + data_size = msgs[i].len - 2; + data_chunk_size = MAX_SW_I2C_COMMANDS - 2; + next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff); + data_ptr = msgs[i].buf + 2; + + for (j = 0; j < data_size / data_chunk_size; j++) { + /* Insert the EEPROM dest address, bits 0-15 */ + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = aldebaran_i2c_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + + memcpy(data_ptr, data_chunk + 2, data_chunk_size); + } else { + memcpy(data_chunk + 2, data_ptr, data_chunk_size); + + ret = aldebaran_i2c_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + } + + if (ret) { + num = -EIO; + goto fail; + } + + next_eeprom_addr += data_chunk_size; + data_ptr += data_chunk_size; + } + + if (data_size % data_chunk_size) { + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = aldebaran_i2c_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + + memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size); + } else { + memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size); + + ret = aldebaran_i2c_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + } + + if (ret) { + num = -EIO; + goto fail; + } + } + } + +fail: + return num; +} + +static u32 aldebaran_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm aldebaran_i2c_algo = { + .master_xfer = aldebaran_i2c_xfer, + .functionality = aldebaran_i2c_func, +}; + +static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int res; + + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &aldebaran_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); + + res = i2c_add_adapter(control); + if (res) + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + + return res; +} + +static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) +{ + i2c_del_adapter(control); +} + +static void aldebaran_get_unique_id(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + SmuMetrics_t *metrics = smu->smu_table.metrics_table; + uint32_t upper32 = 0, lower32 = 0; + int ret; + + mutex_lock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); + if (ret) + goto out_unlock; + + upper32 = metrics->PublicSerialNumUpper32; + lower32 = metrics->PublicSerialNumLower32; + +out_unlock: + mutex_unlock(&smu->metrics_lock); + + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; + sprintf(adev->serial, "%016llx", adev->unique_id);
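+ /* e.g. upper32 0x00001234, lower32 0x56789abc -> unique_id 0x0000123456789abc, serial "0000123456789abc" */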
+} + +static bool aldebaran_is_baco_supported(struct smu_context *smu) +{ + /* aldebaran does not support baco */ + + return false; +} + +static int aldebaran_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); +} + +static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GmiPwrDnControl, + en ? 1 : 0, + NULL); +} + +static const struct throttling_logging_label { + uint32_t feature_mask; + const char *label; +} logging_label[] = { + {(1U << THROTTLER_TEMP_MEM_BIT), "HBM"}, + {(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"}, + {(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"}, + {(1U << THROTTLER_TEMP_VR_SOC_BIT), "VR of SOC rail"}, +}; +static void aldebaran_log_thermal_throttling_event(struct smu_context *smu) +{ + int ret; + int throttler_idx, throttling_events = 0, buf_idx = 0; + struct amdgpu_device *adev = smu->adev; + uint32_t throttler_status; + char log_buf[256]; + + ret = aldebaran_get_smu_metrics_data(smu, + METRICS_THROTTLER_STATUS, + &throttler_status); + if (ret) + return; + + memset(log_buf, 0, sizeof(log_buf)); + for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label); + throttler_idx++) { + if (throttler_status & logging_label[throttler_idx].feature_mask) { + throttling_events++; + buf_idx += snprintf(log_buf + buf_idx, + sizeof(log_buf) - buf_idx, + "%s%s", + throttling_events > 1 ? " and " : "", + logging_label[throttler_idx].label); + if (buf_idx >= sizeof(log_buf)) { + dev_err(adev->dev, "buffer overflow!\n"); + log_buf[sizeof(log_buf) - 1] = '\0'; + break; + } + } + } + + dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. 
%s.\n", + log_buf); + kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status); +} + +static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t esm_ctrl; + + /* TODO: confirm this on real target */ + esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); + if ((esm_ctrl >> 15) & 0x1FFFF) + return (((esm_ctrl >> 8) & 0x3F) + 128); + + return smu_v13_0_get_current_pcie_link_speed(smu); +} + +static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_1 *gpu_metrics = + (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int i, ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + &metrics, + true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureHBM; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + gpu_metrics->average_mm_activity = 0; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + gpu_metrics->energy_accumulator = 0; + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; + gpu_metrics->average_vclk0_frequency = 0; + gpu_metrics->average_dclk0_frequency = 0; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = 0; + + gpu_metrics->pcie_link_width = + smu_v13_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + aldebaran_get_current_pcie_link_speed(smu); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->gfx_activity_acc = metrics.GfxBusyAcc; + gpu_metrics->mem_activity_acc = metrics.DramBusyAcc; + + for (i = 0; i < NUM_HBM_INSTANCES; i++) + gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i]; + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_1); +} + +static int aldebaran_mode2_reset(struct smu_context *smu) +{ + u32 smu_version; + int ret = 0, index; + struct amdgpu_device *adev = smu->adev; + int timeout = 10; + + smu_cmn_get_smc_version(smu, NULL, &smu_version); + + index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, + SMU_MSG_GfxDeviceDriverReset); + + mutex_lock(&smu->message_lock); + if (smu_version >= 0x00441400) { + ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2); + /* This is similar to FLR, wait till max FLR timeout */ + msleep(100); + dev_dbg(smu->adev->dev, "restore config space...\n"); + /* Restore the config space saved during init */ + amdgpu_device_load_pci_state(adev->pdev); + + dev_dbg(smu->adev->dev, "wait for reset ack\n"); + while (ret == 
-ETIME && timeout) { + ret = smu_cmn_wait_for_response(smu); + /* Wait a bit more time for getting ACK */ + if (ret == -ETIME) { + --timeout; + usleep_range(500, 1000); + continue; + } + + if (ret != 1) { + dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n", + SMU_RESET_MODE_2, ret); + goto out; + } + } + + } else { + dev_err(adev->dev, "smu fw 0x%x does not support the MSG_GfxDeviceDriverReset message\n", + smu_version); + } + + if (ret == 1) + ret = 0; +out: + mutex_unlock(&smu->message_lock); + + return ret; +} + +static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) +{ +#if 0 + struct amdgpu_device *adev = smu->adev; + u32 smu_version; + uint32_t val; + /** + * PM FW supports mode1 reset from version 68.07 + */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (smu_version < 0x00440700) + return false; + /** + * mode1 reset relies on PSP, so we should check if + * PSP is alive. + */ + val = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + + return val != 0x0; +#endif + return true; +} + +static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu) +{ + return true; +} + +static int aldebaran_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + return smu_cmn_set_mp1_state(smu, mp1_state); + default: + return -EINVAL; + } + + return 0; +} + +static const struct pptable_funcs aldebaran_ppt_funcs = { + /* init dpm */ + .get_allowed_feature_mask = aldebaran_get_allowed_feature_mask, + /* dpm/clk tables */ + .set_default_dpm_table = aldebaran_set_default_dpm_table, + .populate_umd_state_clk = aldebaran_populate_umd_state_clk, + .get_thermal_temperature_range = aldebaran_get_thermal_temperature_range, + .print_clk_levels = aldebaran_print_clk_levels, + .force_clk_levels = aldebaran_force_clk_levels, + .read_sensor = aldebaran_read_sensor, + .set_performance_level = aldebaran_set_performance_level, + .get_power_limit = aldebaran_get_power_limit, + .is_dpm_running = aldebaran_is_dpm_running, + .get_unique_id = aldebaran_get_unique_id, + .init_microcode = smu_v13_0_init_microcode, + .load_microcode = smu_v13_0_load_microcode, + .fini_microcode = smu_v13_0_fini_microcode, + .init_smc_tables = aldebaran_init_smc_tables, + .fini_smc_tables = smu_v13_0_fini_smc_tables, + .init_power = smu_v13_0_init_power, + .fini_power = smu_v13_0_fini_power, + .check_fw_status = smu_v13_0_check_fw_status, + /* pptable related */ + .setup_pptable = aldebaran_setup_pptable, + .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, + .check_fw_version = smu_v13_0_check_fw_version, + .write_pptable = smu_cmn_write_pptable, + .set_driver_table_location = smu_v13_0_set_driver_table_location, + .set_tool_table_location = smu_v13_0_set_tool_table_location, + .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, + .system_features_control = aldebaran_system_features_control, + .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, + .send_smc_msg = smu_cmn_send_smc_msg, + .get_enabled_mask = smu_cmn_get_enabled_mask, + .feature_is_enabled = smu_cmn_feature_is_enabled, + .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception, + .set_power_limit = smu_v13_0_set_power_limit, + .init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks, + .enable_thermal_alert = smu_v13_0_enable_thermal_alert, + .disable_thermal_alert = smu_v13_0_disable_thermal_alert, + .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate, + .register_irq_handler = 
smu_v13_0_register_irq_handler, + .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, + .baco_is_support = aldebaran_is_baco_supported, + .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, + .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, + .set_df_cstate = aldebaran_set_df_cstate, + .allow_xgmi_power_down = aldebaran_allow_xgmi_power_down, + .log_thermal_throttling_event = aldebaran_log_thermal_throttling_event, + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, + .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .get_gpu_metrics = aldebaran_get_gpu_metrics, + .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, + .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, + .mode1_reset = smu_v13_0_mode1_reset, + .set_mp1_state = aldebaran_set_mp1_state, + .mode2_reset = aldebaran_mode2_reset, + .wait_for_event = smu_v13_0_wait_for_event, + .i2c_init = aldebaran_i2c_control_init, + .i2c_fini = aldebaran_i2c_control_fini, +}; + +void aldebaran_set_ppt_funcs(struct smu_context *smu) +{ + smu->ppt_funcs = &aldebaran_ppt_funcs; + smu->message_map = aldebaran_message_map; + smu->clock_map = aldebaran_clk_map; + smu->feature_map = aldebaran_feature_mask_map; + smu->table_map = aldebaran_table_map; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.h new file mode 100644 index 000000000000..33a85d57cf15 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.h @@ -0,0 +1,72 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __ALDEBARAN_PPT_H__ +#define __ALDEBARAN_PPT_H__ + +#define ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL 0x3 +#define ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL 0x3 +#define ALDEBARAN_UMD_PSTATE_MCLK_LEVEL 0x2 + +#define MAX_DPM_NUMBER 16 +#define MAX_PCIE_CONF 2 + +struct aldebaran_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +struct aldebaran_dpm_state { + uint32_t soft_min_level; + uint32_t soft_max_level; + uint32_t hard_min_level; + uint32_t hard_max_level; +}; + +struct aldebaran_single_dpm_table { + uint32_t count; + struct aldebaran_dpm_state dpm_state; + struct aldebaran_dpm_level dpm_levels[MAX_DPM_NUMBER]; +}; + +struct aldebaran_pcie_table { + uint16_t count; + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; + uint32_t lclk[MAX_PCIE_CONF]; +}; + +struct aldebaran_dpm_table { + struct aldebaran_single_dpm_table soc_table; + struct aldebaran_single_dpm_table gfx_table; + struct aldebaran_single_dpm_table mem_table; + struct aldebaran_single_dpm_table eclk_table; + struct aldebaran_single_dpm_table vclk_table; + struct aldebaran_single_dpm_table dclk_table; + struct aldebaran_single_dpm_table fclk_table; + struct aldebaran_pcie_table pcie_table; +}; + +extern void aldebaran_set_ppt_funcs(struct smu_context *smu); + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c new file mode 100644 index 000000000000..0864083e7435 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -0,0 +1,1837 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/reboot.h> + +#define SMU_13_0_PARTIAL_PPTABLE +#define SWSMU_CODE_LAYER_L3 + +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v13_0.h" +#include "soc15_common.h" +#include "atom.h" +#include "amdgpu_ras.h" +#include "smu_cmn.h" + +#include "asic_reg/thm/thm_13_0_2_offset.h" +#include "asic_reg/thm/thm_13_0_2_sh_mask.h" +#include "asic_reg/mp/mp_13_0_2_offset.h" +#include "asic_reg/mp/mp_13_0_2_sh_mask.h" +#include "asic_reg/smuio/smuio_13_0_2_offset.h" +#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h" + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. 
+ * They are more MGPU friendly. + */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); + +#define SMU13_VOLTAGE_SCALE 4 + +#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms + +#define LINK_WIDTH_MAX 6 +#define LINK_SPEED_MAX 3 + +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE + +static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static const int link_speed[] = {25, 50, 80, 160}; + +int smu_v13_0_init_microcode(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + const char *chip_name; + char fw_name[30]; + int err = 0; + const struct smc_firmware_header_v1_0 *hdr; + const struct common_firmware_header *header; + struct amdgpu_firmware_info *ucode = NULL; + + switch (adev->asic_type) { + case CHIP_ALDEBARAN: + chip_name = "aldebaran"; + break; + default: + dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); + + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + if (err) + goto out; + + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(&hdr->header); + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; + ucode->ucode_id = AMDGPU_UCODE_ID_SMC; + ucode->fw = adev->pm.fw; + header = (const struct common_firmware_header *)ucode->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + +out: + if (err) { + DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; +} + +void smu_v13_0_fini_microcode(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + adev->pm.fw_version = 0; +} + +int smu_v13_0_load_microcode(struct smu_context *smu) +{ +#if 0 + struct amdgpu_device *adev = smu->adev; + const uint32_t *src; + const struct smc_firmware_header_v1_0 *hdr; + uint32_t addr_start = MP1_SRAM; + uint32_t i; + uint32_t smc_fw_size; + uint32_t mp1_fw_flags; + + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + src = (const uint32_t *)(adev->pm.fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + smc_fw_size = hdr->header.ucode_size_bytes; + + for (i = 1; i < smc_fw_size/4 - 1; i++) { + WREG32_PCIE(addr_start, src[i]); + addr_start += 4; + } + + WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), + 1 & MP1_SMN_PUB_CTRL__RESET_MASK); + WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), + 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK); + + for (i = 0; i < adev->usec_timeout; i++) { + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> + MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -ETIME; +#endif + return 0; +} + +int smu_v13_0_check_fw_status(struct smu_context *smu) +{ + struct 
amdgpu_device *adev = smu->adev; + uint32_t mp1_fw_flags; + + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + + if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> + MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) + return 0; + + return -EIO; +} + +int smu_v13_0_check_fw_version(struct smu_context *smu) +{ + uint32_t if_version = 0xff, smu_version = 0xff; + uint16_t smu_major; + uint8_t smu_minor, smu_debug; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); + if (ret) + return ret; + + smu_major = (smu_version >> 16) & 0xffff; + smu_minor = (smu_version >> 8) & 0xff; + smu_debug = (smu_version >> 0) & 0xff; + + switch (smu->adev->asic_type) { + case CHIP_ALDEBARAN: + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; + break; + default: + dev_err(smu->adev->dev, "smu unsupported asic type: %d.\n", smu->adev->asic_type); + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; + break; + } + + dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", + smu_version, smu_major, smu_minor, smu_debug); + + /* + * 1. An if_version mismatch is not critical as our fw is designed + * to be backward compatible. + * 2. New fw usually brings some optimizations, but those are visible + * only with the paired driver. + * Considering the above, we just leave the user a warning message + * instead of halting driver loading. + */ + if (if_version != smu->smc_driver_if_version) { + dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " + "smu fw version = 0x%08x (%d.%d.%d)\n", + smu->smc_driver_if_version, if_version, + smu_version, smu_major, smu_minor, smu_debug); + dev_warn(smu->adev->dev, "SMU driver if version does not match\n"); + } + + return ret; +} + +static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table, + uint32_t *size, uint32_t pptable_id) +{ + struct amdgpu_device *adev = smu->adev; + const struct smc_firmware_header_v2_1 *v2_1; + struct smc_soft_pptable_entry *entries; + uint32_t pptable_count = 0; + int i = 0; + + v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; + entries = (struct smc_soft_pptable_entry *) + ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); + pptable_count = le32_to_cpu(v2_1->pptable_count); + for (i = 0; i < pptable_count; i++) { + if (le32_to_cpu(entries[i].id) == pptable_id) { + *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); + *size = le32_to_cpu(entries[i].ppt_size_bytes); + break; + } + } + + if (i == pptable_count) + return -EINVAL; + + return 0; +} + +int smu_v13_0_setup_pptable(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + const struct smc_firmware_header_v1_0 *hdr; + int ret, index; + uint32_t size = 0; + uint16_t atom_table_size; + uint8_t frev, crev; + void *table; + uint16_t version_major, version_minor; + + if (amdgpu_smu_pptable_id >= 0) { + smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id; + dev_info(adev->dev, "override pptable id %d\n", amdgpu_smu_pptable_id); + } + + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + version_major = le16_to_cpu(hdr->header.header_version_major); + version_minor = le16_to_cpu(hdr->header.header_version_minor); + if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) { + dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id); + switch (version_minor) { + case 1: + ret = smu_v13_0_set_pptable_v2_1(smu, &table, &size, + 
smu->smu_table.boot_values.pp_table_id); + break; + default: + ret = -EINVAL; + break; + } + if (ret) + return ret; + + } else { + dev_info(adev->dev, "use vbios provided pptable\n"); + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + powerplayinfo); + + ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev, + (uint8_t **)&table); + if (ret) + return ret; + size = atom_table_size; + } + + if (!smu->smu_table.power_play_table) + smu->smu_table.power_play_table = table; + if (!smu->smu_table.power_play_table_size) + smu->smu_table.power_play_table_size = size; + + return 0; +} + +int smu_v13_0_init_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + int ret = 0; + + smu_table->driver_pptable = + kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); + if (!smu_table->driver_pptable) { + ret = -ENOMEM; + goto err0_out; + } + + smu_table->max_sustainable_clocks = + kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL); + if (!smu_table->max_sustainable_clocks) { + ret = -ENOMEM; + goto err1_out; + } + + /* Aldebaran does not support OVERDRIVE */ + if (tables[SMU_TABLE_OVERDRIVE].size) { + smu_table->overdrive_table = + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); + if (!smu_table->overdrive_table) { + ret = -ENOMEM; + goto err2_out; + } + + smu_table->boot_overdrive_table = + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); + if (!smu_table->boot_overdrive_table) { + ret = -ENOMEM; + goto err3_out; + } + } + + return 0; + +err3_out: + kfree(smu_table->overdrive_table); +err2_out: + kfree(smu_table->max_sustainable_clocks); +err1_out: + kfree(smu_table->driver_pptable); +err0_out: + return ret; +} + +int smu_v13_0_fini_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + kfree(smu_table->gpu_metrics_table); + kfree(smu_table->boot_overdrive_table); + kfree(smu_table->overdrive_table); + kfree(smu_table->max_sustainable_clocks); + kfree(smu_table->driver_pptable); + smu_table->gpu_metrics_table = NULL; + smu_table->boot_overdrive_table = NULL; + smu_table->overdrive_table = NULL; + smu_table->max_sustainable_clocks = NULL; + smu_table->driver_pptable = NULL; + kfree(smu_table->hardcode_pptable); + smu_table->hardcode_pptable = NULL; + + kfree(smu_table->metrics_table); + kfree(smu_table->watermarks_table); + smu_table->metrics_table = NULL; + smu_table->watermarks_table = NULL; + smu_table->metrics_time = 0; + + kfree(smu_dpm->dpm_context); + kfree(smu_dpm->golden_dpm_context); + kfree(smu_dpm->dpm_current_power_state); + kfree(smu_dpm->dpm_request_power_state); + smu_dpm->dpm_context = NULL; + smu_dpm->golden_dpm_context = NULL; + smu_dpm->dpm_context_size = 0; + smu_dpm->dpm_current_power_state = NULL; + smu_dpm->dpm_request_power_state = NULL; + + return 0; +} + +int smu_v13_0_init_power(struct smu_context *smu) +{ + struct smu_power_context *smu_power = &smu->smu_power; + + if (smu_power->power_context || smu_power->power_context_size != 0) + return -EINVAL; + + smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), + GFP_KERNEL); + if (!smu_power->power_context) + return -ENOMEM; + smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); + + return 0; +} + +int smu_v13_0_fini_power(struct smu_context *smu) +{ + struct smu_power_context *smu_power = &smu->smu_power; + + if 
(!smu_power->power_context || smu_power->power_context_size == 0) + return -EINVAL; + + kfree(smu_power->power_context); + smu_power->power_context = NULL; + smu_power->power_context_size = 0; + + return 0; +} + +static int smu_v13_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, + uint8_t clk_id, + uint8_t syspll_id, + uint32_t *clk_freq) +{ + struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; + struct atom_get_smu_clock_info_output_parameters_v3_1 *output; + int ret, index; + + input.clk_id = clk_id; + input.syspll_id = syspll_id; + input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; + index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, + getsmuclockinfo); + + ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, + (uint32_t *)&input); + if (ret) + return -EINVAL; + + output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; + *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; + + return 0; +} + +int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu) +{ + int ret, index; + uint16_t size; + uint8_t frev, crev; + struct atom_common_table_header *header; + struct atom_firmware_info_v3_4 *v_3_4; + struct atom_firmware_info_v3_3 *v_3_3; + struct atom_firmware_info_v3_1 *v_3_1; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, + (uint8_t **)&header); + if (ret) + return ret; + + if (header->format_revision != 3) { + dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n"); + return -EINVAL; + } + + switch (header->content_revision) { + case 0: + case 1: + case 2: + v_3_1 = (struct atom_firmware_info_v3_1 *)header; + smu->smu_table.boot_values.revision = v_3_1->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = 0; + break; + case 3: + v_3_3 = (struct atom_firmware_info_v3_3 *)header; + smu->smu_table.boot_values.revision = v_3_3->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; + break; + case 4: + default: + v_3_4 = (struct atom_firmware_info_v3_4 *)header; + smu->smu_table.boot_values.revision = v_3_4->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + 
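+ /* Unlike v3_1, which has no pptable id field and leaves pp_table_id at 0 above, v3_3 and v3_4 supply one via pplib_pptable_id below */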
smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id; + break; + } + + smu->smu_table.boot_values.format_revision = header->format_revision; + smu->smu_table.boot_values.content_revision = header->content_revision; + + smu_v13_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU11_SYSPLL0_SOCCLK_ID, + (uint8_t)0, + &smu->smu_table.boot_values.socclk); + + smu_v13_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID, + (uint8_t)0, + &smu->smu_table.boot_values.dcefclk); + + smu_v13_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU11_SYSPLL0_ECLK_ID, + (uint8_t)0, + &smu->smu_table.boot_values.eclk); + + smu_v13_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU11_SYSPLL0_VCLK_ID, + (uint8_t)0, + &smu->smu_table.boot_values.vclk); + + smu_v13_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU11_SYSPLL0_DCLK_ID, + (uint8_t)0, + &smu->smu_table.boot_values.dclk); + + if ((smu->smu_table.boot_values.format_revision == 3) && + (smu->smu_table.boot_values.content_revision >= 2)) + smu_v13_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU11_SYSPLL1_0_FCLK_ID, + (uint8_t)SMU11_SYSPLL1_2_ID, + &smu->smu_table.boot_values.fclk); + + return 0; +} + + +int smu_v13_0_notify_memory_pool_location(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *memory_pool = &smu_table->memory_pool; + int ret = 0; + uint64_t address; + uint32_t address_low, address_high; + + if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL) + return ret; + + address = memory_pool->mc_address; + address_high = (uint32_t)upper_32_bits(address); + address_low = (uint32_t)lower_32_bits(address); + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, + address_high, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, + address_low, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, + (uint32_t)memory_pool->size, NULL); + if (ret) + return ret; + + return ret; +} + +int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) +{ + int ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); + if (ret) + dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!"); + + return ret; +} + +int smu_v13_0_set_driver_table_location(struct smu_context *smu) +{ + struct smu_table *driver_table = &smu->smu_table.driver_table; + int ret = 0; + + if (driver_table->mc_address) { + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetDriverDramAddrHigh, + upper_32_bits(driver_table->mc_address), + NULL); + if (!ret) + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetDriverDramAddrLow, + lower_32_bits(driver_table->mc_address), + NULL); + } + + return ret; +} + +int smu_v13_0_set_tool_table_location(struct smu_context *smu) +{ + int ret = 0; + struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; + + if (tool_table->mc_address) { + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetToolsDramAddrHigh, + upper_32_bits(tool_table->mc_address), + NULL); + if (!ret) + ret = smu_cmn_send_smc_msg_with_param(smu, + 
SMU_MSG_SetToolsDramAddrLow, + lower_32_bits(tool_table->mc_address), + NULL); + } + + return ret; +} + +int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count) +{ + int ret = 0; + + if (!smu->pm_enabled) + return ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); + + return ret; +} + + +int smu_v13_0_set_allowed_mask(struct smu_context *smu) +{ + struct smu_feature *feature = &smu->smu_feature; + int ret = 0; + uint32_t feature_mask[2]; + + mutex_lock(&feature->mutex); + if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) + goto failed; + + bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, + feature_mask[1], NULL); + if (ret) + goto failed; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, + feature_mask[0], NULL); + if (ret) + goto failed; + +failed: + mutex_unlock(&feature->mutex); + return ret; +} + +int smu_v13_0_system_features_control(struct smu_context *smu, + bool en) +{ + struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_mask[2]; + int ret = 0; + + ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : + SMU_MSG_DisableAllSmuFeatures), NULL); + if (ret) + return ret; + + bitmap_zero(feature->enabled, feature->feature_num); + bitmap_zero(feature->supported, feature->feature_num); + + if (en) { + ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2); + if (ret) + return ret; + + bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, + feature->feature_num); + bitmap_copy(feature->supported, (unsigned long *)&feature_mask, + feature->feature_num); + } + + return ret; +} + +int smu_v13_0_notify_display_change(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->pm_enabled) + return ret; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && + smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); + + return ret; +} + + static int +smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, + enum smu_clk_type clock_select) +{ + int ret = 0; + int clk_id; + + if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) || + (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0)) + return 0; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clock_select); + if (clk_id < 0) + return -EINVAL; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, + clk_id << 16, clock); + if (ret) { + dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); + return ret; + } + + if (*clock != 0) + return 0; + + /* if DC limit is zero, return AC limit */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, + clk_id << 16, clock); + if (ret) { + dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!"); + return ret; + } + + return 0; +} + +int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu) +{ + struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks = + smu->smu_table.max_sustainable_clocks; + int ret = 0; + + max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; + max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100; + max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 
100; + max_sustainable_clocks->display_clock = 0xFFFFFFFF; + max_sustainable_clocks->phy_clock = 0xFFFFFFFF; + max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_v13_0_get_max_sustainable_clock(smu, + &(max_sustainable_clocks->uclock), + SMU_UCLK); + if (ret) { + dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!", + __func__); + return ret; + } + } + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + ret = smu_v13_0_get_max_sustainable_clock(smu, + &(max_sustainable_clocks->soc_clock), + SMU_SOCCLK); + if (ret) { + dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!", + __func__); + return ret; + } + } + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { + ret = smu_v13_0_get_max_sustainable_clock(smu, + &(max_sustainable_clocks->dcef_clock), + SMU_DCEFCLK); + if (ret) { + dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!", + __func__); + return ret; + } + + ret = smu_v13_0_get_max_sustainable_clock(smu, + &(max_sustainable_clocks->display_clock), + SMU_DISPCLK); + if (ret) { + dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!", + __func__); + return ret; + } + ret = smu_v13_0_get_max_sustainable_clock(smu, + &(max_sustainable_clocks->phy_clock), + SMU_PHYCLK); + if (ret) { + dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!", + __func__); + return ret; + } + ret = smu_v13_0_get_max_sustainable_clock(smu, + &(max_sustainable_clocks->pixel_clock), + SMU_PIXCLK); + if (ret) { + dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!", + __func__); + return ret; + } + } + + if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) + max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; + + return 0; +} + +int smu_v13_0_get_current_power_limit(struct smu_context *smu, + uint32_t *power_limit) +{ + int power_src; + int ret = 0; + + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) + return -EINVAL; + + power_src = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_PWR, + smu->adev->pm.ac_power ? 
+ SMU_POWER_SOURCE_AC : + SMU_POWER_SOURCE_DC); + if (power_src < 0) + return -EINVAL; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetPptLimit, + power_src << 16, + power_limit); + if (ret) + dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); + + return ret; +} + +int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n) +{ + int ret = 0; + + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { + dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); + return -EOPNOTSUPP; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); + if (ret) { + dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); + return ret; + } + + smu->current_power_limit = n; + + return 0; +} + +int smu_v13_0_enable_thermal_alert(struct smu_context *smu) +{ + if (smu->smu_table.thermal_controller_type) + return amdgpu_irq_get(smu->adev, &smu->irq_source, 0); + + return 0; +} + +int smu_v13_0_disable_thermal_alert(struct smu_context *smu) +{ + return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); +} + +static uint16_t convert_to_vddc(uint8_t vid) +{ + return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE); +} + +int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t vdd = 0, val_vid = 0; + + if (!value) + return -EINVAL; + val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) & + SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> + SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; + + vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid); + + *value = vdd; + + return 0; + +} + +int +smu_v13_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req) +{ + enum amd_pp_clock_type clk_type = clock_req->clock_type; + int ret = 0; + enum smu_clk_type clk_select = 0; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || + smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + switch (clk_type) { + case amd_pp_dcef_clock: + clk_select = SMU_DCEFCLK; + break; + case amd_pp_disp_clock: + clk_select = SMU_DISPCLK; + break; + case amd_pp_pixel_clock: + clk_select = SMU_PIXCLK; + break; + case amd_pp_phy_clock: + clk_select = SMU_PHYCLK; + break; + case amd_pp_mem_clock: + clk_select = SMU_UCLK; + break; + default: + dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__); + ret = -EINVAL; + break; + } + + if (ret) + goto failed; + + if (clk_select == SMU_UCLK && smu->disable_uclk_switch) + return 0; + + ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); + + if(clk_select == SMU_UCLK) + smu->hard_min_uclk_req_from_dal = clk_freq; + } + +failed: + return ret; +} + +uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu) +{ + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) + return AMD_FAN_CTRL_MANUAL; + else + return AMD_FAN_CTRL_AUTO; +} + + static int +smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control) +{ + int ret = 0; + + if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) + return 0; + + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control); + if (ret) + dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!", + __func__, (auto_fan_control ? 
"Start" : "Stop")); + + return ret; +} + + static int +smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) +{ + struct amdgpu_device *adev = smu->adev; + + WREG32_SOC15(THM, 0, regCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2), + CG_FDO_CTRL2, TMIN, 0)); + WREG32_SOC15(THM, 0, regCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2), + CG_FDO_CTRL2, FDO_PWM_MODE, mode)); + + return 0; +} + + int +smu_v13_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t duty100, duty; + uint64_t tmp64; + + if (speed > 100) + speed = 100; + + if (smu_v13_0_auto_fan_control(smu, 0)) + return -EINVAL; + + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1), + CG_FDO_CTRL1, FMAX_DUTY100); + if (!duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + WREG32_SOC15(THM, 0, regCG_FDO_CTRL0, + REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0), + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); + + return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); +} + + int +smu_v13_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode) +{ + int ret = 0; + + switch (mode) { + case AMD_FAN_CTRL_NONE: + ret = smu_v13_0_set_fan_speed_percent(smu, 100); + break; + case AMD_FAN_CTRL_MANUAL: + ret = smu_v13_0_auto_fan_control(smu, 0); + break; + case AMD_FAN_CTRL_AUTO: + ret = smu_v13_0_auto_fan_control(smu, 1); + break; + default: + break; + } + + if (ret) { + dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__); + return -EINVAL; + } + + return ret; +} + +int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed) +{ + struct amdgpu_device *adev = smu->adev; + int ret; + uint32_t tach_period, crystal_clock_freq; + + if (!speed) + return -EINVAL; + + ret = smu_v13_0_auto_fan_control(smu, 0); + if (ret) + return ret; + + crystal_clock_freq = amdgpu_asic_get_xclk(adev); + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); + WREG32_SOC15(THM, 0, regCG_TACH_CTRL, + REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL), + CG_TACH_CTRL, TARGET_PERIOD, + tach_period)); + + ret = smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); + + return ret; +} + +int smu_v13_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate) +{ + int ret = 0; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetXgmiMode, + pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, + NULL); + return ret; +} + +static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + struct smu_context *smu = &adev->smu; + uint32_t low, high; + uint32_t val = 0; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + /* For THM irqs */ + val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); + + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0); + + /* For MP1 SW irqs */ + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + + break; + case AMDGPU_IRQ_STATE_ENABLE: + /* For THM irqs */ + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, + smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, + smu->thermal_range.software_shutdown_temp); + + val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); + val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); + + val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val); + + /* For MP1 SW irqs */ + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + + break; + default: + break; + } + + return 0; +} + +static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu) +{ + return smu_cmn_send_smc_msg(smu, + SMU_MSG_ReenableAcDcInterrupt, + NULL); +} + +#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ +#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ +#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83 + +static int smu_v13_0_irq_process(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct smu_context *smu = &adev->smu; + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + /* + * ctxid is used to distinguish different + * events for SMCToHost interrupt. + */ + uint32_t ctxid = entry->src_data[0]; + uint32_t data; + + if (client_id == SOC15_IH_CLIENTID_THM) { + switch (src_id) { + case THM_11_0__SRCID__THM_DIG_THERM_L2H: + dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n"); + /* + * SW CTF just occurred. + * Try to do a graceful shutdown to prevent further damage. 
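+ * (The SW CTF threshold is the DIG_THERM_INTH limit programmed in + * smu_v13_0_set_irq_state() above; the HW CTF path below arrives via + * the dedicated SMUIO GPIO19 interrupt instead.)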
+ */ + dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); + orderly_poweroff(true); + break; + case THM_11_0__SRCID__THM_DIG_THERM_H2L: + dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); + break; + default: + dev_emerg(adev->dev, "ERROR: GPU thermal interrupt with unknown src id (%d)\n", + src_id); + break; + } + } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) { + dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault (aka CTF) detected!\n"); + /* + * HW CTF just occurred. Shutdown to prevent further damage. + */ + dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); + orderly_poweroff(true); + } else if (client_id == SOC15_IH_CLIENTID_MP1) { + if (src_id == 0xfe) { + /* ACK SMUToHost interrupt */ + data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); + + switch (ctxid) { + case 0x3: + dev_dbg(adev->dev, "Switched to AC mode!\n"); + smu_v13_0_ack_ac_dc_interrupt(&adev->smu); + break; + case 0x4: + dev_dbg(adev->dev, "Switched to DC mode!\n"); + smu_v13_0_ack_ac_dc_interrupt(&adev->smu); + break; + case 0x7: + /* + * Increment the throttle interrupt counter + */ + atomic64_inc(&smu->throttle_int_counter); + + if (!atomic_read(&adev->throttling_logging_enabled)) + return 0; + + if (__ratelimit(&adev->throttling_logging_rs)) + schedule_work(&smu->throttling_logging_work); + + break; + } + } + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = +{ + .set = smu_v13_0_set_irq_state, + .process = smu_v13_0_irq_process, +}; + +int smu_v13_0_register_irq_handler(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct amdgpu_irq_src *irq_src = &smu->irq_source; + int ret = 0; + + irq_src->num_types = 1; + irq_src->funcs = &smu_v13_0_irq_funcs; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_L2H, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_H2L, + irq_src); + if (ret) + return ret; + + /* Register CTF (GPIO_19) interrupt */ + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO, + SMUIO_11_0__SRCID__SMUIO_GPIO19, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, + 0xfe, + irq_src); + if (ret) + return ret; + + return ret; +} + +int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL; + + if (!max_clocks || !table_context->max_sustainable_clocks) + return -EINVAL; + + sustainable_clocks = table_context->max_sustainable_clocks; + + max_clocks->dcfClockInKhz = + (unsigned int) sustainable_clocks->dcef_clock * 1000; + max_clocks->displayClockInKhz = + (unsigned int) sustainable_clocks->display_clock * 1000; + max_clocks->phyClockInKhz = + (unsigned int) sustainable_clocks->phy_clock * 1000; + max_clocks->pixelClockInKhz = + (unsigned int) sustainable_clocks->pixel_clock * 1000; + max_clocks->uClockInKhz = + (unsigned int) sustainable_clocks->uclock * 1000; + max_clocks->socClockInKhz = + (unsigned int) sustainable_clocks->soc_clock * 1000; + max_clocks->dscClockInKhz = 0; + max_clocks->dppClockInKhz = 0; + max_clocks->fabricClockInKhz = 0; + + return 0; +} + +int 
smu_v13_0_set_azalia_d3_pme(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); + + return ret; +} + +int smu_v13_0_mode1_reset(struct smu_context *smu) +{ + u32 smu_version; + int ret = 0; + /* + * PM FW supports SMU_MSG_GfxDeviceDriverReset from version 68.07 + */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (smu_version < 0x00440700) + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); + else + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL); + + if (!ret) + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + + return ret; +} + +static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu, + uint64_t event_arg) +{ + int ret = 0; + + dev_dbg(smu->adev->dev, "waiting for smu reset complete\n"); + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL); + + return ret; +} + +int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, + uint64_t event_arg) +{ + int ret = -EINVAL; + + switch (event) { + case SMU_EVENT_RESET_COMPLETE: + ret = smu_v13_0_wait_for_reset_complete(smu, event_arg); + break; + default: + break; + } + + return ret; +} + +int smu_v13_0_mode2_reset(struct smu_context *smu) +{ + int ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + /* TODO: mode2 reset wait time should be shorter; add an ASIC-specific func if required */ + if (!ret) + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + + return ret; +} + +int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + int ret = 0, clk_id = 0; + uint32_t param = 0; + uint32_t clock_limit; + + if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in MHz units */ + if (min) + *min = clock_limit / 100; + if (max) + *max = clock_limit / 100; + + return 0; + } + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) { + ret = -EINVAL; + goto failed; + } + param = (clk_id & 0xffff) << 16; + + if (max) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); + if (ret) + goto failed; + } + + if (min) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); + if (ret) + goto failed; + } + +failed: + return ret; +} + +int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0, clk_id = 0; + uint32_t param; + + if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) + return 0; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) + return clk_id; + + if (clk_type == SMU_GFXCLK) + amdgpu_gfx_off_ctrl(adev, false); + + if (max > 0) { + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, + param, NULL); + if (ret) + goto out; + } + + if (min > 0) { + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, + param, NULL); + if (ret) + 
+int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
+					  enum smu_clk_type clk_type,
+					  uint32_t min,
+					  uint32_t max)
+{
+	struct amdgpu_device *adev = smu->adev;
+	int ret = 0, clk_id = 0;
+	uint32_t param;
+
+	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	if (clk_type == SMU_GFXCLK)
+		amdgpu_gfx_off_ctrl(adev, false);
+
+	if (max > 0) {
+		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+						      param, NULL);
+		if (ret)
+			goto out;
+	}
+
+	if (min > 0) {
+		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+						      param, NULL);
+		if (ret)
+			goto out;
+	}
+
+out:
+	if (clk_type == SMU_GFXCLK)
+		amdgpu_gfx_off_ctrl(adev, true);
+
+	return ret;
+}
+
+int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
+					  enum smu_clk_type clk_type,
+					  uint32_t min,
+					  uint32_t max)
+{
+	int ret = 0, clk_id = 0;
+	uint32_t param;
+
+	if (min <= 0 && max <= 0)
+		return -EINVAL;
+
+	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	if (max > 0) {
+		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+						      param, NULL);
+		if (ret)
+			return ret;
+	}
+
+	if (min > 0) {
+		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+						      param, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+int smu_v13_0_set_performance_level(struct smu_context *smu,
+				    enum amd_dpm_forced_level level)
+{
+	struct smu_13_0_dpm_context *dpm_context =
+		smu->smu_dpm.dpm_context;
+	struct smu_13_0_dpm_table *gfx_table =
+		&dpm_context->dpm_tables.gfx_table;
+	struct smu_13_0_dpm_table *mem_table =
+		&dpm_context->dpm_tables.uclk_table;
+	struct smu_13_0_dpm_table *soc_table =
+		&dpm_context->dpm_tables.soc_table;
+	struct smu_umd_pstate_table *pstate_table =
+		&smu->pstate_table;
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t sclk_min = 0, sclk_max = 0;
+	uint32_t mclk_min = 0, mclk_max = 0;
+	uint32_t socclk_min = 0, socclk_max = 0;
+	int ret = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		sclk_min = sclk_max = gfx_table->max;
+		mclk_min = mclk_max = mem_table->max;
+		socclk_min = socclk_max = soc_table->max;
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		sclk_min = sclk_max = gfx_table->min;
+		mclk_min = mclk_max = mem_table->min;
+		socclk_min = socclk_max = soc_table->min;
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		sclk_min = gfx_table->min;
+		sclk_max = gfx_table->max;
+		mclk_min = mem_table->min;
+		mclk_max = mem_table->max;
+		socclk_min = soc_table->min;
+		socclk_max = soc_table->max;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+		return 0;
+	default:
+		dev_err(adev->dev, "Invalid performance level %d\n", level);
+		return -EINVAL;
+	}
+
+	/*
+	 * Clear the mclk/socclk ranges for now: only the gfxclk soft
+	 * range below is actually applied.
+	 */
+	mclk_min = mclk_max = 0;
+	socclk_min = socclk_max = 0;
+
+	if (sclk_min && sclk_max) {
+		ret = smu_v13_0_set_soft_freq_limited_range(smu,
+							    SMU_GFXCLK,
+							    sclk_min,
+							    sclk_max);
+		if (ret)
+			return ret;
+	}
+
+	if (mclk_min && mclk_max) {
+		ret = smu_v13_0_set_soft_freq_limited_range(smu,
+							    SMU_MCLK,
+							    mclk_min,
+							    mclk_max);
+		if (ret)
+			return ret;
+	}
+
+	if (socclk_min && socclk_max) {
+		ret = smu_v13_0_set_soft_freq_limited_range(smu,
+							    SMU_SOCCLK,
+							    socclk_min,
+							    socclk_max);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
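Since min and max are sent as independent soft limits, collapsing the range pins the clock, which is exactly what the profiling levels in smu_v13_0_set_performance_level rely on. A hypothetical caller, shown as a sketch:

/* Sketch only: pin GFXCLK to a fixed frequency (MHz) by collapsing
 * the soft min/max range to a single value. */
static int pin_gfxclk(struct smu_context *smu, uint32_t mhz)
{
	return smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
						     mhz, mhz);
}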
+int smu_v13_0_set_power_source(struct smu_context *smu,
+			       enum smu_power_src_type power_src)
+{
+	int pwr_source;
+
+	pwr_source = smu_cmn_to_asic_specific_index(smu,
+						    CMN2ASIC_MAPPING_PWR,
+						    (uint32_t)power_src);
+	if (pwr_source < 0)
+		return -EINVAL;
+
+	return smu_cmn_send_smc_msg_with_param(smu,
+					       SMU_MSG_NotifyPowerSource,
+					       pwr_source,
+					       NULL);
+}
+
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+				    enum smu_clk_type clk_type,
+				    uint16_t level,
+				    uint32_t *value)
+{
+	int ret = 0, clk_id = 0;
+	uint32_t param;
+
+	if (!value)
+		return -EINVAL;
+
+	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	clk_id = smu_cmn_to_asic_specific_index(smu,
+						CMN2ASIC_MAPPING_CLK,
+						clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+	ret = smu_cmn_send_smc_msg_with_param(smu,
+					      SMU_MSG_GetDpmFreqByIndex,
+					      param,
+					      value);
+	if (ret)
+		return ret;
+
+	/*
+	 * BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
+	 * currently unsupported, so mask the flag off
+	 */
+	*value = *value & 0x7fffffff;
+
+	return ret;
+}
+
+int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
+				  enum smu_clk_type clk_type,
+				  uint32_t *value)
+{
+	int ret;
+
+	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+	/* FW returns 0 based max level, increment by one */
+	if (!ret && value)
+		++(*value);
+
+	return ret;
+}
+
+int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
+				   enum smu_clk_type clk_type,
+				   struct smu_13_0_dpm_table *single_dpm_table)
+{
+	int ret = 0;
+	uint32_t clk;
+	int i;
+
+	ret = smu_v13_0_get_dpm_level_count(smu,
+					    clk_type,
+					    &single_dpm_table->count);
+	if (ret) {
+		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+		return ret;
+	}
+
+	for (i = 0; i < single_dpm_table->count; i++) {
+		ret = smu_v13_0_get_dpm_freq_by_index(smu,
+						      clk_type,
+						      i,
+						      &clk);
+		if (ret) {
+			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+			return ret;
+		}
+
+		single_dpm_table->dpm_levels[i].value = clk;
+		single_dpm_table->dpm_levels[i].enabled = true;
+
+		if (i == 0)
+			single_dpm_table->min = clk;
+		else if (i == single_dpm_table->count - 1)
+			single_dpm_table->max = clk;
+	}
+
+	return 0;
+}
+
+int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
+				  enum smu_clk_type clk_type,
+				  uint32_t *min_value,
+				  uint32_t *max_value)
+{
+	uint32_t level_count = 0;
+	int ret = 0;
+
+	if (!min_value && !max_value)
+		return -EINVAL;
+
+	if (min_value) {
+		/* by default, level 0 clock value as min value */
+		ret = smu_v13_0_get_dpm_freq_by_index(smu,
+						      clk_type,
+						      0,
+						      min_value);
+		if (ret)
+			return ret;
+	}
+
+	if (max_value) {
+		ret = smu_v13_0_get_dpm_level_count(smu,
+						    clk_type,
+						    &level_count);
+		if (ret)
+			return ret;
+
+		ret = smu_v13_0_get_dpm_freq_by_index(smu,
+						      clk_type,
+						      level_count - 1,
+						      max_value);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
+{
+	uint32_t width_level;
+
+	width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
+	if (width_level > LINK_WIDTH_MAX)
+		width_level = 0;
+
+	return link_width[width_level];
+}
+
+int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
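link_width[] and link_speed[] are lookup tables defined elsewhere in the file, indexed by the width/speed levels read out of the PCIe registers here. The values below are a sketch of the conventional mapping (lanes, and 0.1 GT/s units respectively), not a quote of the patch:

/* Sketch only: conventional PCIe decode tables for the level values
 * read from PCIE_LC_LINK_WIDTH_CNTL / PCIE_LC_SPEED_CNTL. */
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};	/* lanes */
static const int link_speed[] = {25, 50, 80, 160};		/* 0.1 GT/s */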
+int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
+{
+	uint32_t speed_level;
+
+	speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
+	if (speed_level > LINK_SPEED_MAX)
+		speed_level = 0;
+
+	return link_speed[speed_level];
+}
+
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index bcedd4d92e35..dc7d2e71aa6f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -73,16 +73,16 @@ static void smu_cmn_read_arg(struct smu_context *smu,
 {
 	struct amdgpu_device *adev = smu->adev;
 
-	*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
+	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
-static int smu_cmn_wait_for_response(struct smu_context *smu)
+int smu_cmn_wait_for_response(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
-	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+	uint32_t cur_value, i, timeout = adev->usec_timeout * 20;
 
 	for (i = 0; i < timeout; i++) {
-		cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
 			return cur_value;
 
@@ -93,7 +93,7 @@ static int smu_cmn_wait_for_response(struct smu_context *smu)
 	if (i == timeout)
 		return -ETIME;
 
-	return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
@@ -111,9 +111,9 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
 		return ret;
 	}
 
-	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
-	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
@@ -758,9 +758,15 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 	case METRICS_VERSION(1, 0):
 		structure_size = sizeof(struct gpu_metrics_v1_0);
 		break;
+	case METRICS_VERSION(1, 1):
+		structure_size = sizeof(struct gpu_metrics_v1_1);
+		break;
 	case METRICS_VERSION(2, 0):
 		structure_size = sizeof(struct gpu_metrics_v2_0);
 		break;
+	case METRICS_VERSION(2, 1):
+		structure_size = sizeof(struct gpu_metrics_v2_1);
+		break;
 	default:
 		return;
 	}
@@ -774,3 +780,31 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 
 	header->structure_size = structure_size;
 }
+
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+			  enum pp_mp1_state mp1_state)
+{
+	enum smu_message_type msg;
+	int ret;
+
+	switch (mp1_state) {
+	case PP_MP1_STATE_SHUTDOWN:
+		msg = SMU_MSG_PrepareMp1ForShutdown;
+		break;
+	case PP_MP1_STATE_UNLOAD:
+		msg = SMU_MSG_PrepareMp1ForUnload;
+		break;
+	case PP_MP1_STATE_RESET:
+		msg = SMU_MSG_PrepareMp1ForReset;
+		break;
+	case PP_MP1_STATE_NONE:
+	default:
+		return 0;
+	}
+
+	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
+	if (ret)
+		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+
+	return ret;
+}
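The METRICS_VERSION() cases key on a packed (format revision, content revision) pair, which is why v1_1 and v2_1 slot in beside the existing entries. The macro is essentially the following sketch; see smu_cmn.c for the authoritative definition:

/* Sketch only: frev in the high 16 bits, crev in the low 16 bits. */
#define METRICS_VERSION(a, b)	(((a) << 16) | (b))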
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index c69250185575..da6ff6f024f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -37,6 +37,8 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
 			 enum smu_message_type msg,
 			 uint32_t *read_arg);
 
+int smu_cmn_wait_for_response(struct smu_context *smu);
+
 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
 				   enum smu_cmn2asic_mapping_type type,
 				   uint32_t index);
@@ -99,5 +101,8 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
 
 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+			  enum pp_mp1_state mp1_state);
+
 #endif
 #endif
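With the declaration exported here, per-ASIC swsmu code can hook the shared helper straight into its callback table. A hypothetical hookup, assuming the pptable_funcs member used elsewhere in swsmu (the table name is invented for illustration):

/* Sketch only: reuse the common MP1-state helper in an ASIC table. */
static const struct pptable_funcs my_asic_ppt_funcs = {
	.set_mp1_state = smu_cmn_set_mp1_state,
};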