Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c')
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 717
1 file changed, 496 insertions, 221 deletions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index cd905e41080e..e0eb7ca112e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -34,6 +34,7 @@
 #include "sienna_cichlid_ppt.h"
 #include "renoir_ppt.h"
 #include "vangogh_ppt.h"
+#include "aldebaran_ppt.h"
 #include "amd_pcie.h"
 
 /*
@@ -46,9 +47,26 @@
 #undef pr_info
 #undef pr_debug
 
-size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
-{
-	size_t size = 0;
+static const struct amd_pm_funcs swsmu_pm_funcs;
+static int smu_force_smuclk_levels(struct smu_context *smu,
+				   enum smu_clk_type clk_type,
+				   uint32_t mask);
+static int smu_handle_task(struct smu_context *smu,
+			   enum amd_dpm_forced_level level,
+			   enum amd_pp_task task_id,
+			   bool lock_needed);
+static int smu_reset(struct smu_context *smu);
+static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+static int smu_sys_get_pp_feature_mask(void *handle,
+				       char *buf)
+{
+	struct smu_context *smu = handle;
+	int size = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
@@ -62,8 +80,10 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 	return size;
 }
 
-int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
+static int smu_sys_set_pp_feature_mask(void *handle,
+				       uint64_t new_mask)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -134,6 +154,34 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
 	return ret;
 }
 
+static u32 smu_get_mclk(void *handle, bool low)
+{
+	struct smu_context *smu = handle;
+	uint32_t clk_freq;
+	int ret = 0;
+
+	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
+				     low ? &clk_freq : NULL,
+				     !low ? &clk_freq : NULL);
+	if (ret)
+		return 0;
+	return clk_freq * 100;
+}
+
+static u32 smu_get_sclk(void *handle, bool low)
+{
+	struct smu_context *smu = handle;
+	uint32_t clk_freq;
+	int ret = 0;
+
+	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
+				     low ? &clk_freq : NULL,
+				     !low ? &clk_freq : NULL);
+	if (ret)
+		return 0;
+	return clk_freq * 100;
+}
+
 static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
 					 bool enable)
 {
@@ -209,7 +257,7 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
- * @smu: smu_context pointer
+ * @handle: smu_context pointer
  * @block_type: the IP block to power gate/ungate
  * @gate: to power gate if true, ungate otherwise
  *
@@ -220,9 +268,11 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
  * Under this case, the smu->mutex lock protection is already enforced on
  * the parent API smu_force_performance_level of the call path.
  */
-int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
-			   bool gate)
+static int smu_dpm_set_power_gate(void *handle,
+				  uint32_t block_type,
+				  bool gate)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -279,35 +329,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_
 	if (smu->adev->in_suspend)
 		return;
 
-	/*
-	 * mclk, fclk and socclk are interdependent
-	 * on each other
-	 */
 	if (clk == SMU_MCLK) {
-		/* reset clock dependency */
 		smu->user_dpm_profile.clk_dependency = 0;
-		/* set mclk dependent clocks(fclk and socclk) */
 		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
 	} else if (clk == SMU_FCLK) {
-		/* give priority to mclk, if mclk dependent clocks are set */
+		/* MCLK takes precedence over FCLK */
 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
 			return;
-		/* reset clock dependency */
 		smu->user_dpm_profile.clk_dependency = 0;
-		/* set fclk dependent clocks(mclk and socclk) */
 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
 	} else if (clk == SMU_SOCCLK) {
-		/* give priority to mclk, if mclk dependent clocks are set */
+		/* MCLK takes precedence over SOCCLK */
 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
 			return;
-		/* reset clock dependency */
 		smu->user_dpm_profile.clk_dependency = 0;
-		/* set socclk dependent clocks(mclk and fclk) */
 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
 	} else
-		/* add clk dependencies here, if any */
+		/* Add clk dependencies here, if any */
 		return;
 }
 
@@ -331,7 +371,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 		return;
 
 	/* Enable restore flag */
-	smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
+	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
 
 	/* set the user dpm power limit */
 	if (smu->user_dpm_profile.power_limit) {
@@ -351,11 +391,11 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 		 */
 		if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
 		    smu->user_dpm_profile.clk_mask[clk_type]) {
-			ret = smu_force_clk_levels(smu, clk_type,
+			ret = smu_force_smuclk_levels(smu, clk_type,
 					smu->user_dpm_profile.clk_mask[clk_type]);
 			if (ret)
-				dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
-					clk_type);
+				dev_err(smu->adev->dev,
+					"Failed to set clock type = %d\n", clk_type);
 		}
 	}
 }
@@ -379,8 +419,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
 }
 
-int smu_get_power_num_states(struct smu_context *smu,
-			     struct pp_states_info *state_info)
+static int smu_get_power_num_states(void *handle,
+				    struct pp_states_info *state_info)
 {
 	if (!state_info)
 		return -EINVAL;
@@ -415,8 +455,10 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
 }
 
 
-int smu_sys_get_pp_table(struct smu_context *smu, void **table)
+static int smu_sys_get_pp_table(void *handle,
+				char **table)
 {
+	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
 	uint32_t powerplay_table_size;
 
@@ -440,8 +482,11 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 	return powerplay_table_size;
 }
 
-int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
+static int smu_sys_set_pp_table(void *handle,
+				const char *buf,
+				size_t size)
 {
+	struct smu_context *smu = handle;
 	struct smu_table_context *smu_table = &smu->smu_table;
 	ATOM_COMMON_TABLE_HEADER *header =
			(ATOM_COMMON_TABLE_HEADER *)buf;
 	int ret = 0;
 
@@ -527,6 +572,11 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 	case CHIP_DIMGREY_CAVEFISH:
 		sienna_cichlid_set_ppt_funcs(smu);
 		break;
+	case CHIP_ALDEBARAN:
+		aldebaran_set_ppt_funcs(smu);
+		/* Enable pp_od_clk_voltage node */
+		smu->od_enabled = true;
+		break;
 	case CHIP_RENOIR:
 		renoir_set_ppt_funcs(smu);
 		break;
@@ -553,6 +603,9 @@ static int smu_early_init(void *handle)
 	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
 	smu->smu_baco.platform_support = false;
 
+	adev->powerplay.pp_handle = smu;
+	adev->powerplay.pp_funcs = &swsmu_pm_funcs;
+
 	return smu_set_funcs(adev);
 }
 
@@ -595,6 +648,7 @@ err0_out:
 	return ret;
 }
 
+
 static int smu_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -612,10 +666,12 @@ static int smu_late_init(void *handle)
 		return ret;
 	}
 
-	ret = smu_set_default_od_settings(smu);
-	if (ret) {
-		dev_err(adev->dev, "Failed to setup default OD settings!\n");
-		return ret;
+	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
+		ret = smu_set_default_od_settings(smu);
+		if (ret) {
+			dev_err(adev->dev, "Failed to setup default OD settings!\n");
+			return ret;
+		}
 	}
 
 	ret = smu_populate_umd_state_clk(smu);
@@ -989,6 +1045,10 @@ static int smu_sw_init(void *handle)
 		return ret;
 	}
 
+	/* If there is no way to query fan control mode, fan control is not supported */
+	if (!smu->ppt_funcs->get_fan_control_mode)
+		smu->adev->pm.no_fan = true;
+
 	return 0;
 }
 
@@ -1387,7 +1447,7 @@ static int smu_hw_fini(void *handle)
 	return smu_smc_hw_cleanup(smu);
 }
 
-int smu_reset(struct smu_context *smu)
+static int smu_reset(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret;
@@ -1476,9 +1536,10 @@ static int smu_resume(void *handle)
 	return 0;
 }
 
-int smu_display_configuration_change(struct smu_context *smu,
-				     const struct amd_pp_display_configuration *display_config)
+static int smu_display_configuration_change(void *handle,
+					    const struct amd_pp_display_configuration *display_config)
 {
+	struct smu_context *smu = handle;
 	int index = 0;
 	int num_of_active_display = 0;
 
@@ -1567,6 +1628,18 @@ static int smu_enable_umd_pstate(void *handle,
 	return 0;
 }
 
+static int smu_bump_power_profile_mode(struct smu_context *smu,
+				       long *param,
+				       uint32_t param_size)
+{
+	int ret = 0;
+
+	if (smu->ppt_funcs->set_power_profile_mode)
+		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+
+	return ret;
+}
+
 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 					  enum amd_dpm_forced_level level,
 					  bool skip_display_settings)
@@ -1609,22 +1682,23 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		smu_dpm_ctx->dpm_level = level;
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ?
			index - 1 : 0;
 		workload = smu->workload_setting[index];
 
 		if (smu->power_profile_mode != workload)
-			smu_set_power_profile_mode(smu, &workload, 0, false);
+			smu_bump_power_profile_mode(smu, &workload, 0);
 	}
 
 	return ret;
 }
 
-int smu_handle_task(struct smu_context *smu,
-		    enum amd_dpm_forced_level level,
-		    enum amd_pp_task task_id,
-		    bool lock_needed)
+static int smu_handle_task(struct smu_context *smu,
+			   enum amd_dpm_forced_level level,
+			   enum amd_pp_task task_id,
+			   bool lock_needed)
 {
 	int ret = 0;
 
@@ -1656,10 +1730,22 @@ out:
 	return ret;
 }
 
-int smu_switch_power_profile(struct smu_context *smu,
-			     enum PP_SMC_POWER_PROFILE type,
-			     bool en)
+static int smu_handle_dpm_task(void *handle,
+			       enum amd_pp_task task_id,
+			       enum amd_pm_state_type *user_state)
+{
+	struct smu_context *smu = handle;
+	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+	return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
+
+}
+
+static int smu_switch_power_profile(void *handle,
+				    enum PP_SMC_POWER_PROFILE type,
+				    bool en)
 {
+	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	long workload;
 	uint32_t index;
@@ -1684,16 +1770,18 @@ int smu_switch_power_profile(struct smu_context *smu,
 		workload = smu->workload_setting[index];
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		smu_set_power_profile_mode(smu, &workload, 0, false);
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+		smu_bump_power_profile_mode(smu, &workload, 0);
 
 	mutex_unlock(&smu->mutex);
 
 	return 0;
 }
 
-enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 {
+	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	enum amd_dpm_forced_level level;
 
@@ -1710,8 +1798,10 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 	return level;
 }
 
-int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+static int smu_force_performance_level(void *handle,
+				       enum amd_dpm_forced_level level)
 {
+	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
@@ -1744,8 +1834,9 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
 	return ret;
 }
 
-int smu_set_display_count(struct smu_context *smu, uint32_t count)
+static int smu_set_display_count(void *handle, uint32_t count)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1758,7 +1849,7 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
 	return ret;
 }
 
-int smu_force_clk_levels(struct smu_context *smu,
+static int smu_force_smuclk_levels(struct smu_context *smu,
 			 enum smu_clk_type clk_type,
 			 uint32_t mask)
 {
@@ -1777,7 +1868,7 @@ int smu_force_clk_levels(struct smu_context *smu,
 
 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
+		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
 			smu_set_user_clk_dependencies(smu, clk_type);
 		}
@@ -1788,6 +1879,45 @@ int smu_force_clk_levels(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_force_ppclk_levels(void *handle,
+				  enum pp_clock_type type,
+				  uint32_t mask)
+{
+	struct smu_context *smu = handle;
+	enum smu_clk_type clk_type;
+
+	switch (type) {
+	case PP_SCLK:
+		clk_type = SMU_SCLK; break;
+	case PP_MCLK:
+		clk_type = SMU_MCLK; break;
+	case PP_PCIE:
+		clk_type = SMU_PCIE; break;
+	case PP_SOCCLK:
+		clk_type = SMU_SOCCLK; break;
+	case PP_FCLK:
+		clk_type = SMU_FCLK; break;
+	case PP_DCEFCLK:
+		clk_type = SMU_DCEFCLK; break;
+	case PP_VCLK:
+		clk_type = SMU_VCLK; break;
+	case PP_DCLK:
+		clk_type = SMU_DCLK; break;
+	case OD_SCLK:
+		clk_type = SMU_OD_SCLK; break;
+	case OD_MCLK:
+		clk_type = SMU_OD_MCLK; break;
+	case OD_VDDC_CURVE:
+		clk_type = SMU_OD_VDDC_CURVE; break;
+	case OD_RANGE:
+		clk_type = SMU_OD_RANGE; break;
+	default:
+		return -EINVAL;
+	}
+
+	return smu_force_smuclk_levels(smu, clk_type, mask);
+}
+
 /*
  * On system suspending or resetting, the dpm_enabled
  * flag will be cleared. So that those SMU services which
@@ -1795,48 +1925,30 @@ int smu_force_clk_levels(struct smu_context *smu,
  * However, the mp1 state setting should still be granted
  * even if the dpm_enabled cleared.
  */
-int smu_set_mp1_state(struct smu_context *smu,
-		      enum pp_mp1_state mp1_state)
+static int smu_set_mp1_state(void *handle,
+			     enum pp_mp1_state mp1_state)
 {
-	uint16_t msg;
-	int ret;
+	struct smu_context *smu = handle;
+	int ret = 0;
 
 	if (!smu->pm_enabled)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
 
-	switch (mp1_state) {
-	case PP_MP1_STATE_SHUTDOWN:
-		msg = SMU_MSG_PrepareMp1ForShutdown;
-		break;
-	case PP_MP1_STATE_UNLOAD:
-		msg = SMU_MSG_PrepareMp1ForUnload;
-		break;
-	case PP_MP1_STATE_RESET:
-		msg = SMU_MSG_PrepareMp1ForReset;
-		break;
-	case PP_MP1_STATE_NONE:
-	default:
-		mutex_unlock(&smu->mutex);
-		return 0;
-	}
-
-	ret = smu_send_smc_msg(smu, msg, NULL);
-	/* some asics may not support those messages */
-	if (ret == -EINVAL)
-		ret = 0;
-	if (ret)
-		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+	if (smu->ppt_funcs &&
+	    smu->ppt_funcs->set_mp1_state)
+		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_df_cstate(struct smu_context *smu,
-		      enum pp_df_cstate state)
+static int smu_set_df_cstate(void *handle,
+			     enum pp_df_cstate state)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1893,9 +2005,10 @@ int smu_write_watermarks_table(struct smu_context *smu)
 	return ret;
 }
 
-int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
-		struct pp_smu_wm_range_sets *clock_ranges)
+static int smu_set_watermarks_for_clock_ranges(void *handle,
+					       struct pp_smu_wm_range_sets *clock_ranges)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1973,41 +2086,48 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
 	.funcs = &smu_ip_funcs,
 };
 
-int smu_load_microcode(struct smu_context *smu)
+const struct amdgpu_ip_block_version smu_v13_0_ip_block =
 {
-	int ret = 0;
-
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->load_microcode)
-		ret = smu->ppt_funcs->load_microcode(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 13,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &smu_ip_funcs,
+};
 
-int smu_check_fw_status(struct smu_context *smu)
+static int smu_load_microcode(void *handle)
 {
+	struct smu_context *smu = handle;
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+	if (!smu->pm_enabled)
 		return -EOPNOTSUPP;
 
-	mutex_lock(&smu->mutex);
+	/* This should be used for non PSP loading */
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+		return 0;
 
-	if (smu->ppt_funcs->check_fw_status)
-		ret = smu->ppt_funcs->check_fw_status(smu);
+	if (smu->ppt_funcs->load_microcode) {
+		ret = smu->ppt_funcs->load_microcode(smu);
+		if (ret) {
+			dev_err(adev->dev, "Load microcode failed\n");
+			return ret;
+		}
+	}
 
-	mutex_unlock(&smu->mutex);
+	if (smu->ppt_funcs->check_fw_status) {
+		ret = smu->ppt_funcs->check_fw_status(smu);
+		if (ret) {
+			dev_err(adev->dev, "SMC is not ready\n");
+			return ret;
+		}
+	}
 
 	return ret;
 }
 
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 {
 	int ret = 0;
 
@@ -2021,8 +2141,9 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 	return ret;
 }
 
-int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 {
+	struct smu_context *smu = handle;
 	u32 percent;
 	int ret = 0;
 
@@ -2034,7 +2155,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 	if (smu->ppt_funcs->set_fan_speed_percent) {
 		percent = speed * 100 / smu->fan_max_rpm;
 		ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
 			smu->user_dpm_profile.fan_speed_percent = percent;
 	}
 
@@ -2063,6 +2184,9 @@ int smu_get_power_limit(struct smu_context *smu,
 	case SMU_PPT_LIMIT_CURRENT:
 		*limit = smu->current_power_limit;
 		break;
+	case SMU_PPT_LIMIT_DEFAULT:
+		*limit = smu->default_power_limit;
+		break;
 	case SMU_PPT_LIMIT_MAX:
 		*limit = smu->max_power_limit;
 		break;
@@ -2076,8 +2200,9 @@ int smu_get_power_limit(struct smu_context *smu,
 	return ret;
 }
 
-int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit)
 {
+	struct smu_context *smu = handle;
 	uint32_t limit_type = limit >> 24;
 	int ret = 0;
 
@@ -2104,7 +2229,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 
 	if (smu->ppt_funcs->set_power_limit) {
 		ret = smu->ppt_funcs->set_power_limit(smu, limit);
-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
 			smu->user_dpm_profile.power_limit = limit;
 	}
 
@@ -2114,7 +2239,7 @@ out:
 	return ret;
 }
 
-int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 {
 	int ret = 0;
 
@@ -2131,10 +2256,54 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
 	return ret;
 }
 
-int smu_od_edit_dpm_table(struct smu_context *smu,
-			  enum PP_OD_DPM_TABLE_COMMAND type,
-			  long *input, uint32_t size)
+static int smu_print_ppclk_levels(void *handle,
+				  enum pp_clock_type type,
+				  char *buf)
+{
+	struct smu_context *smu = handle;
+	enum smu_clk_type clk_type;
+
+	switch (type) {
+	case PP_SCLK:
+		clk_type = SMU_SCLK; break;
+	case PP_MCLK:
+		clk_type = SMU_MCLK; break;
+	case PP_PCIE:
+		clk_type = SMU_PCIE; break;
+	case PP_SOCCLK:
+		clk_type = SMU_SOCCLK; break;
+	case PP_FCLK:
+		clk_type = SMU_FCLK; break;
+	case PP_DCEFCLK:
+		clk_type = SMU_DCEFCLK; break;
+	case PP_VCLK:
+		clk_type = SMU_VCLK; break;
+	case PP_DCLK:
+		clk_type = SMU_DCLK; break;
+	case OD_SCLK:
+		clk_type = SMU_OD_SCLK; break;
+	case OD_MCLK:
+		clk_type = SMU_OD_MCLK; break;
+	case OD_VDDC_CURVE:
+		clk_type = SMU_OD_VDDC_CURVE; break;
+	case OD_RANGE:
+		clk_type = SMU_OD_RANGE; break;
+	case OD_VDDGFX_OFFSET:
+		clk_type = SMU_OD_VDDGFX_OFFSET; break;
+	case OD_CCLK:
+		clk_type = SMU_OD_CCLK; break;
+	default:
+		return -EINVAL;
+	}
+
+	return smu_print_smuclk_levels(smu, clk_type, buf);
+}
+
+static int smu_od_edit_dpm_table(void *handle,
+				 enum PP_OD_DPM_TABLE_COMMAND type,
+				 long *input, uint32_t size)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2144,11 +2313,6 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
 
 	if (smu->ppt_funcs->od_edit_dpm_table) {
 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
-		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
-			ret = smu_handle_task(smu,
-					      smu->smu_dpm.dpm_level,
-					      AMD_PP_TASK_READJUST_POWER_STATE,
-					      false);
 	}
 
 	mutex_unlock(&smu->mutex);
@@ -2156,20 +2320,26 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
 	return ret;
 }
 
-int smu_read_sensor(struct smu_context *smu,
-		    enum amd_pp_sensors sensor,
-		    void *data, uint32_t *size)
+static int smu_read_sensor(void *handle,
+			   int sensor,
+			   void *data,
+			   int *size_arg)
 {
+	struct smu_context *smu = handle;
 	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
 	int ret = 0;
+	uint32_t *size, size_val;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (!data || !size)
+	if (!data || !size_arg)
 		return -EINVAL;
 
+	size_val = *size_arg;
+	size = &size_val;
+
 	mutex_lock(&smu->mutex);
 
 	if (smu->ppt_funcs->read_sensor)
@@ -2214,11 +2384,15 @@ int smu_read_sensor(struct smu_context *smu,
 unlock:
 	mutex_unlock(&smu->mutex);
 
+	// assign uint32_t to int
+	*size_arg = size_val;
+
 	return ret;
 }
 
-int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+static int smu_get_power_profile_mode(void *handle, char *buf)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2234,35 +2408,33 @@ int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
 	return ret;
 }
 
-int smu_set_power_profile_mode(struct smu_context *smu,
-			       long *param,
-			       uint32_t param_size,
-			       bool lock_needed)
+static int smu_set_power_profile_mode(void *handle,
+				      long *param,
+				      uint32_t param_size)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
+	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_power_profile_mode)
-		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+	smu_bump_power_profile_mode(smu, param, param_size);
 
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_fan_control_mode(struct smu_context *smu)
+static u32 smu_get_fan_control_mode(void *handle)
 {
-	int ret = 0;
+	struct smu_context *smu = handle;
+	u32 ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
+		return AMD_FAN_CTRL_NONE;
 
 	mutex_lock(&smu->mutex);
 
@@ -2274,18 +2446,18 @@ int smu_get_fan_control_mode(struct smu_context *smu)
 	return ret;
 }
 
-int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return  -EOPNOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&smu->mutex);
 
 	if (smu->ppt_funcs->set_fan_control_mode) {
 		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
 			smu->user_dpm_profile.fan_mode = value;
 	}
 
@@ -2293,14 +2465,23 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
 	/* reset user dpm fan speed */
 	if (!ret && value != AMD_FAN_CTRL_MANUAL &&
-	    smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+	    !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
 		smu->user_dpm_profile.fan_speed_percent = 0;
 
 	return ret;
 }
 
-int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+static void smu_pp_set_fan_control_mode(void *handle, u32 value)
 {
+	struct smu_context *smu = handle;
+
+	smu_set_fan_control_mode(smu, value);
+}
+
+
+static int smu_get_fan_speed_percent(void *handle, u32 *speed)
+{
+	struct smu_context *smu = handle;
 	int ret = 0;
 	uint32_t percent;
 
@@ -2322,8 +2503,9 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 	return ret;
 }
 
-int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+static int smu_set_fan_speed_percent(void *handle, u32 speed)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2335,7 +2517,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 		if (speed > 100)
 			speed = 100;
 		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
-		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
+		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
 			smu->user_dpm_profile.fan_speed_percent = speed;
 	}
 
@@ -2344,8 +2526,9 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 	return ret;
 }
 
-int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 	u32 percent;
 
@@ -2364,8 +2547,9 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 	return ret;
 }
 
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2380,10 +2564,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 	return ret;
 }
 
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-				       enum smu_clk_type clk_type,
-				       struct pp_clock_levels_with_latency *clocks)
+static int smu_get_clock_by_type_with_latency(void *handle,
+					      enum amd_pp_clock_type type,
+					      struct pp_clock_levels_with_latency *clocks)
 {
+	struct smu_context *smu = handle;
+	enum smu_clk_type clk_type;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2391,17 +2577,38 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_clock_by_type_with_latency)
+	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
+		switch (type) {
+		case amd_pp_sys_clock:
+			clk_type = SMU_GFXCLK;
+			break;
+		case amd_pp_mem_clock:
+			clk_type = SMU_MCLK;
+			break;
+		case amd_pp_dcef_clock:
+			clk_type = SMU_DCEFCLK;
+			break;
+		case amd_pp_disp_clock:
+			clk_type = SMU_DISPCLK;
+			break;
+		default:
+			dev_err(smu->adev->dev, "Invalid clock type!\n");
+			mutex_unlock(&smu->mutex);
+			return -EINVAL;
+		}
+
 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+	}
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_display_clock_voltage_request(struct smu_context *smu,
-				      struct pp_display_clock_request *clock_req)
+static int smu_display_clock_voltage_request(void *handle,
+					     struct pp_display_clock_request *clock_req)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2418,8 +2625,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 }
 
 
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+static int smu_display_disable_memory_clock_switch(void *handle,
+						   bool disable_memory_clock_switch)
 {
+	struct smu_context *smu = handle;
 	int ret = -EINVAL;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2435,9 +2644,10 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
 	return ret;
 }
 
-int smu_set_xgmi_pstate(struct smu_context *smu,
-			uint32_t pstate)
+static int smu_set_xgmi_pstate(void *handle,
+			       uint32_t pstate)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2456,101 +2666,78 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
 	return ret;
 }
 
-int smu_set_azalia_d3_pme(struct smu_context *smu)
+static int smu_get_baco_capability(void *handle, bool *cap)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
-	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->set_azalia_d3_pme)
-		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-/*
- * On system suspending or resetting, the dpm_enabled
- * flag will be cleared. So that those SMU services which
- * are not supported will be gated.
- *
- * However, the baco/mode1 reset should still be granted
- * as they are still supported and necessary.
- */
-bool smu_baco_is_support(struct smu_context *smu)
-{
-	bool ret = false;
+	*cap = false;
 
 	if (!smu->pm_enabled)
-		return false;
+		return 0;
 
 	mutex_lock(&smu->mutex);
 
 	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
-		ret = smu->ppt_funcs->baco_is_support(smu);
+		*cap = smu->ppt_funcs->baco_is_support(smu);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
}
 
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
-{
-	if (smu->ppt_funcs->baco_get_state)
-		return -EINVAL;
-
-	mutex_lock(&smu->mutex);
-	*state = smu->ppt_funcs->baco_get_state(smu);
-	mutex_unlock(&smu->mutex);
-
-	return 0;
-}
-
-int smu_baco_enter(struct smu_context *smu)
+static int smu_baco_set_state(void *handle, int state)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled)
 		return -EOPNOTSUPP;
 
-	mutex_lock(&smu->mutex);
+	if (state == 0) {
+		mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->baco_enter)
-		ret = smu->ppt_funcs->baco_enter(smu);
+		if (smu->ppt_funcs->baco_exit)
+			ret = smu->ppt_funcs->baco_exit(smu);
 
-	mutex_unlock(&smu->mutex);
+		mutex_unlock(&smu->mutex);
+	} else if (state == 1) {
+		mutex_lock(&smu->mutex);
+
+		if (smu->ppt_funcs->baco_enter)
+			ret = smu->ppt_funcs->baco_enter(smu);
+
+		mutex_unlock(&smu->mutex);
+
+	} else {
+		return -EINVAL;
+	}
 
 	if (ret)
-		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
+		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
+			(state)?"enter":"exit");
 
 	return ret;
 }
 
-int smu_baco_exit(struct smu_context *smu)
+bool smu_mode1_reset_is_support(struct smu_context *smu)
 {
-	int ret = 0;
+	bool ret = false;
 
 	if (!smu->pm_enabled)
-		return -EOPNOTSUPP;
+		return false;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->baco_exit)
-		ret = smu->ppt_funcs->baco_exit(smu);
+	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
+		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
 
 	mutex_unlock(&smu->mutex);
 
-	if (ret)
-		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
-
 	return ret;
 }
 
-bool smu_mode1_reset_is_support(struct smu_context *smu)
+bool smu_mode2_reset_is_support(struct smu_context *smu)
 {
 	bool ret = false;
 
@@ -2559,8 +2746,8 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
-		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
+	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
+		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
 
 	mutex_unlock(&smu->mutex);
 
@@ -2584,8 +2771,9 @@ int smu_mode1_reset(struct smu_context *smu)
 	return ret;
 }
 
-int smu_mode2_reset(struct smu_context *smu)
+static int smu_mode2_reset(void *handle)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled)
@@ -2604,9 +2792,10 @@ int smu_mode2_reset(struct smu_context *smu)
 	return ret;
 }
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-					 struct pp_smu_nv_clock_table *max_clocks)
+static int smu_get_max_sustainable_clocks_by_dc(void *handle,
+						struct pp_smu_nv_clock_table *max_clocks)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2622,10 +2811,11 @@ int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
 	return ret;
 }
 
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-			    unsigned int *clock_values_in_khz,
-			    unsigned int *num_states)
+static int smu_get_uclk_dpm_states(void *handle,
+				   unsigned int *clock_values_in_khz,
+				   unsigned int *num_states)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2641,8 +2831,9 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 	return ret;
 }
 
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 {
+	struct smu_context *smu = handle;
 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2658,9 +2849,10 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 	return pm_state;
 }
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-			    struct dpm_clocks *clock_table)
+static int smu_get_dpm_clock_table(void *handle,
+				   struct dpm_clocks *clock_table)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2676,9 +2868,9 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
 	return ret;
 }
 
-ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
-				void **table)
+static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
+	struct smu_context *smu = handle;
 	ssize_t size;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2696,8 +2888,9 @@ ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
 	return size;
 }
 
-int smu_enable_mgpu_fan_boost(struct smu_context *smu)
+static int smu_enable_mgpu_fan_boost(void *handle)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2713,8 +2906,10 @@ int smu_enable_mgpu_fan_boost(struct smu_context *smu)
 	return ret;
 }
 
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
+static int smu_gfx_state_change_set(void *handle,
+				    uint32_t state)
 {
+	struct smu_context *smu = handle;
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
@@ -2724,3 +2919,83 @@ int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
 	return ret;
 }
+
+int smu_set_light_sbr(struct smu_context *smu, bool enable)
+{
+	int ret = 0;
+
+	mutex_lock(&smu->mutex);
+	if (smu->ppt_funcs->set_light_sbr)
+		ret = smu->ppt_funcs->set_light_sbr(smu, enable);
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
+
+static const struct amd_pm_funcs swsmu_pm_funcs = {
+	/* export for sysfs */
+	.set_fan_control_mode = smu_pp_set_fan_control_mode,
+	.get_fan_control_mode = smu_get_fan_control_mode,
+	.set_fan_speed_percent = smu_set_fan_speed_percent,
+	.get_fan_speed_percent = smu_get_fan_speed_percent,
+	.force_performance_level = smu_force_performance_level,
+	.read_sensor = smu_read_sensor,
+	.get_performance_level = smu_get_performance_level,
+	.get_current_power_state = smu_get_current_power_state,
+	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
+	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
+	.get_pp_num_states = smu_get_power_num_states,
+	.get_pp_table = smu_sys_get_pp_table,
+	.set_pp_table = smu_sys_set_pp_table,
+	.switch_power_profile = smu_switch_power_profile,
+	/* export to amdgpu */
+	.dispatch_tasks = smu_handle_dpm_task,
+	.set_powergating_by_smu = smu_dpm_set_power_gate,
+	.set_power_limit = smu_set_power_limit,
+	.odn_edit_dpm_table = smu_od_edit_dpm_table,
+	.set_mp1_state = smu_set_mp1_state,
+	/* export to DC */
+	.get_sclk = smu_get_sclk,
+	.get_mclk = smu_get_mclk,
+	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
+	.get_asic_baco_capability = smu_get_baco_capability,
+	.set_asic_baco_state = smu_baco_set_state,
+	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
+	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
+	.asic_reset_mode_2 = smu_mode2_reset,
+	.set_df_cstate = smu_set_df_cstate,
+	.set_xgmi_pstate = smu_set_xgmi_pstate,
+	.get_gpu_metrics = smu_sys_get_gpu_metrics,
+	.set_power_profile_mode = smu_set_power_profile_mode,
+	.get_power_profile_mode = smu_get_power_profile_mode,
+	.force_clock_level = smu_force_ppclk_levels,
+	.print_clock_levels = smu_print_ppclk_levels,
+	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
+	.get_dpm_clock_table = smu_get_dpm_clock_table,
+	.display_configuration_change = smu_display_configuration_change,
+	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
+	.display_clock_voltage_request = smu_display_clock_voltage_request,
+	.set_active_display_count = smu_set_display_count,
+	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
+	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
+	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
+	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
+	.load_firmware = smu_load_microcode,
+	.gfx_state_change_set = smu_gfx_state_change_set,
+};
+
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+		       uint64_t event_arg)
+{
+	int ret = -EINVAL;
+	struct smu_context *smu = &adev->smu;
+
+	if (smu->ppt_funcs->wait_for_event) {
+		mutex_lock(&smu->mutex);
+		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
+		mutex_unlock(&smu->mutex);
+	}
+
+	return ret;
+}
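
The structural pattern of this patch is easiest to see in isolation: each exported smu_* entry point becomes static, takes an opaque void *handle instead of a typed struct smu_context *, and is published through the file-scope swsmu_pm_funcs table that smu_early_init() registers via adev->powerplay.pp_handle and adev->powerplay.pp_funcs. The following is a minimal, self-contained user-space sketch of that handle/vtable idiom, not kernel code; all demo_* names are hypothetical stand-ins for struct smu_context, struct amd_pm_funcs, and the swsmu callbacks.

#include <stdio.h>
#include <stdint.h>

struct demo_ctx {			/* stand-in for struct smu_context */
	uint32_t power_limit;
	int pm_enabled;
};

struct demo_pm_funcs {			/* stand-in for struct amd_pm_funcs */
	int (*set_power_limit)(void *handle, uint32_t limit);
};

static int demo_set_power_limit(void *handle, uint32_t limit)
{
	/* same cast-from-opaque-handle idiom the patch adds to every callback */
	struct demo_ctx *ctx = handle;

	if (!ctx->pm_enabled)
		return -1;		/* the kernel code returns -EOPNOTSUPP here */
	ctx->power_limit = limit;
	return 0;
}

/* file-scope const dispatch table, mirroring
 * "static const struct amd_pm_funcs swsmu_pm_funcs" above */
static const struct demo_pm_funcs demo_pm_funcs = {
	.set_power_limit = demo_set_power_limit,
};

int main(void)
{
	struct demo_ctx ctx = { .pm_enabled = 1 };
	void *handle = &ctx;		/* what pp_handle carries in the patch */
	const struct demo_pm_funcs *funcs = &demo_pm_funcs;

	/* callers go through the table, never through the symbol directly */
	if (!funcs->set_power_limit(handle, 200))
		printf("limit now %u W\n", (unsigned)ctx.power_limit);
	return 0;
}

The payoff, visible throughout the diff, is that amdgpu's generic power-management layer can dispatch through amd_pm_funcs without knowing whether the legacy powerplay code or swsmu sits behind the handle, which is why the functions no longer need to be exported.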