author     Maxime Ripard <maxime.ripard@bootlin.com>   2019-07-22 22:24:10 +0300
committer  Maxime Ripard <maxime.ripard@bootlin.com>   2019-07-22 22:24:10 +0300
commit     03b0f2ce735e97e9f49790d4563c82515b8fa702 (patch)
tree       da561805bffd06bfba81c867f83cacb28f3a64e5 /drivers/gpu/drm/amd/powerplay
parent     e4f86e43716443e934d705952902d40de0fa9a05 (diff)
parent     5f9e832c137075045d15cd6899ab0505cfb2ca4b (diff)
Merge v5.3-rc1 into drm-misc-next
Noralf needs some SPI patches in 5.3 to merge some work on tinydrm.
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
28 files changed, 4777 insertions, 1146 deletions
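Among other changes, the patch below adds a set of per-clock DPM helpers to the common SMU layer (smu_get_dpm_freq_range(), smu_set_soft_freq_range(), smu_set_hard_freq_range()), keyed by the new enum smu_clk_type. The following is only a minimal sketch of how a caller might use those helpers, based solely on the prototypes added to amdgpu_smu.h in this patch; the function name and its clamping policy are illustrative assumptions, not part of the commit.

/*
 * Illustrative sketch only (not part of this patch): cap the GFXCLK soft
 * limit to what the SMU firmware currently allows.  Prototypes are taken
 * from the amdgpu_smu.h hunk below; the helper name and clamping policy
 * are assumptions for demonstration.
 */
static int example_cap_gfxclk(struct smu_context *smu, uint32_t new_max)
{
	uint32_t min = 0, max = 0;
	int ret;

	/* Query the DPM frequency range the firmware exposes for GFXCLK. */
	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
	if (ret)
		return ret;

	if (new_max > max)
		new_max = max;

	/*
	 * Request a soft (advisory) limit; a hard limit would use
	 * smu_set_hard_freq_range() instead.
	 */
	return smu_set_soft_freq_range(smu, SMU_GFXCLK, min, new_max);
}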
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile index ec87b3430d12..727c5cff231c 100644 --- a/drivers/gpu/drm/amd/powerplay/Makefile +++ b/drivers/gpu/drm/amd/powerplay/Makefile @@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$( include $(AMD_POWERPLAY) -POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o +POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o navi10_ppt.o AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 9c67adee2c9e..f1565c448de5 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -60,6 +60,191 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t return ret; } +int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) +{ + int ret = 0, clk_id = 0; + uint32_t param; + + if (min <= 0 && max <= 0) + return -EINVAL; + + if (!smu_clk_dpm_is_enabled(smu, clk_type)) + return 0; + + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return clk_id; + + if (max > 0) { + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, + param); + if (ret) + return ret; + } + + if (min > 0) { + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, + param); + if (ret) + return ret; + } + + + return ret; +} + +int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) +{ + int ret = 0, clk_id = 0; + uint32_t param; + + if (min <= 0 && max <= 0) + return -EINVAL; + + if (!smu_clk_dpm_is_enabled(smu, clk_type)) + return 0; + + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return clk_id; + + if (max > 0) { + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, + param); + if (ret) + return ret; + } + + if (min > 0) { + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, + param); + if (ret) + return ret; + } + + + return ret; +} + +int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + int ret = 0, clk_id = 0; + uint32_t param = 0; + + if (!min && !max) + return -EINVAL; + + if (!smu_clk_dpm_is_enabled(smu, clk_type)) + return 0; + + mutex_lock(&smu->mutex); + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) { + ret = -EINVAL; + goto failed; + } + + param = (clk_id & 0xffff) << 16; + + if (max) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param); + if (ret) + goto failed; + ret = smu_read_smc_arg(smu, max); + if (ret) + goto failed; + } + + if (min) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param); + if (ret) + goto failed; + ret = smu_read_smc_arg(smu, min); + if (ret) + goto failed; + } + +failed: + mutex_unlock(&smu->mutex); + return ret; +} + +int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, + uint16_t level, uint32_t *value) +{ + int ret = 0, clk_id = 0; + uint32_t param; + + if (!value) + return -EINVAL; + + if (!smu_clk_dpm_is_enabled(smu, clk_type)) + return 0; + + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return 
clk_id; + + param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); + + ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex, + param); + if (ret) + return ret; + + ret = smu_read_smc_arg(smu, ¶m); + if (ret) + return ret; + + /* BIT31: 0 - Fine grained DPM, 1 - Dicrete DPM + * now, we un-support it */ + *value = param & 0x7fffffff; + + return ret; +} + +int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *value) +{ + return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value); +} + +bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) +{ + enum smu_feature_mask feature_id = 0; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + feature_id = SMU_FEATURE_DPM_UCLK_BIT; + break; + case SMU_GFXCLK: + case SMU_SCLK: + feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; + break; + case SMU_SOCCLK: + feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; + break; + default: + return true; + } + + if(!smu_feature_is_enabled(smu, feature_id)) { + pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id); + return false; + } + + return true; +} + + int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate) { @@ -72,6 +257,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, case AMD_IP_BLOCK_TYPE_VCE: ret = smu_dpm_set_vce_enable(smu, gate); break; + case AMD_IP_BLOCK_TYPE_GFX: + ret = smu_gfx_off_control(smu, gate); + break; default: break; } @@ -116,6 +304,14 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2); *size = 8; break; + case AMDGPU_PP_SENSOR_UVD_POWER: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VCE_POWER: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; + *size = 4; + break; default: ret = -EINVAL; break; @@ -127,20 +323,18 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, return ret; } -int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg, +int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument, void *table_data, bool drv2smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = NULL; int ret = 0; - uint32_t table_index; + int table_id = smu_table_get_index(smu, table_index); if (!table_data || table_id >= smu_table->table_count) return -EINVAL; - table_index = (exarg << 16) | table_id; - - table = &smu_table->tables[table_id]; + table = &smu_table->tables[table_index]; if (drv2smu) memcpy(table->cpu_addr, table_data, table->size); @@ -156,7 +350,7 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16 ret = smu_send_smc_msg_with_param(smu, drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram, - table_index); + table_id | ((argument & 0xFFFF) << 16)); if (ret) return ret; @@ -168,13 +362,12 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16 bool is_support_sw_smu(struct amdgpu_device *adev) { - if (amdgpu_dpm != 1) - return false; - - if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN) + if (adev->asic_type == CHIP_VEGA20) + return (amdgpu_dpm == 2) ? 
true : false; + else if (adev->asic_type >= CHIP_NAVI10) return true; - - return false; + else + return false; } int smu_sys_get_pp_table(struct smu_context *smu, void **table) @@ -233,33 +426,36 @@ int smu_feature_init_dpm(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; int ret = 0; - uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32]; + uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; if (!smu->pm_enabled) return ret; mutex_lock(&feature->mutex); - bitmap_fill(feature->allowed, SMU_FEATURE_MAX); + bitmap_zero(feature->allowed, SMU_FEATURE_MAX); mutex_unlock(&feature->mutex); - ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask, + ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, SMU_FEATURE_MAX/32); if (ret) return ret; mutex_lock(&feature->mutex); - bitmap_andnot(feature->allowed, feature->allowed, - (unsigned long *)unallowed_feature_mask, + bitmap_or(feature->allowed, feature->allowed, + (unsigned long *)allowed_feature_mask, feature->feature_num); mutex_unlock(&feature->mutex); return ret; } -int smu_feature_is_enabled(struct smu_context *smu, int feature_id) +int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_id; int ret = 0; + feature_id = smu_feature_get_index(smu, mask); + WARN_ON(feature_id > feature->feature_num); mutex_lock(&feature->mutex); @@ -269,11 +465,15 @@ int smu_feature_is_enabled(struct smu_context *smu, int feature_id) return ret; } -int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable) +int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, + bool enable) { struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_id; int ret = 0; + feature_id = smu_feature_get_index(smu, mask); + WARN_ON(feature_id > feature->feature_num); mutex_lock(&feature->mutex); @@ -292,11 +492,14 @@ failed: return ret; } -int smu_feature_is_supported(struct smu_context *smu, int feature_id) +int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) { struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_id; int ret = 0; + feature_id = smu_feature_get_index(smu, mask); + WARN_ON(feature_id > feature->feature_num); mutex_lock(&feature->mutex); @@ -306,12 +509,16 @@ int smu_feature_is_supported(struct smu_context *smu, int feature_id) return ret; } -int smu_feature_set_supported(struct smu_context *smu, int feature_id, +int smu_feature_set_supported(struct smu_context *smu, + enum smu_feature_mask mask, bool enable) { struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_id; int ret = 0; + feature_id = smu_feature_get_index(smu, mask); + WARN_ON(feature_id > feature->feature_num); mutex_lock(&feature->mutex); @@ -330,7 +537,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA20: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + case CHIP_NAVI10: if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; smu_v11_0_set_smu_funcs(smu); @@ -450,6 +657,11 @@ static int smu_sw_init(void *handle) bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); + + mutex_init(&smu->smu_baco.mutex); + smu->smu_baco.state = SMU_BACO_STATE_EXIT; + smu->smu_baco.platform_support = false; + smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -622,17 +834,17 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return 0; } - ret = smu_init_display(smu); + ret = smu_init_display_count(smu, 0); if (ret) return ret; if (initialize) { - ret = smu_read_pptable_from_vbios(smu); + /* get boot_values from vbios to set revision, gfxclk, and etc. */ + ret = smu_get_vbios_bootup_values(smu); if (ret) return ret; - /* get boot_values from vbios to set revision, gfxclk, and etc. */ - ret = smu_get_vbios_bootup_values(smu); + ret = smu_setup_pptable(smu); if (ret) return ret; @@ -725,7 +937,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; } - ret = smu_set_od8_default_settings(smu, initialize); + ret = smu_set_default_od_settings(smu, initialize); if (ret) return ret; @@ -819,20 +1031,14 @@ static int smu_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - ret = smu_load_microcode(smu); - if (ret) + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + ret = smu_check_fw_status(smu); + if (ret) { + pr_err("SMC firmware status is not correct\n"); return ret; + } } - ret = smu_check_fw_status(smu); - if (ret) { - pr_err("SMC firmware status is not correct\n"); - return ret; - } - - mutex_lock(&smu->mutex); - ret = smu_feature_init_dpm(smu); if (ret) goto failed; @@ -857,19 +1063,20 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - mutex_unlock(&smu->mutex); + ret = smu_register_irq_handler(smu); + if (ret) + goto failed; if (!smu->pm_enabled) adev->pm.dpm_enabled = false; else - adev->pm.dpm_enabled = true; + adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */ pr_info("SMU is initialized successfully!\n"); return 0; failed: - mutex_unlock(&smu->mutex); return ret; } @@ -886,20 +1093,11 @@ static int smu_hw_fini(void *handle) kfree(table_context->max_sustainable_clocks); table_context->max_sustainable_clocks = NULL; - kfree(table_context->od_feature_capabilities); - table_context->od_feature_capabilities = NULL; - - kfree(table_context->od_settings_max); - table_context->od_settings_max = NULL; - - kfree(table_context->od_settings_min); - table_context->od_settings_min = NULL; - kfree(table_context->overdrive_table); table_context->overdrive_table = NULL; - kfree(table_context->od8_settings); - table_context->od8_settings = NULL; + kfree(smu->irq_source); + smu->irq_source = NULL; ret = smu_fini_fb_allocations(smu); if (ret) @@ -933,13 +1131,26 @@ static int smu_suspend(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; + bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); ret = smu_system_features_control(smu, false); if (ret) return ret; + if (adev->in_gpu_reset && baco_feature_is_enabled) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); + if (ret) { + pr_warn("set BACO feature enabled failed, return %d\n", ret); + return ret; + } + } + smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); + if (adev->asic_type >= CHIP_NAVI10 && + adev->gfx.rlc.funcs->stop) + adev->gfx.rlc.funcs->stop(adev); + return 0; } @@ -1184,10 +1395,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, break; case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: ret = smu_unforce_dpm_levels(smu); break; - case 
AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: @@ -1197,8 +1408,9 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, &soc_mask); if (ret) return ret; - smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask); + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); break; case AMD_DPM_FORCED_LEVEL_MANUAL: @@ -1250,6 +1462,60 @@ int smu_handle_task(struct smu_context *smu, return ret; } +enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + enum amd_dpm_forced_level level; + + if (!smu_dpm_ctx->dpm_context) + return -EINVAL; + + mutex_lock(&(smu->mutex)); + level = smu_dpm_ctx->dpm_level; + mutex_unlock(&(smu->mutex)); + + return level; +} + +int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + int i; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + + if (!smu_dpm_ctx->dpm_context) + return -EINVAL; + + for (i = 0; i < smu->adev->num_ip_blocks; i++) { + if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) + break; + } + + + smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level); + ret = smu_handle_task(smu, level, + AMD_PP_TASK_READJUST_POWER_STATE); + if (ret) + return ret; + + mutex_lock(&smu->mutex); + smu_dpm_ctx->dpm_level = level; + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_display_count(struct smu_context *smu, uint32_t count) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + ret = smu_init_display_count(smu, count); + mutex_unlock(&smu->mutex); + + return ret; +} + const struct amd_ip_funcs smu_ip_funcs = { .name = "smu", .early_init = smu_early_init, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index f1d326caf69e..a24beaa4fb01 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -194,6 +194,7 @@ int hwmgr_sw_init(struct pp_hwmgr *hwmgr) return -EINVAL; phm_register_irq_handlers(hwmgr); + pr_info("hwmgr_sw_init smu backed is %s\n", hwmgr->smumgr_funcs->name); return hwmgr->smumgr_funcs->smu_init(hwmgr); } @@ -326,7 +327,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) if (ret) return ret; - ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL); + ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index ae64ff7153d6..b760f95e7fa7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -916,8 +916,10 @@ static int init_thermal_controller( PHM_PlatformCaps_ThermalController ); - if (0 == powerplay_table->usFanTableOffset) + if (0 == powerplay_table->usFanTableOffset) { + hwmgr->thermal_controller.use_hw_fan_control = 1; return 0; + } fan_table = (const PPTable_Generic_SubTable_Header *) (((unsigned long)powerplay_table) + @@ -1065,8 +1067,6 @@ static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable), "Failed to allocate hwmgr->pptable!", return -ENOMEM); - memset(hwmgr->pptable, 0x00, sizeof(struct 
phm_ppt_v1_information)); - powerplay_table = get_powerplay_table(hwmgr); PP_ASSERT_WITH_CODE((NULL != powerplay_table), diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c5986d28fbf1..487aeee1cf8a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3495,7 +3495,7 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) ixSMU_PM_STATUS_95, 0); for (i = 0; i < 10; i++) { - mdelay(500); + msleep(500); smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); tmp = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 3eb1de9ecf73..1af992fb0bde 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -25,6 +25,11 @@ #include "amdgpu.h" #include "kgd_pp_interface.h" #include "dm_pp_interface.h" +#include "dm_pp_smu.h" + +#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0 +#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255 +#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 struct smu_hw_power_state { unsigned int magic; @@ -106,6 +111,13 @@ struct smu_state_software_algorithm_block { struct smu_temperature_range { int min; int max; + int edge_emergency_max; + int hotspot_min; + int hotspot_crit_max; + int hotspot_emergency_max; + int mem_min; + int mem_crit_max; + int mem_emergency_max; }; struct smu_state_validation_block { @@ -224,9 +236,102 @@ enum smu_message_type SMU_MSG_PrepareMp1ForShutdown, SMU_MSG_SetMGpuFanBoostLimitRpm, SMU_MSG_GetAVFSVoltageByDpm, + SMU_MSG_PowerUpVcn, + SMU_MSG_PowerDownVcn, + SMU_MSG_PowerUpJpeg, + SMU_MSG_PowerDownJpeg, + SMU_MSG_BacoAudioD3PME, + SMU_MSG_ArmD3, SMU_MSG_MAX_COUNT, }; +enum smu_clk_type +{ + SMU_GFXCLK, + SMU_VCLK, + SMU_DCLK, + SMU_ECLK, + SMU_SOCCLK, + SMU_UCLK, + SMU_DCEFCLK, + SMU_DISPCLK, + SMU_PIXCLK, + SMU_PHYCLK, + SMU_FCLK, + SMU_SCLK, + SMU_MCLK, + SMU_PCIE, + SMU_OD_SCLK, + SMU_OD_MCLK, + SMU_OD_VDDC_CURVE, + SMU_OD_RANGE, + SMU_CLK_COUNT, +}; + +enum smu_power_src_type +{ + SMU_POWER_SOURCE_AC, + SMU_POWER_SOURCE_DC, + SMU_POWER_SOURCE_COUNT, +}; + +enum smu_feature_mask +{ + SMU_FEATURE_DPM_PREFETCHER_BIT, + SMU_FEATURE_DPM_GFXCLK_BIT, + SMU_FEATURE_DPM_UCLK_BIT, + SMU_FEATURE_DPM_SOCCLK_BIT, + SMU_FEATURE_DPM_UVD_BIT, + SMU_FEATURE_DPM_VCE_BIT, + SMU_FEATURE_ULV_BIT, + SMU_FEATURE_DPM_MP0CLK_BIT, + SMU_FEATURE_DPM_LINK_BIT, + SMU_FEATURE_DPM_DCEFCLK_BIT, + SMU_FEATURE_DS_GFXCLK_BIT, + SMU_FEATURE_DS_SOCCLK_BIT, + SMU_FEATURE_DS_LCLK_BIT, + SMU_FEATURE_PPT_BIT, + SMU_FEATURE_TDC_BIT, + SMU_FEATURE_THERMAL_BIT, + SMU_FEATURE_GFX_PER_CU_CG_BIT, + SMU_FEATURE_RM_BIT, + SMU_FEATURE_DS_DCEFCLK_BIT, + SMU_FEATURE_ACDC_BIT, + SMU_FEATURE_VR0HOT_BIT, + SMU_FEATURE_VR1HOT_BIT, + SMU_FEATURE_FW_CTF_BIT, + SMU_FEATURE_LED_DISPLAY_BIT, + SMU_FEATURE_FAN_CONTROL_BIT, + SMU_FEATURE_GFX_EDC_BIT, + SMU_FEATURE_GFXOFF_BIT, + SMU_FEATURE_CG_BIT, + SMU_FEATURE_DPM_FCLK_BIT, + SMU_FEATURE_DS_FCLK_BIT, + SMU_FEATURE_DS_MP1CLK_BIT, + SMU_FEATURE_DS_MP0CLK_BIT, + SMU_FEATURE_XGMI_BIT, + SMU_FEATURE_DPM_GFX_PACE_BIT, + SMU_FEATURE_MEM_VDDCI_SCALING_BIT, + SMU_FEATURE_MEM_MVDD_SCALING_BIT, + SMU_FEATURE_DS_UCLK_BIT, + SMU_FEATURE_GFX_ULV_BIT, + SMU_FEATURE_FW_DSTATE_BIT, + SMU_FEATURE_BACO_BIT, + SMU_FEATURE_VCN_PG_BIT, + SMU_FEATURE_JPEG_PG_BIT, + SMU_FEATURE_USB_PG_BIT, + SMU_FEATURE_RSMU_SMN_CG_BIT, + SMU_FEATURE_APCC_PLUS_BIT, + SMU_FEATURE_GTHR_BIT, + 
SMU_FEATURE_GFX_DCS_BIT, + SMU_FEATURE_GFX_SS_BIT, + SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, + SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT, + SMU_FEATURE_MMHUB_PG_BIT, + SMU_FEATURE_ATHUB_PG_BIT, + SMU_FEATURE_COUNT, +}; + enum smu_memory_pool_size { SMU_MEMORY_POOL_SIZE_ZERO = 0, @@ -293,11 +398,30 @@ struct smu_bios_boot_up_values uint32_t pp_table_id; }; +enum smu_table_id +{ + SMU_TABLE_PPTABLE = 0, + SMU_TABLE_WATERMARKS, + SMU_TABLE_AVFS, + SMU_TABLE_AVFS_PSM_DEBUG, + SMU_TABLE_AVFS_FUSE_OVERRIDE, + SMU_TABLE_PMSTATUSLOG, + SMU_TABLE_SMU_METRICS, + SMU_TABLE_DRIVER_SMU_CONFIG, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + SMU_TABLE_OVERDRIVE, + SMU_TABLE_I2C_COMMANDS, + SMU_TABLE_PACE, + SMU_TABLE_COUNT, +}; + struct smu_table_context { void *power_play_table; uint32_t power_play_table_size; void *hardcode_pptable; + unsigned long metrics_time; + void *metrics_table; void *max_sustainable_clocks; struct smu_bios_boot_up_values boot_values; @@ -309,13 +433,7 @@ struct smu_table_context uint8_t thermal_controller_type; uint16_t TDPODLimit; - uint8_t *od_feature_capabilities; - uint32_t *od_settings_max; - uint32_t *od_settings_min; void *overdrive_table; - void *od8_settings; - bool od_gfxclk_update; - bool od_memclk_update; }; struct smu_dpm_context { @@ -331,9 +449,15 @@ struct smu_dpm_context { struct mclock_latency_table *mclk_latency_table; }; +struct smu_power_gate { + bool uvd_gated; + bool vce_gated; +}; + struct smu_power_context { void *power_context; uint32_t power_context_size; + struct smu_power_gate power_gate; }; @@ -366,10 +490,24 @@ struct mclock_latency_table { struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; }; +enum smu_baco_state +{ + SMU_BACO_STATE_ENTER = 0, + SMU_BACO_STATE_EXIT, +}; + +struct smu_baco_context +{ + struct mutex mutex; + uint32_t state; + bool platform_support; +}; + #define WORKLOAD_POLICY_MAX 7 struct smu_context { struct amdgpu_device *adev; + struct amdgpu_irq_src *irq_source; const struct smu_funcs *funcs; const struct pptable_funcs *ppt_funcs; @@ -381,6 +519,8 @@ struct smu_context struct smu_power_context smu_power; struct smu_feature smu_feature; struct amd_pp_display_configuration *display_config; + struct smu_baco_context smu_baco; + void *od_settings; uint32_t pstate_sclk; uint32_t pstate_mclk; @@ -389,6 +529,11 @@ struct smu_context uint32_t power_limit; uint32_t default_power_limit; + /* soft pptable */ + uint32_t ppt_offset_bytes; + uint32_t ppt_size_bytes; + uint8_t *ppt_start_addr; + bool support_power_containment; bool disable_watermark; @@ -405,8 +550,6 @@ struct smu_context uint32_t smc_if_version; - unsigned long metrics_time; - void *metrics_table; }; struct pptable_funcs { @@ -415,27 +558,29 @@ struct pptable_funcs { int (*check_powerplay_table)(struct smu_context *smu); int (*append_powerplay_table)(struct smu_context *smu); int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index); + int (*get_smu_clk_index)(struct smu_context *smu, uint32_t index); + int (*get_smu_feature_index)(struct smu_context *smu, uint32_t index); + int (*get_smu_table_index)(struct smu_context *smu, uint32_t index); + int (*get_smu_power_index)(struct smu_context *smu, uint32_t index); + int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile); int (*run_afll_btc)(struct smu_context *smu); - int (*get_unallowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); + int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); enum amd_pm_state_type 
(*get_current_power_state)(struct smu_context *smu); int (*set_default_dpm_table)(struct smu_context *smu); int (*set_power_state)(struct smu_context *smu); int (*populate_umd_state_clk)(struct smu_context *smu); - int (*print_clk_levels)(struct smu_context *smu, enum pp_clock_type type, char *buf); - int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask); + int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); + int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask); int (*set_default_od8_settings)(struct smu_context *smu); - int (*update_specified_od8_value)(struct smu_context *smu, - uint32_t index, - uint32_t value); - int (*get_od_percentage)(struct smu_context *smu, enum pp_clock_type type); + int (*get_od_percentage)(struct smu_context *smu, enum smu_clk_type clk_type); int (*set_od_percentage)(struct smu_context *smu, - enum pp_clock_type type, + enum smu_clk_type clk_type, uint32_t value); int (*od_edit_dpm_table)(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long *input, uint32_t size); int (*get_clock_by_type_with_latency)(struct smu_context *smu, - enum amd_pp_clock_type type, + enum smu_clk_type clk_type, struct pp_clock_levels_with_latency *clocks); @@ -446,16 +591,16 @@ struct pptable_funcs { *clocks); int (*get_power_profile_mode)(struct smu_context *smu, char *buf); int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); - enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu); - int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); + int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable); + int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, + void *data, uint32_t *size); int (*pre_display_config_changed)(struct smu_context *smu); int (*display_config_changed)(struct smu_context *smu); int (*apply_clocks_adjust_rules)(struct smu_context *smu); int (*notify_smc_dispaly_config)(struct smu_context *smu); int (*force_dpm_limit_value)(struct smu_context *smu, bool highest); int (*unforce_dpm_levels)(struct smu_context *smu); - int (*upload_dpm_level)(struct smu_context *smu, bool max, - uint32_t feature_mask); int (*get_profiling_clk_mask)(struct smu_context *smu, enum amd_dpm_forced_level level, uint32_t *sclk_mask, @@ -464,6 +609,18 @@ struct pptable_funcs { int (*set_cpu_power_state)(struct smu_context *smu); int (*set_ppfeature_status)(struct smu_context *smu, uint64_t ppfeatures); int (*get_ppfeature_status)(struct smu_context *smu, char *buf); + bool (*is_dpm_running)(struct smu_context *smu); + int (*tables_init)(struct smu_context *smu, struct smu_table *tables); + int (*set_thermal_fan_table)(struct smu_context *smu); + int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); + int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); + int (*get_current_clk_freq_by_table)(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value); + int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); + int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); + int (*set_default_od_settings)(struct smu_context *smu, bool initialize); }; struct smu_funcs @@ -475,7 +632,7 @@ struct smu_funcs int 
(*fini_power)(struct smu_context *smu); int (*load_microcode)(struct smu_context *smu); int (*check_fw_status)(struct smu_context *smu); - int (*read_pptable_from_vbios)(struct smu_context *smu); + int (*setup_pptable)(struct smu_context *smu); int (*get_vbios_bootup_values)(struct smu_context *smu); int (*get_clk_info_from_vbios)(struct smu_context *smu); int (*check_pptable)(struct smu_context *smu); @@ -492,15 +649,14 @@ struct smu_funcs int (*send_smc_msg)(struct smu_context *smu, uint16_t msg); int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param); int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg); - int (*init_display)(struct smu_context *smu); + int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); - bool (*is_dpm_running)(struct smu_context *smu); int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled); int (*notify_display_change)(struct smu_context *smu); int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def); int (*set_power_limit)(struct smu_context *smu, uint32_t n); - int (*get_current_clk_freq)(struct smu_context *smu, uint32_t clk_id, uint32_t *value); + int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value); int (*init_max_sustainable_clocks)(struct smu_context *smu); int (*start_thermal_control)(struct smu_context *smu); int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, @@ -528,25 +684,21 @@ struct smu_funcs int (*notify_smu_enable_pwe)(struct smu_context *smu); int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); - int (*set_od8_default_settings)(struct smu_context *smu, - bool initialize); int (*conv_power_profile_to_pplib_workload)(int power_profile); - int (*get_power_profile_mode)(struct smu_context *smu, char *buf); - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); - int (*update_od8_settings)(struct smu_context *smu, - uint32_t index, - uint32_t value); - int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable); - int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable); - uint32_t (*get_sclk)(struct smu_context *smu, bool low); - uint32_t (*get_mclk)(struct smu_context *smu, bool low); int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); - int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); + int (*gfx_off_control)(struct smu_context *smu, bool enable); + int (*register_irq_handler)(struct smu_context *smu); + int (*set_azalia_d3_pme)(struct smu_context *smu); + int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); + bool (*baco_is_support)(struct smu_context *smu); + enum smu_baco_state (*baco_get_state)(struct smu_context *smu); + int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); + int (*baco_reset)(struct smu_context *smu); }; @@ -564,8 +716,8 @@ struct smu_funcs ((smu)->funcs->load_microcode ? 
(smu)->funcs->load_microcode((smu)) : 0) #define smu_check_fw_status(smu) \ ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0) -#define smu_read_pptable_from_vbios(smu) \ - ((smu)->funcs->read_pptable_from_vbios ? (smu)->funcs->read_pptable_from_vbios((smu)) : 0) +#define smu_setup_pptable(smu) \ + ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0) #define smu_get_vbios_bootup_values(smu) \ ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0) #define smu_get_clk_info_from_vbios(smu) \ @@ -586,6 +738,9 @@ struct smu_funcs ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0) #define smu_notify_memory_pool_location(smu) \ ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0) +#define smu_gfx_off_control(smu, enable) \ + ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0) + #define smu_write_watermarks_table(smu) \ ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0) #define smu_set_last_dcef_min_deep_sleep_clk(smu) \ @@ -594,10 +749,8 @@ struct smu_funcs ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0) #define smu_init_max_sustainable_clocks(smu) \ ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) -#define smu_set_od8_default_settings(smu, initialize) \ - ((smu)->funcs->set_od8_default_settings ? (smu)->funcs->set_od8_default_settings((smu), (initialize)) : 0) -#define smu_update_od8_settings(smu, index, value) \ - ((smu)->funcs->update_od8_settings ? (smu)->funcs->update_od8_settings((smu), (index), (value)) : 0) +#define smu_set_default_od_settings(smu, initialize) \ + ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) #define smu_get_current_rpm(smu, speed) \ ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0) #define smu_set_fan_speed_rpm(smu, speed) \ @@ -610,14 +763,14 @@ struct smu_funcs ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0) #define smu_alloc_dpm_context(smu) \ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) -#define smu_init_display(smu) \ - ((smu)->funcs->init_display ? (smu)->funcs->init_display((smu)) : 0) +#define smu_init_display_count(smu, count) \ + ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0) #define smu_feature_set_allowed_mask(smu) \ ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0) #define smu_feature_get_enabled_mask(smu, mask, num) \ ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0) #define smu_is_dpm_running(smu) \ - ((smu)->funcs->is_dpm_running ? (smu)->funcs->is_dpm_running((smu)) : 0) + ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) #define smu_feature_update_enable_state(smu, feature_id, enabled) \ ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0) #define smu_notify_display_change(smu) \ @@ -634,36 +787,36 @@ struct smu_funcs ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) #define smu_set_default_od8_settings(smu) \ ((smu)->ppt_funcs->set_default_od8_settings ? 
(smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) -#define smu_update_specified_od8_value(smu, index, value) \ - ((smu)->ppt_funcs->update_specified_od8_value ? (smu)->ppt_funcs->update_specified_od8_value((smu), (index), (value)) : 0) #define smu_get_power_limit(smu, limit, def) \ ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0) #define smu_set_power_limit(smu, limit) \ ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0) #define smu_get_current_clk_freq(smu, clk_id, value) \ ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) -#define smu_print_clk_levels(smu, type, buf) \ - ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (type), (buf)) : 0) -#define smu_force_clk_levels(smu, type, level) \ - ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (type), (level)) : 0) +#define smu_print_clk_levels(smu, clk_type, buf) \ + ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0) +#define smu_force_clk_levels(smu, clk_type, level) \ + ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0) #define smu_get_od_percentage(smu, type) \ ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0) #define smu_set_od_percentage(smu, type, value) \ ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0) #define smu_od_edit_dpm_table(smu, type, input, size) \ ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0) +#define smu_tables_init(smu, tab) \ + ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0) +#define smu_set_thermal_fan_table(smu) \ + ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0) #define smu_start_thermal_control(smu) \ ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0) #define smu_read_sensor(smu, sensor, data, size) \ ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : 0) +#define smu_asic_read_sensor(smu, sensor, data, size) \ + ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0) #define smu_get_power_profile_mode(smu, buf) \ - ((smu)->funcs->get_power_profile_mode ? (smu)->funcs->get_power_profile_mode((smu), buf) : 0) + ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0) #define smu_set_power_profile_mode(smu, param, param_size) \ - ((smu)->funcs->set_power_profile_mode ? (smu)->funcs->set_power_profile_mode((smu), (param), (param_size)) : 0) -#define smu_get_performance_level(smu) \ - ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0) -#define smu_force_performance_level(smu, level) \ - ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0) + ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0) #define smu_pre_display_config_changed(smu) \ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) #define smu_display_config_changed(smu) \ @@ -676,8 +829,6 @@ struct smu_funcs ((smu)->ppt_funcs->force_dpm_limit_value ? 
(smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) #define smu_unforce_dpm_levels(smu) \ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0) -#define smu_upload_dpm_level(smu, max, feature_mask) \ - ((smu)->ppt_funcs->upload_dpm_level ? (smu)->ppt_funcs->upload_dpm_level((smu), (max), (feature_mask)) : 0) #define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) #define smu_set_cpu_power_state(smu) \ @@ -687,16 +838,26 @@ struct smu_funcs #define smu_set_fan_control_mode(smu, value) \ ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0) #define smu_get_fan_speed_percent(smu, speed) \ - ((smu)->funcs->get_fan_speed_percent ? (smu)->funcs->get_fan_speed_percent((smu), (speed)) : 0) + ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) #define smu_set_fan_speed_percent(smu, speed) \ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) #define smu_msg_get_index(smu, msg) \ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_clk_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_feature_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_table_get_index(smu, tab) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL) +#define smu_power_get_index(smu, src) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL) +#define smu_workload_get_type(smu, profile) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL) #define smu_run_afll_btc(smu) \ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0) -#define smu_get_unallowed_feature_mask(smu, feature_mask, num) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_unallowed_feature_mask? (smu)->ppt_funcs->get_unallowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) +#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) #define smu_set_deep_sleep_dcefclk(smu, clk) \ ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0) #define smu_set_active_display_count(smu, count) \ @@ -707,8 +868,8 @@ struct smu_funcs ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0) #define smu_get_max_high_clocks(smu, clocks) \ ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0) -#define smu_get_clock_by_type_with_latency(smu, type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_latency ? 
(smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (type), (clocks)) : 0) +#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \ + ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0) #define smu_get_clock_by_type_with_voltage(smu, type, clocks) \ ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0) #define smu_display_clock_voltage_request(smu, clock_req) \ @@ -724,19 +885,39 @@ struct smu_funcs #define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \ ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0) #define smu_dpm_set_uvd_enable(smu, enable) \ - ((smu)->funcs->dpm_set_uvd_enable ? (smu)->funcs->dpm_set_uvd_enable((smu), (enable)) : 0) + ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) #define smu_dpm_set_vce_enable(smu, enable) \ - ((smu)->funcs->dpm_set_vce_enable ? (smu)->funcs->dpm_set_vce_enable((smu), (enable)) : 0) -#define smu_get_sclk(smu, low) \ - ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0) -#define smu_get_mclk(smu, low) \ - ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0) + ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) #define smu_set_xgmi_pstate(smu, pstate) \ ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) #define smu_set_ppfeature_status(smu, ppfeatures) \ ((smu)->ppt_funcs->set_ppfeature_status ? (smu)->ppt_funcs->set_ppfeature_status((smu), (ppfeatures)) : -EINVAL) #define smu_get_ppfeature_status(smu, buf) \ ((smu)->ppt_funcs->get_ppfeature_status ? (smu)->ppt_funcs->get_ppfeature_status((smu), (buf)) : -EINVAL) +#define smu_set_watermarks_table(smu, tab, clock_ranges) \ + ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) +#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ + ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0) +#define smu_thermal_temperature_range_update(smu, range, rw) \ + ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0) +#define smu_get_thermal_temperature_range(smu, range) \ + ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) +#define smu_register_irq_handler(smu) \ + ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) +#define smu_set_azalia_d3_pme(smu) \ + ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) +#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ + ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) +#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ + ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) +#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ + ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) +#define smu_baco_is_support(smu) \ + ((smu)->funcs->baco_is_support? 
(smu)->funcs->baco_is_support((smu)) : false) +#define smu_baco_get_state(smu, state) \ + ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) +#define smu_baco_reset(smu) \ + ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, @@ -747,15 +928,17 @@ extern const struct amd_ip_funcs smu_ip_funcs; extern const struct amdgpu_ip_block_version smu_v11_0_ip_block; extern int smu_feature_init_dpm(struct smu_context *smu); -extern int smu_feature_is_enabled(struct smu_context *smu, int feature_id); -extern int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable); -extern int smu_feature_is_supported(struct smu_context *smu, int feature_id); -extern int smu_feature_set_supported(struct smu_context *smu, int feature_id, bool enable); +extern int smu_feature_is_enabled(struct smu_context *smu, + enum smu_feature_mask mask); +extern int smu_feature_set_enabled(struct smu_context *smu, + enum smu_feature_mask mask, bool enable); +extern int smu_feature_is_supported(struct smu_context *smu, + enum smu_feature_mask mask); +extern int smu_feature_set_supported(struct smu_context *smu, + enum smu_feature_mask mask, bool enable); -int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg, +int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument, void *table_data, bool drv2smu); -#define smu_update_table(smu, table_id, table_data, drv2smu) \ - smu_update_table_with_arg((smu), (table_id), 0, (table_data), (drv2smu)) bool is_support_sw_smu(struct amdgpu_device *adev); int smu_reset(struct smu_context *smu); @@ -777,4 +960,19 @@ extern int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, enum amd_pp_task task_id); int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version); +int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, + uint16_t level, uint32_t *value); +int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *value); +int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); +int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); +int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); +enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); +int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); +int smu_set_display_count(struct smu_context *smu, uint32_t count); +bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index c92999aac07c..c5989cb38b1b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -190,6 +190,7 @@ struct phm_vce_clock_voltage_dependency_table { }; struct pp_smumgr_func { + char *name; int (*smu_init)(struct pp_hwmgr *hwmgr); int (*smu_fini)(struct pp_hwmgr *hwmgr); int (*start_smu)(struct pp_hwmgr *hwmgr); @@ -694,6 +695,7 @@ struct pp_thermal_controller_info { uint8_t ucType; uint8_t ucI2cLine; uint8_t ucI2cAddress; + uint8_t use_hw_fan_control; struct pp_fan_info fanInfo; struct 
pp_advance_fan_control_parameters advanceFanControlParameters; }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 195c4ae67058..755d51f9c6a9 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x12 +#define SMU11_DRIVER_IF_VERSION 0x13 #define PPTABLE_V20_SMU_VERSION 3 @@ -615,6 +615,7 @@ typedef struct { uint16_t UclkAverageLpfTau; uint16_t GfxActivityLpfTau; uint16_t UclkActivityLpfTau; + uint16_t SocketPowerLpfTau; uint32_t MmHubPadding[8]; @@ -665,7 +666,8 @@ typedef struct { uint32_t ThrottlerStatus ; uint8_t LinkDpmLevel; - uint8_t Padding[3]; + uint16_t AverageSocketPower; + uint8_t Padding; uint32_t MmHubPadding[7]; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h new file mode 100644 index 000000000000..adbbfebbb1e5 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h @@ -0,0 +1,1069 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU11_DRIVER_IF_NAVI10_H__ +#define __SMU11_DRIVER_IF_NAVI10_H__ + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU11_DRIVER_IF_VERSION 0x33 + +#define PPTABLE_NV10_SMU_VERSION 8 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SMNCLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_PHYCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_PIXCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_MP1CLK_DPM_LEVELS 2 +#define NUM_LINK_LEVELS 2 + + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_SMNCLK_DPM_LEVEL (NUM_SMNCLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) +#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) +#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_MP1CLK_DPM_LEVEL (NUM_MP1CLK_DPM_LEVELS - 1) +#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) + +//Gemini Modes +#define PPSMC_GeminiModeNone 0 //Single GPU board +#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board +#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board + +// Feature Control Defines +// DPM +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_PACE_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_SOCCLK_BIT 4 +#define FEATURE_DPM_MP0CLK_BIT 5 +#define FEATURE_DPM_LINK_BIT 6 +#define FEATURE_DPM_DCEFCLK_BIT 7 +#define FEATURE_MEM_VDDCI_SCALING_BIT 8 +#define FEATURE_MEM_MVDD_SCALING_BIT 9 + +//Idle +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_LCLK_BIT 12 +#define FEATURE_DS_DCEFCLK_BIT 13 +#define FEATURE_DS_UCLK_BIT 14 +#define FEATURE_GFX_ULV_BIT 15 +#define FEATURE_FW_DSTATE_BIT 16 +#define FEATURE_GFXOFF_BIT 17 +#define FEATURE_BACO_BIT 18 +#define FEATURE_VCN_PG_BIT 19 +#define FEATURE_JPEG_PG_BIT 20 +#define FEATURE_USB_PG_BIT 21 +#define FEATURE_RSMU_SMN_CG_BIT 22 +//Throttler/Response +#define FEATURE_PPT_BIT 23 +#define FEATURE_TDC_BIT 24 +#define FEATURE_GFX_EDC_BIT 25 +#define FEATURE_APCC_PLUS_BIT 26 +#define FEATURE_GTHR_BIT 27 +#define FEATURE_ACDC_BIT 28 +#define FEATURE_VR0HOT_BIT 29 +#define FEATURE_VR1HOT_BIT 30 +#define FEATURE_FW_CTF_BIT 31 +#define FEATURE_FAN_CONTROL_BIT 32 +#define FEATURE_THERMAL_BIT 33 +#define FEATURE_GFX_DCS_BIT 34 +//VF +#define FEATURE_RM_BIT 35 +#define FEATURE_LED_DISPLAY_BIT 36 +//Other +#define FEATURE_GFX_SS_BIT 37 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 38 +#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 39 + +#define FEATURE_MMHUB_PG_BIT 40 +#define FEATURE_ATHUB_PG_BIT 41 +#define FEATURE_APCC_DFLL_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 
54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +// Debug Overrides Bitmask +#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_SOCCLK 0x00000004 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000008 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000010 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00000020 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00000040 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_DCE_SOCCLK 0x00000080 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_MP0_SOCCLK 0x00000100 +#define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN 0x00000200 +#define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +// Throttler Control/Status Bits +#define THROTTLER_PADDING_BIT 0 +#define THROTTLER_TEMP_EDGE_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_TEMP_MEM_BIT 3 +#define THROTTLER_TEMP_VR_GFX_BIT 4 +#define THROTTLER_TEMP_VR_MEM0_BIT 5 +#define THROTTLER_TEMP_VR_MEM1_BIT 6 +#define THROTTLER_TEMP_VR_SOC_BIT 7 +#define THROTTLER_TEMP_LIQUID0_BIT 8 +#define THROTTLER_TEMP_LIQUID1_BIT 9 +#define THROTTLER_TEMP_PLX_BIT 10 +#define THROTTLER_TEMP_SKIN_BIT 11 +#define THROTTLER_TDC_GFX_BIT 12 +#define THROTTLER_TDC_SOC_BIT 13 +#define THROTTLER_PPT0_BIT 14 +#define THROTTLER_PPT1_BIT 15 +#define THROTTLER_PPT2_BIT 16 +#define THROTTLER_PPT3_BIT 17 +#define THROTTLER_FIT_BIT 18 +#define THROTTLER_PPM_BIT 19 +#define THROTTLER_APCC_BIT 20 + +// FW DState Features Control Bits +#define FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VDDCI_OFF_BIT 2 +#define FW_DSTATE_MP0_DS_BIT 3 +#define FW_DSTATE_SMN_DS_BIT 4 +#define FW_DSTATE_MP1_DS_BIT 5 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 6 +#define FW_DSTATE_LIV_MIN_BIT 7 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 8 + +#define FW_DSTATE_SOC_ULV_MASK (1 << FW_DSTATE_SOC_ULV_BIT ) +#define FW_DSTATE_G6_HSR_MASK (1 << FW_DSTATE_G6_HSR_BIT ) +#define FW_DSTATE_G6_PHY_VDDCI_OFF_MASK (1 << FW_DSTATE_G6_PHY_VDDCI_OFF_BIT ) +#define FW_DSTATE_MP1_DS_MASK (1 << FW_DSTATE_MP1_DS_BIT ) +#define FW_DSTATE_MP0_DS_MASK (1 << FW_DSTATE_MP0_DS_BIT ) +#define FW_DSTATE_SMN_DS_MASK (1 << FW_DSTATE_SMN_DS_BIT ) +#define FW_DSTATE_MP1_WHISPER_MODE_MASK (1 << FW_DSTATE_MP1_WHISPER_MODE_BIT ) +#define FW_DSTATE_LIV_MIN_MASK (1 << FW_DSTATE_LIV_MIN_BIT ) +#define FW_DSTATE_SOC_PLL_PWRDN_MASK (1 << FW_DSTATE_SOC_PLL_PWRDN_BIT ) + +//I2C Interface + +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 8 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} 
I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VDDCI, + I2C_CONTROLLER_NAME_VR_MVDD, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_SPARE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VDDCI, + I2C_CONTROLLER_THROTTLER_VR_MVDD, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_0, + I2C_CONTROLLER_PROTOCOL_VR_1, + I2C_CONTROLLER_PROTOCOL_TMP_0, + I2C_CONTROLLER_PROTOCOL_TMP_1, + I2C_CONTROLLER_PROTOCOL_SPARE_0, + I2C_CONTROLLER_PROTOCOL_SPARE_1, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t Padding[2]; + uint32_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) + +typedef struct { + uint8_t RegisterAddr; ////only valid for write, ignored for read + uint8_t Cmd; //Read(0) or Write(1) + uint8_t Data; //Return data for read. 
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Slow(0) or Fast(1) + uint16_t SlaveAddress; + uint8_t NumCmds; //Number of commands + uint8_t Padding[3]; + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; + + uint32_t MmHubPadding[8]; // SMU internal use + +} SwI2cRequest_t; // SW I2C Request Table + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +}D3HOTSequence_e; + +//THis is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL = 0, + GFXCLK_SOURCE_DFLL, + GFXCLK_SOURCE_COUNT, +} GfxclkSrc_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_DCLK, + PPCLK_VCLK, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + MEMORY_TYPE_GDDR6 = 0, + MEMORY_TYPE_HBM, +} MemoryType_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, +} PwrConfig_e; + +typedef struct { + uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t Padding; + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) +} DpmDescriptor_t; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_VR_SOC, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT +} TEMP_e; + +//Out of band monitor status defines +//see SPEC //gpu/doc/soc_arch/spec/feature/SMBUS/SMBUS.xlsx +#define 
POWER_MANAGER_CONTROLLER_NOT_RUNNING 0 +#define POWER_MANAGER_CONTROLLER_RUNNING 1 + +#define POWER_MANAGER_CONTROLLER_BIT 0 +#define MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_BIT 8 +#define GPU_DIE_TEMPERATURE_THROTTLING_BIT 9 +#define HBM_DIE_TEMPERATURE_THROTTLING_BIT 10 +#define TGP_THROTTLING_BIT 11 +#define PCC_THROTTLING_BIT 12 +#define HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_BIT 13 +#define HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_BIT 14 + +#define POWER_MANAGER_CONTROLLER_MASK (1 << POWER_MANAGER_CONTROLLER_BIT ) +#define MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_MASK (1 << MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_BIT ) +#define GPU_DIE_TEMPERATURE_THROTTLING_MASK (1 << GPU_DIE_TEMPERATURE_THROTTLING_BIT ) +#define HBM_DIE_TEMPERATURE_THROTTLING_MASK (1 << HBM_DIE_TEMPERATURE_THROTTLING_BIT ) +#define TGP_THROTTLING_MASK (1 << TGP_THROTTLING_BIT ) +#define PCC_THROTTLING_MASK (1 << PCC_THROTTLING_BIT ) +#define HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_MASK (1 << HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_BIT ) +#define HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_MASK (1 << HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_BIT) + +//This structure to be DMA to SMBUS Config register space +typedef struct { + uint8_t MinorInfoVersion; + uint8_t MajorInfoVersion; + uint8_t TableSize; + uint8_t Reserved; + + uint8_t Reserved1; + uint8_t RevID; + uint16_t DeviceID; + + uint16_t DieTemperatureLimit; + uint16_t FanTargetTemperature; + + uint16_t MemoryTemperatureLimit; + uint16_t MemoryTemperatureLimit1; + + uint16_t TGP; + uint16_t CardPower; + + uint32_t DieTemperatureRegisterOffset; + + uint32_t Reserved2; + + uint32_t Reserved3; + + uint32_t Status; + + uint16_t DieTemperature; + uint16_t MemoryTemperature; + + uint16_t SelectedCardPower; + uint16_t Reserved4; + + uint32_t BoardLevelEnergyAccumulator; +} OutOfBandMonitor_t; + +typedef struct { + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[2]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; + + uint16_t TdcLimitSoc; // Amps + uint16_t TdcLimitSocTau; // Time constant of LPF in ms + uint16_t TdcLimitGfx; // Amps + uint16_t TdcLimitGfxTau; // Time constant of LPF in ms + + uint16_t TedgeLimit; // Celcius + uint16_t ThotspotLimit; // Celcius + uint16_t TmemLimit; // Celcius + uint16_t Tvr_gfxLimit; // Celcius + uint16_t Tvr_mem0Limit; // Celcius + uint16_t Tvr_mem1Limit; // Celcius + uint16_t Tvr_socLimit; // Celcius + uint16_t Tliquid0Limit; // Celcius + uint16_t Tliquid1Limit; // Celcius + uint16_t TplxLimit; // Celcius + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + uint16_t PpmPowerLimit; // Switch this this power limit when temperature is above PpmTempThreshold + uint16_t PpmTemperatureThreshold; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW DState masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetSoc; // In mV(Q2) + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + + uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of SOC_ULV. Controls delay for GFX SDP port disconnection during idle events + uint8_t paddingRlcUlvParams[3]; + + uint8_t UlvSmnclkDid; //DID for ULV mode. 
0 means CLK will not be modified in ULV. + uint8_t UlvMp1clkDid; //DID for ULV mode. 0 means CLK will not be modified in ULV. + uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV + uint8_t Padding234; + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode + + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz + uint32_t Paddingclks[16]; + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + uint16_t Padding8_Clks; + + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + // GFXCLK DPM + uint16_t GfxclkFgfxoffEntry; // in Mhz + uint16_t GfxclkFinit; // in Mhz + uint16_t GfxclkFidle; // in MHz + uint16_t GfxclkSlewRate; // for PLL babystepping??? + uint16_t GfxclkFopt; // in Mhz + uint8_t Padding567[2]; + uint16_t GfxclkDsMaxFreq; // in MHz + uint8_t GfxclkSource; // 0 = PLL, 1 = DFLL + uint8_t Padding456; + + // UCLK section + uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only + uint8_t paddingUclk[3]; + + uint8_t MemoryType; // 0-GDDR6, 1-HBM + uint8_t MemoryChannels; + uint8_t PaddingMem[2]; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // GFXCLK Thermal DPM (formerly 'Boost' Settings) + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability. 
+ + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainLiquid0; + uint16_t FanGainLiquid1; + uint16_t FanGainVrGfx; + uint16_t FanGainVrSoc; + uint16_t FanGainVrMem0; + uint16_t FanGainVrMem1; + uint16_t FanGainPlx; + uint16_t FanGainMem; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanTempInputSelect; + uint8_t FanPadding; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + //uint8_t padding8_Fan[2]; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_Avfs[2]; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + // Total Power configuration, use defines from PwrConfig_e + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured + uint8_t TotalPowerSpare1; + uint16_t TotalPowerSpare2; + + // APCC Settings + uint16_t PccThresholdLow; + uint16_t PccThresholdHigh; + uint32_t PaddingAPCC[6]; //FIXME pending SPEC + + // Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + // BTC Setting + uint32_t BtcConfig; + + uint16_t SsFmin[10]; // PPtable value to function similar to VFTFmin for SS Curve; Size is PPCLK_COUNT rounded to nearest multiple of 2 + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; + + // SECTION: Board Reserved + uint32_t Reserved[8]; + + // SECTION: BOARD PARAMETERS + // I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + // SVI2 Board Parameters + uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. + uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. 
+ + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) + uint8_t Padding8_V; + + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; // in Amps + int8_t Mem0Offset; // in Amps + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; // in Amps + int8_t Mem1Offset; // in Amps + uint8_t Padding_TelemetryMem1; + + // GPIO Settings + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t padding8_4; + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // GFXCLK DFLL Spread Spectrum + uint8_t DfllGfxclkSpreadEnabled; // on or off + uint8_t DfllGfxclkSpreadPercent; // Q4.4 + uint16_t DfllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // SOCCLK Spread Spectrum + uint8_t SoclkSpreadEnabled; // on or off + uint8_t SocclkSpreadPercent; // Q4.4 + uint16_t SocclkSpreadFreq; // kHz + + // Total board power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPadding; + + // Mvdd Svi2 Div Ratio Setting + uint32_t MvddRatio; // This is used for MVDD Vid workaround. 
It has 16 fractional bits (Q16.16) + + uint32_t BoardReserved[9]; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t SocketPowerLpfTau; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfig_t; + +typedef struct { + + uint16_t GfxclkFmin; // MHz + uint16_t GfxclkFmax; // MHz + uint16_t GfxclkFreq1; // MHz + uint16_t GfxclkVolt1; // mV (Q2) + uint16_t GfxclkFreq2; // MHz + uint16_t GfxclkVolt2; // mV (Q2) + uint16_t GfxclkFreq3; // MHz + uint16_t GfxclkVolt3; // mV (Q2) + uint16_t UclkFmax; // MHz + int16_t OverDrivePct; // % + uint16_t FanMaximumRpm; + uint16_t FanMinimumPwm; + uint16_t FanTargetTemperature; // Degree Celcius + uint16_t MaxOpTemp; // Degree Celcius + uint16_t FanZeroRpmEnable; + uint16_t Padding; + + uint32_t MmHubPadding[8]; // SMU internal use + +} OverDriveTable_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_t; + +typedef struct { + uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + + uint32_t MmHubPadding[8]; // SMU internal use +} Watermarks_t; + +typedef struct { + uint16_t avgPsmCount[36]; + uint16_t minPsmCount[36]; + float avgPsmVoltage[36]; + float minPsmVoltage[36]; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsVersion; + uint8_t Padding; + + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t 
VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units + + uint32_t EnabledAvfsModules[2]; //NV10 - 36 AVFS modules + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsFuseOverride_t; + +typedef struct { + + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Soc_ActiveHystLimit; + uint8_t Soc_IdleHystLimit; + uint8_t Soc_FPS; + uint8_t Soc_MinActiveFreqType; + uint8_t Soc_BoosterFreqType; + uint8_t Soc_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint16_t Soc_MinActiveFreq; // MHz + uint16_t Soc_BoosterFreq; // MHz + uint16_t Soc_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Soc_PD_Data_limit_a; // Q16 + uint32_t Soc_PD_Data_limit_b; // Q16 + uint32_t Soc_PD_Data_limit_c; // Q16 + uint32_t Soc_PD_Data_error_coeff; // Q16 + uint32_t Soc_PD_Data_error_rate_coeff; // Q16 + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. 
+ uint16_t Mem_MinActiveFreq; // MHz + uint16_t Mem_BoosterFreq; // MHz + uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Mem_PD_Data_limit_a; // Q16 + uint32_t Mem_PD_Data_limit_b; // Q16 + uint32_t Mem_PD_Data_limit_c; // Q16 + uint32_t Mem_PD_Data_error_coeff; // Q16 + uint32_t Mem_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit; // Q16 + uint8_t Mem_UpHystLimit; + uint8_t Mem_DownHystLimit; + uint16_t Mem_Fps; + + uint32_t MmHubPadding[8]; // SMU internal use + +} DpmActivityMonitorCoeffInt_t; + + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 + + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF + +// Table types +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_AVFS_FUSE_OVERRIDE 4 +#define TABLE_PMSTATUSLOG 5 +#define TABLE_SMU_METRICS 6 +#define TABLE_DRIVER_SMU_CONFIG 7 +#define TABLE_ACTIVITY_MONITOR_COEFF 8 +#define TABLE_OVERDRIVE 9 +#define TABLE_I2C_COMMANDS 10 +#define TABLE_PACE 11 +#define TABLE_COUNT 12 + +//RLC Pace Table total number of levels +#define RLC_PACE_TABLE_NUM_LEVELS 16 + +typedef struct { + float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; + + uint32_t MmHubPadding[8]; // SMU internal use +} RlcPaceFlopsPerByteOverride_t; + +// These defines are used with the SMC_MSG_SetUclkFastSwitch message. 
+#define UCLK_SWITCH_SLOW 0 +#define UCLK_SWITCH_FAST 1 +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 02c965d64256..2fff4b16cb4e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -30,6 +30,7 @@ #define MP0_SRAM 0x03900000 #define MP1_Public 0x03b00000 #define MP1_SRAM 0x03c00004 +#define MP1_SMC_SIZE 0x40000 /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -39,6 +40,23 @@ #define TEMP_RANGE_MIN (0) #define TEMP_RANGE_MAX (80 * 1000) +#define SMU11_TOOL_SIZE 0x19000 + +#define CLK_MAP(clk, index) \ + [SMU_##clk] = index + +#define FEA_MAP(fea) \ + [SMU_FEATURE_##fea##_BIT] = FEATURE_##fea##_BIT + +#define TAB_MAP(tab) \ + [SMU_TABLE_##tab] = TABLE_##tab + +#define PWR_MAP(tab) \ + [SMU_POWER_SOURCE_##tab] = POWER_SOURCE_##tab + +#define WORKLOAD_MAP(profile, workload) \ + [profile] = workload + struct smu_11_0_max_sustainable_clocks { uint32_t display_clock; uint32_t phy_clock; @@ -87,6 +105,14 @@ struct smu_11_0_power_context { enum smu_11_0_power_state power_state; }; +enum smu_v11_0_baco_seq { + BACO_SEQ_BACO = 0, + BACO_SEQ_MSR, + BACO_SEQ_BAMACO, + BACO_SEQ_ULPS, + BACO_SEQ_COUNT, +}; + void smu_v11_0_set_smu_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h index f466f624ad32..373861ddccd0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h @@ -60,6 +60,7 @@ //BACO/BAMACO/BOMACO #define PPSMC_MSG_EnterBaco 0x18 #define PPSMC_MSG_ExitBaco 0x19 +#define PPSMC_MSG_ArmD3 0x46 //DPM #define PPSMC_MSG_SetSoftMinByFreq 0x1A @@ -71,26 +72,23 @@ #define PPSMC_MSG_GetDpmFreqByIndex 0x20 #define PPSMC_MSG_OverridePcieParameters 0x21 #define PPSMC_MSG_SetMinDeepSleepDcefclk 0x22 -#define PPSMC_MSG_SetWorkloadMask 0x23 -#define PPSMC_MSG_SetUclkFastSwitch 0x24 -#define PPSMC_MSG_GetAvfsVoltageByDpm 0x25 -#define PPSMC_MSG_SetVideoFps 0x26 -#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27 -//Power Gating -#define PPSMC_MSG_AllowGfxOff 0x28 -#define PPSMC_MSG_DisallowGfxOff 0x29 -#define PPSMC_MSG_PowerUpVcn 0x2A -#define PPSMC_MSG_PowerDownVcn 0x2B -#define PPSMC_MSG_PowerUpJpeg 0x2C -#define PPSMC_MSG_PowerDownJpeg 0x2D -//reserve 0x2A to 0x2F for PG harvesting TBD +#define PPSMC_MSG_SetWorkloadMask 0x24 +#define PPSMC_MSG_SetUclkFastSwitch 0x25 +#define PPSMC_MSG_GetVoltageByDpm 0x26 +#define PPSMC_MSG_SetVideoFps 0x27 +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x28 -//I2C Interface -#define PPSMC_RequestI2cTransaction 0x30 +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x29 +#define PPSMC_MSG_DisallowGfxOff 0x2A +#define PPSMC_MSG_PowerUpVcn 0x2B +#define PPSMC_MSG_PowerDownVcn 0x2C +#define PPSMC_MSG_PowerUpJpeg 0x2D +#define PPSMC_MSG_PowerDownJpeg 0x2E +//reserve 0x29 to 0x30 for PG harvesting TBD //Resets -#define PPSMC_MSG_SoftReset 0x31 //FIXME Need confirmation from driver #define PPSMC_MSG_PrepareMp1ForUnload 0x32 #define PPSMC_MSG_PrepareMp1ForReset 0x33 #define PPSMC_MSG_PrepareMp1ForShutdown 0x34 @@ -100,7 +98,6 @@ #define PPSMC_MSG_GetPptLimit 0x36 #define PPSMC_MSG_ReenableAcDcInterrupt 0x37 #define PPSMC_MSG_NotifyPowerSource 0x38 -//#define PPSMC_MSG_GfxDeviceDriverReset 0x39 //FIXME mode1 and 2 resets will go directly go PSP //BTC #define PPSMC_MSG_RunBtc 0x3A @@ -120,9 +117,15 @@ #define PPSMC_MSG_SetGeminiApertureHigh 0x43 #define PPSMC_MSG_SetGeminiApertureLow 
0x44 -#define PPSMC_Message_Count 0x45 +#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45 +#define PPSMC_MSG_BacoAudioD3PME 0x48 + +#define PPSMC_Message_Count 0x49 typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; +//for use with PPSMC_MSG_GetVoltageByDpmOverdrive +#define PPSMC_GET_AVFS_CURVE 0 +#define PPSMC_GET_OVERDRIVE_CURVE 1 #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h index 92c65b80bde2..86cdc3393eac 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h @@ -121,7 +121,7 @@ struct smu_11_0_powerplay_table { struct atom_common_table_header header; uint8_t table_revision; - uint32_t table_size; //Driver portion table size. The offset to smc_pptable including header size + uint16_t table_size; //Driver portion table size. The offset to smc_pptable including header size uint32_t golden_pp_id; uint32_t golden_revision; uint16_t format_id; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c new file mode 100644 index 000000000000..2dae0ae0829e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -0,0 +1,1577 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "pp_debug.h" +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "smu_v11_0.h" +#include "smu11_driver_if_navi10.h" +#include "soc15_common.h" +#include "atom.h" +#include "navi10_ppt.h" +#include "smu_v11_0_pptable.h" +#include "smu_v11_0_ppsmc.h" + +#include "asic_reg/mp/mp_11_0_sh_mask.h" + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \ + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) + +#define MSG_MAP(msg, index) \ + [SMU_MSG_##msg] = index + +static int navi10_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures), + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow), + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh), + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow), + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable), + MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable), + MSG_MAP(RunBtc, PPSMC_MSG_RunBtc), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex), + MSG_MAP(SetMemoryChannelConfig, PPSMC_MSG_SetMemoryChannelConfig), + MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode), + MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh), + MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow), + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters), + MSG_MAP(SetMinDeepSleepDcefclk, PPSMC_MSG_SetMinDeepSleepDcefclk), + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource), + MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch), + MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps), + MSG_MAP(PrepareMp1ForUnload, 
PPSMC_MSG_PrepareMp1ForUnload), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize), + MSG_MAP(ConfigureGfxDidt, PPSMC_MSG_ConfigureGfxDidt), + MSG_MAP(NumOfDisplays, PPSMC_MSG_NumOfDisplays), + MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh), + MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow), + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff), + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco), + MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset), + MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown), + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn), + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn), + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg), + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg), + MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), +}; + +static int navi10_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_SOCCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(DCLK, PPCLK_DCLK), + CLK_MAP(VCLK, PPCLK_VCLK), + CLK_MAP(DCEFCLK, PPCLK_DCEFCLK), + CLK_MAP(DISPCLK, PPCLK_DISPCLK), + CLK_MAP(PIXCLK, PPCLK_PIXCLK), + CLK_MAP(PHYCLK, PPCLK_PHYCLK), +}; + +static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(DPM_PREFETCHER), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_GFX_PACE), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_MP0CLK), + FEA_MAP(DPM_LINK), + FEA_MAP(DPM_DCEFCLK), + FEA_MAP(MEM_VDDCI_SCALING), + FEA_MAP(MEM_MVDD_SCALING), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(DS_DCEFCLK), + FEA_MAP(DS_UCLK), + FEA_MAP(GFX_ULV), + FEA_MAP(FW_DSTATE), + FEA_MAP(GFXOFF), + FEA_MAP(BACO), + FEA_MAP(VCN_PG), + FEA_MAP(JPEG_PG), + FEA_MAP(USB_PG), + FEA_MAP(RSMU_SMN_CG), + FEA_MAP(PPT), + FEA_MAP(TDC), + FEA_MAP(GFX_EDC), + FEA_MAP(APCC_PLUS), + FEA_MAP(GTHR), + FEA_MAP(ACDC), + FEA_MAP(VR0HOT), + FEA_MAP(VR1HOT), + FEA_MAP(FW_CTF), + FEA_MAP(FAN_CONTROL), + FEA_MAP(THERMAL), + FEA_MAP(GFX_DCS), + FEA_MAP(RM), + FEA_MAP(LED_DISPLAY), + FEA_MAP(GFX_SS), + FEA_MAP(OUT_OF_BAND_MONITOR), + FEA_MAP(TEMP_DEPENDENT_VMIN), + FEA_MAP(MMHUB_PG), + FEA_MAP(ATHUB_PG), +}; + +static int navi10_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(WATERMARKS), + TAB_MAP(AVFS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(AVFS_FUSE_OVERRIDE), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(ACTIVITY_MONITOR_COEFF), + TAB_MAP(OVERDRIVE), + TAB_MAP(I2C_COMMANDS), + TAB_MAP(PACE), +}; + +static int navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static int navi10_workload_map[] = { + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), 
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), +}; + +static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index > SMU_MSG_MAX_COUNT) + return -EINVAL; + + val = navi10_message_map[index]; + if (val > PPSMC_Message_Count) + return -EINVAL; + + return val; +} + +static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_CLK_COUNT) + return -EINVAL; + + val = navi10_clk_map[index]; + if (val >= PPCLK_COUNT) + return -EINVAL; + + return val; +} + +static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_FEATURE_COUNT) + return -EINVAL; + + val = navi10_feature_mask_map[index]; + if (val > 64) + return -EINVAL; + + return val; +} + +static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_TABLE_COUNT) + return -EINVAL; + + val = navi10_table_map[index]; + if (val >= TABLE_COUNT) + return -EINVAL; + + return val; +} + +static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_POWER_SOURCE_COUNT) + return -EINVAL; + + val = navi10_pwr_src_map[index]; + if (val >= POWER_SOURCE_COUNT) + return -EINVAL; + + return val; +} + + +static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile) +{ + int val; + if (profile > PP_SMC_POWER_PROFILE_CUSTOM) + return -EINVAL; + + val = navi10_workload_map[profile]; + + return val; +} + +static bool is_asic_secure(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + bool is_secure = true; + uint32_t mp0_fw_intf; + + mp0_fw_intf = RREG32_PCIE(MP0_Public | + (smnMP0_FW_INTF & 0xffffffff)); + + if (!(mp0_fw_intf & (1 << 19))) + is_secure = false; + + return is_secure; +} + +static int +navi10_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + struct amdgpu_device *adev = smu->adev; + + if (num > 2) + return -EINVAL; + + memset(feature_mask, 0, sizeof(uint32_t) * num); + + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) + | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) + | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) + | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) + | FEATURE_MASK(FEATURE_DPM_LINK_BIT) + | FEATURE_MASK(FEATURE_GFX_ULV_BIT) + | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT) + | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT) + | FEATURE_MASK(FEATURE_PPT_BIT) + | FEATURE_MASK(FEATURE_TDC_BIT) + | FEATURE_MASK(FEATURE_GFX_EDC_BIT) + | FEATURE_MASK(FEATURE_VR0HOT_BIT) + | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT) + | FEATURE_MASK(FEATURE_THERMAL_BIT) + | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT) + | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) + | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT) + | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) + | FEATURE_MASK(FEATURE_FW_DSTATE_BIT) + | FEATURE_MASK(FEATURE_BACO_BIT) + | FEATURE_MASK(FEATURE_ACDC_BIT) + | FEATURE_MASK(FEATURE_GFX_SS_BIT) + | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) + | FEATURE_MASK(FEATURE_FW_CTF_BIT); + + if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) + | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT) + | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT); + + if (adev->pm.pp_feature & PP_GFXOFF_MASK) { + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); + /* TODO: remove it once fw fix the bug */ + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT); + } + + if (smu->adev->pg_flags & 
AMD_PG_SUPPORT_MMHUB) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT); + + if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT); + + if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT); + + /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */ + if (is_asic_secure(smu)) { + /* only for navi10 A0 */ + if ((adev->asic_type == CHIP_NAVI10) && + (adev->rev_id == 0)) { + *(uint64_t *)feature_mask &= + ~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT) + | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT) + | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT)); + *(uint64_t *)feature_mask &= + ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); + } + } + + return 0; +} + +static int navi10_check_powerplay_table(struct smu_context *smu) +{ + return 0; +} + +static int navi10_append_powerplay_table(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + struct atom_smc_dpm_info_v4_5 *smc_dpm_table; + int index, ret; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smc_dpm_info); + + ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table); + if (ret) + return ret; + + memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers, + sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS); + + /* SVI2 Board Parameters */ + smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx; + smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc; + smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping; + smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping; + smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping; + smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping; + smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask; + smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask; + smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent; + smc_pptable->Padding8_V = smc_dpm_table->Padding8_V; + + /* Telemetry Settings */ + smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent; + smc_pptable->GfxOffset = smc_dpm_table->GfxOffset; + smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx; + smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent; + smc_pptable->SocOffset = smc_dpm_table->SocOffset; + smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc; + smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent; + smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset; + smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0; + smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent; + smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset; + smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1; + + /* GPIO Settings */ + smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio; + smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity; + smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio; + smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity; + smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio; + smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity; + smc_pptable->GthrGpio = smc_dpm_table->GthrGpio; + smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity; + + /* LED Display Settings */ + 
smc_pptable->LedPin0 = smc_dpm_table->LedPin0; + smc_pptable->LedPin1 = smc_dpm_table->LedPin1; + smc_pptable->LedPin2 = smc_dpm_table->LedPin2; + smc_pptable->padding8_4 = smc_dpm_table->padding8_4; + + /* GFXCLK PLL Spread Spectrum */ + smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled; + smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent; + smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq; + + /* GFXCLK DFLL Spread Spectrum */ + smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled; + smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent; + smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq; + + /* UCLK Spread Spectrum */ + smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled; + smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent; + smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq; + + /* SOCCLK Spread Spectrum */ + smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled; + smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent; + smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq; + + /* Total board power */ + smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower; + smc_pptable->BoardPadding = smc_dpm_table->BoardPadding; + + /* Mvdd Svi2 Div Ratio Setting */ + smc_pptable->MvddRatio = smc_dpm_table->MvddRatio; + + if (adev->pm.pp_feature & PP_GFXOFF_MASK) { + /* TODO: remove it once SMU fw fix it */ + smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN; + } + + return 0; +} + +static int navi10_store_powerplay_table(struct smu_context *smu) +{ + struct smu_11_0_powerplay_table *powerplay_table = NULL; + struct smu_table_context *table_context = &smu->smu_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; + + if (!table_context->power_play_table) + return -EINVAL; + + powerplay_table = table_context->power_play_table; + + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + + mutex_lock(&smu_baco->mutex); + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) + smu_baco->platform_support = true; + mutex_unlock(&smu_baco->mutex); + + return 0; +} + +static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) +{ + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, + sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + + return 0; +} + +static int navi10_allocate_dpm_context(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + if (smu_dpm->dpm_context) + return -EINVAL; + + smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), + GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + + smu_dpm->dpm_context_size = 
sizeof(struct smu_11_0_dpm_context); + + return 0; +} + +static int navi10_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; + PPTable_t *driver_ppt = NULL; + + driver_ppt = table_context->driver_pptable; + + dpm_context->dpm_tables.soc_table.min = driver_ppt->FreqTableSocclk[0]; + dpm_context->dpm_tables.soc_table.max = driver_ppt->FreqTableSocclk[NUM_SOCCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.gfx_table.min = driver_ppt->FreqTableGfx[0]; + dpm_context->dpm_tables.gfx_table.max = driver_ppt->FreqTableGfx[NUM_GFXCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.uclk_table.min = driver_ppt->FreqTableUclk[0]; + dpm_context->dpm_tables.uclk_table.max = driver_ppt->FreqTableUclk[NUM_UCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.vclk_table.min = driver_ppt->FreqTableVclk[0]; + dpm_context->dpm_tables.vclk_table.max = driver_ppt->FreqTableVclk[NUM_VCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.dclk_table.min = driver_ppt->FreqTableDclk[0]; + dpm_context->dpm_tables.dclk_table.max = driver_ppt->FreqTableDclk[NUM_DCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.dcef_table.min = driver_ppt->FreqTableDcefclk[0]; + dpm_context->dpm_tables.dcef_table.max = driver_ppt->FreqTableDcefclk[NUM_DCEFCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.pixel_table.min = driver_ppt->FreqTablePixclk[0]; + dpm_context->dpm_tables.pixel_table.max = driver_ppt->FreqTablePixclk[NUM_PIXCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.display_table.min = driver_ppt->FreqTableDispclk[0]; + dpm_context->dpm_tables.display_table.max = driver_ppt->FreqTableDispclk[NUM_DISPCLK_DPM_LEVELS - 1]; + + dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0]; + dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1]; + + return 0; +} + +static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) +{ + int ret = 0; + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + + if (enable && power_gate->uvd_gated) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; + } + power_gate->uvd_gated = false; + } else { + if (!enable && !power_gate->uvd_gated) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; + } + power_gate->uvd_gated = true; + } + } + + return 0; +} + +static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + static SmuMetrics_t metrics; + int ret = 0, clk_id = 0; + + if (!value) + return -EINVAL; + + memset(&metrics, 0, sizeof(metrics)); + + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + if (ret) + return ret; + + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return clk_id; + + *value = metrics.CurrClock[clk_id]; + + return ret; +} + +static int navi10_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, char *buf) +{ + int i, size = 0, ret = 0; + uint32_t cur_value = 0, value = 0, count = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_SOCCLK: + case SMU_MCLK: + case SMU_UCLK: + case SMU_FCLK: + case SMU_DCEFCLK: + ret = smu_get_current_clk_freq(smu, 
clk_type, &cur_value); + if (ret) + return size; + /* 10KHz -> MHz */ + cur_value = cur_value / 100; + + size += sprintf(buf, "current clk: %uMhz\n", cur_value); + + ret = smu_get_dpm_level_count(smu, clk_type, &count); + if (ret) + return size; + + for (i = 0; i < count; i++) { + ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (ret) + return size; + + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? "*" : ""); + } + break; + default: + break; + } + + return size; +} + +static int navi10_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, uint32_t mask) +{ + + int ret = 0, size = 0; + uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_SOCCLK: + case SMU_MCLK: + case SMU_UCLK: + case SMU_DCEFCLK: + case SMU_FCLK: + ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); + if (ret) + return size; + + ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); + if (ret) + return size; + + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + if (ret) + return size; + break; + default: + break; + } + + return size; +} + +static int navi10_populate_umd_state_clk(struct smu_context *smu) +{ + int ret = 0; + uint32_t min_sclk_freq = 0, min_mclk_freq = 0; + + ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL); + if (ret) + return ret; + + smu->pstate_sclk = min_sclk_freq * 100; + + ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL); + if (ret) + return ret; + + smu->pstate_mclk = min_mclk_freq * 100; + + return ret; +} + +static int navi10_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks) +{ + int ret = 0, i = 0; + uint32_t level_count = 0, freq = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_DCEFCLK: + case SMU_SOCCLK: + ret = smu_get_dpm_level_count(smu, clk_type, &level_count); + if (ret) + return ret; + + level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS); + clocks->num_levels = level_count; + + for (i = 0; i < level_count; i++) { + ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq); + if (ret) + return ret; + + clocks->data[i].clocks_in_khz = freq * 1000; + clocks->data[i].latency_in_us = 0; + } + break; + default: + break; + } + + return ret; +} + +static int navi10_pre_display_config_changed(struct smu_context *smu) +{ + int ret = 0; + uint32_t max_freq = 0; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + if (ret) + return ret; + + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq); + if (ret) + return ret; + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq); + if (ret) + return ret; + } + + return ret; +} + +static int navi10_display_config_changed(struct smu_context *smu) +{ + int ret = 0; + + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && + !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + if (ret) + return ret; + + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } + + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && + smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && + smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_NumOfDisplays, + smu->display_config->num_display); + if (ret) + return ret; + } + + return ret; +} + +static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) +{ + int ret = 0, i = 0; + uint32_t min_freq, max_freq, force_freq; + enum smu_clk_type clk_type; + + enum smu_clk_type clks[] = { + SMU_GFXCLK, + SMU_MCLK, + SMU_SOCCLK, + }; + + for (i = 0; i < ARRAY_SIZE(clks); i++) { + clk_type = clks[i]; + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + if (ret) + return ret; + + force_freq = highest ? max_freq : min_freq; + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + if (ret) + return ret; + } + + return ret; +} + +static int navi10_unforce_dpm_levels(struct smu_context *smu) +{ + int ret = 0, i = 0; + uint32_t min_freq, max_freq; + enum smu_clk_type clk_type; + + enum smu_clk_type clks[] = { + SMU_GFXCLK, + SMU_MCLK, + SMU_SOCCLK, + }; + + for (i = 0; i < ARRAY_SIZE(clks); i++) { + clk_type = clks[i]; + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + if (ret) + return ret; + } + + return ret; +} + +static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, + false); + if (ret) + return ret; + + *value = metrics.AverageSocketPower << 8; + + return 0; +} + +static int navi10_get_current_activity_percent(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + msleep(1); + + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)&metrics, false); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + *value = metrics.AverageGfxActivity; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + *value = metrics.AverageUclkActivity; + break; + default: + pr_err("Invalid sensor for retrieving clock activity\n"); + return -EINVAL; + } + + return 0; +} + +static bool navi10_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint32_t feature_mask[2]; + unsigned long feature_enabled; + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | + ((uint64_t)feature_mask[1] << 32)); + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) +{ + SmuMetrics_t metrics; + int ret = 0; + + if (!value) + return -EINVAL; + + memset(&metrics, 0, sizeof(metrics)); + + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)&metrics, false); + if (ret) + return ret; + + *value = metrics.CurrFanSpeed; + + return ret; +} + +static int navi10_get_fan_speed_percent(struct smu_context *smu, + uint32_t *speed) +{ + int ret = 0; + uint32_t percent = 0; + uint16_t current_rpm; + PPTable_t *pptable = smu->smu_table.driver_pptable; + + ret = navi10_get_fan_speed(smu, ¤t_rpm); + if (ret) + return ret; + + percent = current_rpm * 100 / pptable->FanMaximumRpm; + *speed = percent > 100 ? 
100 : percent; + + return ret; +} + +static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) +{ + DpmActivityMonitorCoeffInt_t activity_monitor; + uint32_t i, size = 0; + uint16_t workload_type = 0; + static const char *profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "MinFreqType", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int result = 0; + + if (!buf) + return -EINVAL; + + size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9], title[10]); + + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_workload_get_type(smu, i); + result = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, + (void *)(&activity_monitor), false); + if (result) { + pr_err("[%s] Failed to get activity monitor!", __func__); + return result; + } + + size += sprintf(buf + size, "%2d %14s%s:\n", + i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor.Gfx_FPS, + activity_monitor.Gfx_MinFreqStep, + activity_monitor.Gfx_MinActiveFreqType, + activity_monitor.Gfx_MinActiveFreq, + activity_monitor.Gfx_BoosterFreqType, + activity_monitor.Gfx_BoosterFreq, + activity_monitor.Gfx_PD_Data_limit_c, + activity_monitor.Gfx_PD_Data_error_coeff, + activity_monitor.Gfx_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "SOCCLK", + activity_monitor.Soc_FPS, + activity_monitor.Soc_MinFreqStep, + activity_monitor.Soc_MinActiveFreqType, + activity_monitor.Soc_MinActiveFreq, + activity_monitor.Soc_BoosterFreqType, + activity_monitor.Soc_BoosterFreq, + activity_monitor.Soc_PD_Data_limit_c, + activity_monitor.Soc_PD_Data_error_coeff, + activity_monitor.Soc_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 2, + "MEMLK", + activity_monitor.Mem_FPS, + activity_monitor.Mem_MinFreqStep, + activity_monitor.Mem_MinActiveFreqType, + activity_monitor.Mem_MinActiveFreq, + activity_monitor.Mem_BoosterFreqType, + activity_monitor.Mem_BoosterFreq, + activity_monitor.Mem_PD_Data_limit_c, + activity_monitor.Mem_PD_Data_error_coeff, + activity_monitor.Mem_PD_Data_error_rate_coeff); + } + + return size; +} + +static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +{ + DpmActivityMonitorCoeffInt_t activity_monitor; + int workload_type, ret = 0; + + smu->power_profile_mode = input[size]; + + if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size < 0) + return -EINVAL; + + ret = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), false); + if (ret) { + pr_err("[%s] Failed to get activity monitor!", __func__); + return ret; + } + + switch (input[0]) { + case 0: /* Gfxclk */ + 
activity_monitor.Gfx_FPS = input[1]; + activity_monitor.Gfx_MinFreqStep = input[2]; + activity_monitor.Gfx_MinActiveFreqType = input[3]; + activity_monitor.Gfx_MinActiveFreq = input[4]; + activity_monitor.Gfx_BoosterFreqType = input[5]; + activity_monitor.Gfx_BoosterFreq = input[6]; + activity_monitor.Gfx_PD_Data_limit_c = input[7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; + break; + case 1: /* Socclk */ + activity_monitor.Soc_FPS = input[1]; + activity_monitor.Soc_MinFreqStep = input[2]; + activity_monitor.Soc_MinActiveFreqType = input[3]; + activity_monitor.Soc_MinActiveFreq = input[4]; + activity_monitor.Soc_BoosterFreqType = input[5]; + activity_monitor.Soc_BoosterFreq = input[6]; + activity_monitor.Soc_PD_Data_limit_c = input[7]; + activity_monitor.Soc_PD_Data_error_coeff = input[8]; + activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; + break; + case 2: /* Memlk */ + activity_monitor.Mem_FPS = input[1]; + activity_monitor.Mem_MinFreqStep = input[2]; + activity_monitor.Mem_MinActiveFreqType = input[3]; + activity_monitor.Mem_MinActiveFreq = input[4]; + activity_monitor.Mem_BoosterFreqType = input[5]; + activity_monitor.Mem_BoosterFreq = input[6]; + activity_monitor.Mem_PD_Data_limit_c = input[7]; + activity_monitor.Mem_PD_Data_error_coeff = input[8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; + break; + } + + ret = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), true); + if (ret) { + pr_err("[%s] Failed to set activity monitor!", __func__); + return ret; + } + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_workload_get_type(smu, smu->power_profile_mode); + smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + 1 << workload_type); + + return ret; +} + +static int navi10_get_profiling_clk_mask(struct smu_context *smu, + enum amd_dpm_forced_level level, + uint32_t *sclk_mask, + uint32_t *mclk_mask, + uint32_t *soc_mask) +{ + int ret = 0; + uint32_t level_count = 0; + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + if (sclk_mask) + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + if (mclk_mask) + *mclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + if(sclk_mask) { + ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count); + if (ret) + return ret; + *sclk_mask = level_count - 1; + } + + if(mclk_mask) { + ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count); + if (ret) + return ret; + *mclk_mask = level_count - 1; + } + + if(soc_mask) { + ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count); + if (ret) + return ret; + *soc_mask = level_count - 1; + } + } + + return ret; +} + +static int navi10_notify_smc_dispaly_config(struct smu_context *smu) +{ + struct smu_clocks min_clocks = {0}; + struct pp_display_clock_request clock_req; + int ret = 0; + + min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk; + min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk; + min_clocks.memory_clock = smu->display_config->min_mem_set_clock; + + if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { + clock_req.clock_type = amd_pp_dcef_clock; + clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; + if (!smu_display_clock_voltage_request(smu, &clock_req)) { + if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { + ret = smu_send_smc_msg_with_param(smu, + 
SMU_MSG_SetMinDeepSleepDcefclk, + min_clocks.dcef_clock_in_sr/100); + if (ret) { + pr_err("Attempt to set divider for DCEFCLK Failed!"); + return ret; + } + } + } else { + pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); + } + } + + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0); + if (ret) { + pr_err("[%s] Set hard min uclk failed!", __func__); + return ret; + } + } + + return 0; +} + +static int navi10_set_watermarks_table(struct smu_context *smu, + void *watermarks, struct + dm_pp_wm_sets_with_clock_ranges_soc15 + *clock_ranges) +{ + int i; + Watermarks_t *table = watermarks; + + if (!table || !clock_ranges) + return -EINVAL; + + if (clock_ranges->num_wm_dmif_sets > 4 || + clock_ranges->num_wm_mcif_sets > 4) + return -EINVAL; + + for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) { + table->WatermarkRow[1][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].MinUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].MaxUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].WmSetting = (uint8_t) + clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; + } + + for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) { + table->WatermarkRow[0][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].MinUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].MaxUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].WmSetting = (uint8_t) + clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; + } + + return 0; +} + +static int navi10_thermal_get_temperature(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + SmuMetrics_t metrics; + int ret = 0; + + if (!value) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + *value = metrics.TemperatureHotspot * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + *value = metrics.TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + *value = metrics.TemperatureMem * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + default: + pr_err("Invalid sensor for retrieving temp\n"); + return -EINVAL; + } + + return 0; +} + +static int navi10_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint32_t *)data = pptable->FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + case 
AMDGPU_PP_SENSOR_GPU_LOAD: + ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = navi10_get_gpu_power(smu, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + case AMDGPU_PP_SENSOR_EDGE_TEMP: + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data); + *size = 4; + break; + default: + return -EINVAL; + } + + return ret; +} + +static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states) +{ + uint32_t num_discrete_levels = 0; + uint16_t *dpm_levels = NULL; + uint16_t i = 0; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *driver_ppt = NULL; + + if (!clocks_in_khz || !num_states || !table_context->driver_pptable) + return -EINVAL; + + driver_ppt = table_context->driver_pptable; + num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels; + dpm_levels = driver_ppt->FreqTableUclk; + + if (num_discrete_levels == 0 || dpm_levels == NULL) + return -EINVAL; + + *num_states = num_discrete_levels; + for (i = 0; i < num_discrete_levels; i++) { + /* convert to khz */ + *clocks_in_khz = (*dpm_levels) * 1000; + clocks_in_khz++; + dpm_levels++; + } + + return 0; +} + +static int navi10_get_ppfeature_status(struct smu_context *smu, + char *buf) +{ + static const char *ppfeature_name[] = { + "DPM_PREFETCHER", + "DPM_GFXCLK", + "DPM_GFX_PACE", + "DPM_UCLK", + "DPM_SOCCLK", + "DPM_MP0CLK", + "DPM_LINK", + "DPM_DCEFCLK", + "MEM_VDDCI_SCALING", + "MEM_MVDD_SCALING", + "DS_GFXCLK", + "DS_SOCCLK", + "DS_LCLK", + "DS_DCEFCLK", + "DS_UCLK", + "GFX_ULV", + "FW_DSTATE", + "GFXOFF", + "BACO", + "VCN_PG", + "JPEG_PG", + "USB_PG", + "RSMU_SMN_CG", + "PPT", + "TDC", + "GFX_EDC", + "APCC_PLUS", + "GTHR", + "ACDC", + "VR0HOT", + "VR1HOT", + "FW_CTF", + "FAN_CONTROL", + "THERMAL", + "GFX_DCS", + "RM", + "LED_DISPLAY", + "GFX_SS", + "OUT_OF_BAND_MONITOR", + "TEMP_DEPENDENT_VMIN", + "MMHUB_PG", + "ATHUB_PG"}; + static const char *output_title[] = { + "FEATURES", + "BITMASK", + "ENABLEMENT"}; + uint64_t features_enabled; + uint32_t feature_mask[2]; + int i; + int ret = 0; + int size = 0; + + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + PP_ASSERT_WITH_CODE(!ret, + "[GetPPfeatureStatus] Failed to get enabled smc features!", + return ret); + features_enabled = (uint64_t)feature_mask[0] | + (uint64_t)feature_mask[1] << 32; + + size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled); + size += sprintf(buf + size, "%-19s %-22s %s\n", + output_title[0], + output_title[1], + output_title[2]); + for (i = 0; i < (sizeof(ppfeature_name) / sizeof(ppfeature_name[0])); i++) { + size += sprintf(buf + size, "%-19s 0x%016llx %6s\n", + ppfeature_name[i], + 1ULL << i, + (features_enabled & (1ULL << i)) ? 
"Y" : "N"); + } + + return size; +} + +static int navi10_enable_smc_features(struct smu_context *smu, + bool enabled, + uint64_t feature_masks) +{ + struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_low, feature_high; + uint32_t feature_mask[2]; + int ret = 0; + + feature_low = (uint32_t)(feature_masks & 0xFFFFFFFF); + feature_high = (uint32_t)((feature_masks & 0xFFFFFFFF00000000ULL) >> 32); + + if (enabled) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } + + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + if (ret) + return ret; + + mutex_lock(&feature->mutex); + bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, + feature->feature_num); + mutex_unlock(&feature->mutex); + + return 0; +} + +static int navi10_set_ppfeature_status(struct smu_context *smu, + uint64_t new_ppfeature_masks) +{ + uint64_t features_enabled; + uint32_t feature_mask[2]; + uint64_t features_to_enable; + uint64_t features_to_disable; + int ret = 0; + + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + PP_ASSERT_WITH_CODE(!ret, + "[SetPPfeatureStatus] Failed to get enabled smc features!", + return ret); + features_enabled = (uint64_t)feature_mask[0] | + (uint64_t)feature_mask[1] << 32; + + features_to_disable = + features_enabled & ~new_ppfeature_masks; + features_to_enable = + ~features_enabled & new_ppfeature_masks; + + pr_debug("features_to_disable 0x%llx\n", features_to_disable); + pr_debug("features_to_enable 0x%llx\n", features_to_enable); + + if (features_to_disable) { + ret = navi10_enable_smc_features(smu, false, features_to_disable); + PP_ASSERT_WITH_CODE(!ret, + "[SetPPfeatureStatus] Failed to disable smc features!", + return ret); + } + + if (features_to_enable) { + ret = navi10_enable_smc_features(smu, true, features_to_enable); + PP_ASSERT_WITH_CODE(!ret, + "[SetPPfeatureStatus] Failed to enable smc features!", + return ret); + } + + return 0; +} + +static const struct pptable_funcs navi10_ppt_funcs = { + .tables_init = navi10_tables_init, + .alloc_dpm_context = navi10_allocate_dpm_context, + .store_powerplay_table = navi10_store_powerplay_table, + .check_powerplay_table = navi10_check_powerplay_table, + .append_powerplay_table = navi10_append_powerplay_table, + .get_smu_msg_index = navi10_get_smu_msg_index, + .get_smu_clk_index = navi10_get_smu_clk_index, + .get_smu_feature_index = navi10_get_smu_feature_index, + .get_smu_table_index = navi10_get_smu_table_index, + .get_smu_power_index = navi10_get_pwr_src_index, + .get_workload_type = navi10_get_workload_type, + .get_allowed_feature_mask = navi10_get_allowed_feature_mask, + .set_default_dpm_table = navi10_set_default_dpm_table, + .dpm_set_uvd_enable = navi10_dpm_set_uvd_enable, + .get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table, + .print_clk_levels = navi10_print_clk_levels, + .force_clk_levels = navi10_force_clk_levels, + .populate_umd_state_clk = navi10_populate_umd_state_clk, + .get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency, + .pre_display_config_changed = navi10_pre_display_config_changed, + .display_config_changed = 
navi10_display_config_changed, + .notify_smc_dispaly_config = navi10_notify_smc_dispaly_config, + .force_dpm_limit_value = navi10_force_dpm_limit_value, + .unforce_dpm_levels = navi10_unforce_dpm_levels, + .is_dpm_running = navi10_is_dpm_running, + .get_fan_speed_percent = navi10_get_fan_speed_percent, + .get_power_profile_mode = navi10_get_power_profile_mode, + .set_power_profile_mode = navi10_set_power_profile_mode, + .get_profiling_clk_mask = navi10_get_profiling_clk_mask, + .set_watermarks_table = navi10_set_watermarks_table, + .read_sensor = navi10_read_sensor, + .get_uclk_dpm_states = navi10_get_uclk_dpm_states, + .get_ppfeature_status = navi10_get_ppfeature_status, + .set_ppfeature_status = navi10_set_ppfeature_status, +}; + +void navi10_set_ppt_funcs(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + smu->ppt_funcs = &navi10_ppt_funcs; + smu->smc_if_version = SMU11_DRIVER_IF_VERSION; + smu_table->table_count = TABLE_COUNT; +} diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h new file mode 100644 index 000000000000..957288e22f47 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __NAVI10_PPT_H__ +#define __NAVI10_PPT_H__ + +extern void navi10_set_ppt_funcs(struct smu_context *smu); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 463275f88e89..95c7c4dae523 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -22,6 +22,7 @@ #include <linux/firmware.h> #include <linux/module.h> +#include <linux/pci.h> #include "pp_debug.h" #include "amdgpu.h" @@ -29,39 +30,25 @@ #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" -#include "smu11_driver_if.h" #include "soc15_common.h" #include "atom.h" #include "vega20_ppt.h" -#include "pp_thermal.h" +#include "navi10_ppt.h" #include "asic_reg/thm/thm_11_0_2_offset.h" #include "asic_reg/thm/thm_11_0_2_sh_mask.h" -#include "asic_reg/mp/mp_9_0_offset.h" -#include "asic_reg/mp/mp_9_0_sh_mask.h" +#include "asic_reg/mp/mp_11_0_offset.h" +#include "asic_reg/mp/mp_11_0_sh_mask.h" #include "asic_reg/nbio/nbio_7_4_offset.h" -#include "asic_reg/smuio/smuio_9_0_offset.h" -#include "asic_reg/smuio/smuio_9_0_sh_mask.h" +#include "asic_reg/nbio/nbio_7_4_sh_mask.h" +#include "asic_reg/smuio/smuio_11_0_0_offset.h" +#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/vega20_smc.bin"); +MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); -#define SMU11_TOOL_SIZE 0x19000 -#define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0 -#define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 #define SMU11_VOLTAGE_SCALE 4 -#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \ - FEATURE_DPM_GFXCLK_MASK | \ - FEATURE_DPM_UCLK_MASK | \ - FEATURE_DPM_SOCCLK_MASK | \ - FEATURE_DPM_UVD_MASK | \ - FEATURE_DPM_VCE_MASK | \ - FEATURE_DPM_MP0CLK_MASK | \ - FEATURE_DPM_LINK_MASK | \ - FEATURE_DPM_DCEFCLK_MASK) - static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg) { @@ -81,9 +68,9 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) static int smu_v11_0_wait_for_response(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t cur_value, i; + uint32_t cur_value, i, timeout = adev->usec_timeout * 10; - for (i = 0; i < adev->usec_timeout; i++) { + for (i = 0; i < timeout; i++) { cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0) break; @@ -91,7 +78,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) } /* timeout means wrong logic */ - if (i == adev->usec_timeout) + if (i == timeout) return -ETIME; return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 
0 : -EIO; @@ -167,6 +154,9 @@ static int smu_v11_0_init_microcode(struct smu_context *smu) case CHIP_VEGA20: chip_name = "vega20"; break; + case CHIP_NAVI10: + chip_name = "navi10"; + break; default: BUG(); } @@ -205,6 +195,39 @@ out: static int smu_v11_0_load_microcode(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; + const uint32_t *src; + const struct smc_firmware_header_v1_0 *hdr; + uint32_t addr_start = MP1_SRAM; + uint32_t i; + uint32_t mp1_fw_flags; + + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + src = (const uint32_t *)(adev->pm.fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) { + WREG32_PCIE(addr_start, src[i]); + addr_start += 4; + } + + WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), + 1 & MP1_SMN_PUB_CTRL__RESET_MASK); + WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), + 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK); + + for (i = 0; i < adev->usec_timeout; i++) { + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> + MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) + break; + udelay(1); + } + + if (i == adev->usec_timeout) + return -ETIME; + return 0; } @@ -238,31 +261,104 @@ static int smu_v11_0_check_fw_version(struct smu_context *smu) smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; - pr_info("SMU Driver IF Version = 0x%08x, SMU FW Version = 0x%08x (%d.%d.%d)\n", - if_version, smu_version, smu_major, smu_minor, smu_debug); - + /* + * 1. if_version mismatch is not critical as our fw is designed + * to be backward compatible. + * 2. New fw usually brings some optimizations. But that's visible + * only on the paired driver. + * Considering above, we just leave user a warning message instead + * of halt driver loading. 
+ */ if (if_version != smu->smc_if_version) { - pr_err("SMU driver if version not matched\n"); - ret = -EINVAL; + pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, " + "smu fw version = 0x%08x (%d.%d.%d)\n", + smu->smc_if_version, if_version, + smu_version, smu_major, smu_minor, smu_debug); + pr_warn("SMU driver if version not matched\n"); } return ret; } -static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu) +static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) { + struct amdgpu_device *adev = smu->adev; + uint32_t ppt_offset_bytes; + const struct smc_firmware_header_v2_0 *v2; + + v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; + + ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); + *size = le32_to_cpu(v2->ppt_size_bytes); + *table = (uint8_t *)v2 + ppt_offset_bytes; + + return 0; +} + +static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, uint32_t *size, uint32_t pptable_id) +{ + struct amdgpu_device *adev = smu->adev; + const struct smc_firmware_header_v2_1 *v2_1; + struct smc_soft_pptable_entry *entries; + uint32_t pptable_count = 0; + int i = 0; + + v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; + entries = (struct smc_soft_pptable_entry *) + ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); + pptable_count = le32_to_cpu(v2_1->pptable_count); + for (i = 0; i < pptable_count; i++) { + if (le32_to_cpu(entries[i].id) == pptable_id) { + *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); + *size = le32_to_cpu(entries[i].ppt_size_bytes); + break; + } + } + + if (i == pptable_count) + return -EINVAL; + + return 0; +} + +static int smu_v11_0_setup_pptable(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + const struct smc_firmware_header_v1_0 *hdr; int ret, index; - uint16_t size; + uint32_t size; uint8_t frev, crev; void *table; + uint16_t version_major, version_minor; - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - powerplayinfo); + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + version_major = le16_to_cpu(hdr->header.header_version_major); + version_minor = le16_to_cpu(hdr->header.header_version_minor); + if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) { + switch (version_minor) { + case 0: + ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size); + break; + case 1: + ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size, + smu->smu_table.boot_values.pp_table_id); + break; + default: + ret = -EINVAL; + break; + } + if (ret) + return ret; - ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev, - (uint8_t **)&table); - if (ret) - return ret; + } else { + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + powerplayinfo); + + ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, + (uint8_t **)&table); + if (ret) + return ret; + } if (!smu->smu_table.power_play_table) smu->smu_table.power_play_table = table; @@ -308,30 +404,19 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu) struct smu_table *tables = NULL; int ret = 0; - if (smu_table->tables || smu_table->table_count != 0) + if (smu_table->tables || smu_table->table_count == 0) return -EINVAL; - tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL); + tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table), + GFP_KERNEL); if (!tables) return -ENOMEM; smu_table->tables = tables; - 
smu_table->table_count = TABLE_COUNT; - - SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF, - sizeof(DpmActivityMonitorCoeffInt_t), - PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM); + + ret = smu_tables_init(smu, tables); + if (ret) + return ret; ret = smu_v11_0_init_dpm_context(smu); if (ret) @@ -349,8 +434,11 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu) return -EINVAL; kfree(smu_table->tables); + kfree(smu_table->metrics_table); smu_table->tables = NULL; smu_table->table_count = 0; + smu_table->metrics_table = NULL; + smu_table->metrics_time = 0; ret = smu_v11_0_fini_dpm_context(smu); if (ret) @@ -373,13 +461,6 @@ static int smu_v11_0_init_power(struct smu_context *smu) return -ENOMEM; smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context); - smu->metrics_time = 0; - smu->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); - if (!smu->metrics_table) { - kfree(smu_power->power_context); - return -ENOMEM; - } - return 0; } @@ -392,9 +473,7 @@ static int smu_v11_0_fini_power(struct smu_context *smu) if (!smu_power->power_context || smu_power->power_context_size == 0) return -EINVAL; - kfree(smu->metrics_table); kfree(smu_power->power_context); - smu->metrics_table = NULL; smu_power->power_context = NULL; smu_power->power_context_size = 0; @@ -597,11 +676,12 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu) int ret; struct smu_table_context *table_context = &smu->smu_table; + struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE]; if (table_context->driver_pptable) return -EINVAL; - table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL); + table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL); if (!table_context->driver_pptable) return -ENOMEM; @@ -629,15 +709,29 @@ static int smu_v11_0_write_pptable(struct smu_context *smu) struct smu_table_context *table_context = &smu->smu_table; int ret = 0; - ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true); + ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0, + table_context->driver_pptable, true); return ret; } static int smu_v11_0_write_watermarks_table(struct smu_context *smu) { - return smu_update_table(smu, TABLE_WATERMARKS, - smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true); + int ret = 0; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = NULL; + + table = &smu_table->tables[SMU_TABLE_WATERMARKS]; + if (!table) + return -EINVAL; + + if (!table->cpu_addr) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, + true); + + return ret; } static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) @@ -668,7 +762,7 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) static int smu_v11_0_set_tool_table_location(struct smu_context *smu) { int ret = 0; - struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG]; + struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; if 
(tool_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, @@ -683,13 +777,14 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu) return ret; } -static int smu_v11_0_init_display(struct smu_context *smu) +static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) { int ret = 0; if (!smu->pm_enabled) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count); return ret; } @@ -788,17 +883,6 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu, return ret; } -static bool smu_v11_0_is_dpm_running(struct smu_context *smu) -{ - int ret = 0; - uint32_t feature_mask[2]; - unsigned long feature_enabled; - ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2); - feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | - ((uint64_t)feature_mask[1] << 32)); - return !!(feature_enabled & SMC_DPM_FEATURE); -} - static int smu_v11_0_system_features_control(struct smu_context *smu, bool en) { @@ -831,22 +915,23 @@ static int smu_v11_0_notify_display_change(struct smu_context *smu) if (!smu->pm_enabled) return ret; - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1); + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && + smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1); return ret; } static int smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, - PPCLK_e clock_select) + enum smu_clk_type clock_select) { int ret = 0; if (!smu->pm_enabled) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, - clock_select << 16); + smu_clk_get_index(smu, clock_select) << 16); if (ret) { pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); return ret; @@ -861,7 +946,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, /* if DC limit is zero, return AC limit */ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, - clock_select << 16); + smu_clk_get_index(smu, clock_select) << 16); if (ret) { pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!"); return ret; @@ -888,10 +973,10 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) max_sustainable_clocks->phy_clock = 0xFFFFFFFF; max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { ret = smu_v11_0_get_max_sustainable_clock(smu, &(max_sustainable_clocks->uclock), - PPCLK_UCLK); + SMU_UCLK); if (ret) { pr_err("[%s] failed to get max UCLK from SMC!", __func__); @@ -899,10 +984,10 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = smu_v11_0_get_max_sustainable_clock(smu, &(max_sustainable_clocks->soc_clock), - PPCLK_SOCCLK); + SMU_SOCCLK); if (ret) { pr_err("[%s] failed to get max SOCCLK from SMC!", __func__); @@ -910,10 +995,10 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { ret = smu_v11_0_get_max_sustainable_clock(smu, &(max_sustainable_clocks->dcef_clock), - PPCLK_DCEFCLK); + 
SMU_DCEFCLK); if (ret) { pr_err("[%s] failed to get max DCEFCLK from SMC!", __func__); @@ -922,7 +1007,7 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) ret = smu_v11_0_get_max_sustainable_clock(smu, &(max_sustainable_clocks->display_clock), - PPCLK_DISPCLK); + SMU_DISPCLK); if (ret) { pr_err("[%s] failed to get max DISPCLK from SMC!", __func__); @@ -930,7 +1015,7 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) } ret = smu_v11_0_get_max_sustainable_clock(smu, &(max_sustainable_clocks->phy_clock), - PPCLK_PHYCLK); + SMU_PHYCLK); if (ret) { pr_err("[%s] failed to get max PHYCLK from SMC!", __func__); @@ -938,7 +1023,7 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) } ret = smu_v11_0_get_max_sustainable_clock(smu, &(max_sustainable_clocks->pixel_clock), - PPCLK_PIXCLK); + SMU_PIXCLK); if (ret) { pr_err("[%s] failed to get max PIXCLK from SMC!", __func__); @@ -968,7 +1053,7 @@ static int smu_v11_0_get_power_limit(struct smu_context *smu, mutex_unlock(&smu->mutex); } else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - POWER_SOURCE_AC << 16); + smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; @@ -995,7 +1080,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) max_power_limit /= 100; } - if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT)) + if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); if (ret) { pr_err("[%s] Set power limit Failed!", __func__); @@ -1005,22 +1090,29 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return ret; } -static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value) +static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value) { int ret = 0; - uint32_t freq; + uint32_t freq = 0; - if (clk_id >= PPCLK_COUNT || !value) + if (clk_id >= SMU_CLK_COUNT || !value) return -EINVAL; - ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_GetDpmClockFreq, (clk_id << 16)); - if (ret) - return ret; + /* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */ + if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0) + ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); + else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq, + (smu_clk_get_index(smu, clk_id) << 16)); + if (ret) + return ret; - ret = smu_read_smc_arg(smu, &freq); - if (ret) - return ret; + ret = smu_read_smc_arg(smu, &freq); + if (ret) + return ret; + } freq *= 100; *value = freq; @@ -1028,38 +1120,19 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_ return ret; } -static int smu_v11_0_get_thermal_range(struct smu_context *smu, - struct PP_TemperatureRange *range) -{ - PPTable_t *pptable = smu->smu_table.driver_pptable; - memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange)); - - range->max = pptable->TedgeLimit * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->ThotspotLimit * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->ThbmLimit * - 
PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return 0; -} - static int smu_v11_0_set_thermal_range(struct smu_context *smu, - struct PP_TemperatureRange *range) + struct smu_temperature_range *range) { struct amdgpu_device *adev = smu->adev; - int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; uint32_t val; + if (!range) + return -EINVAL; + if (low < range->min) low = range->min; if (high > range->max) @@ -1071,8 +1144,10 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); @@ -1094,22 +1169,10 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu) return 0; } -static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu) -{ - int ret; - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; - - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget, - (uint32_t)pptable->FanTargetTemperature); - - return ret; -} - static int smu_v11_0_start_thermal_control(struct smu_context *smu) { int ret = 0; - struct PP_TemperatureRange range = { + struct smu_temperature_range range = { TEMP_RANGE_MIN, TEMP_RANGE_MAX, TEMP_RANGE_MAX, @@ -1123,7 +1186,7 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) if (!smu->pm_enabled) return ret; - smu_v11_0_get_thermal_range(smu, &range); + ret = smu_get_thermal_temperature_range(smu, &range); if (smu->smu_table.thermal_controller_type) { ret = smu_v11_0_set_thermal_range(smu, &range); @@ -1133,7 +1196,8 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) ret = smu_v11_0_enable_thermal_alert(smu); if (ret) return ret; - ret = smu_v11_0_set_thermal_fan_table(smu); + + ret = smu_set_thermal_fan_table(smu); if (ret) return ret; } @@ -1151,115 +1215,6 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } -static int smu_v11_0_get_metrics_table(struct smu_context *smu, - SmuMetrics_t *metrics_table) -{ - int ret = 0; - - if (!smu->metrics_time || time_after(jiffies, smu->metrics_time + HZ / 1000)) { - ret = smu_update_table(smu, TABLE_SMU_METRICS, - (void *)metrics_table, false); - if (ret) { - pr_info("Failed to export SMU metrics table!\n"); - return ret; - } - memcpy(smu->metrics_table, metrics_table, 
sizeof(SmuMetrics_t)); - smu->metrics_time = jiffies; - } else - memcpy(metrics_table, smu->metrics_table, sizeof(SmuMetrics_t)); - - return ret; -} - -static int smu_v11_0_get_current_activity_percent(struct smu_context *smu, - enum amd_pp_sensors sensor, - uint32_t *value) -{ - int ret = 0; - SmuMetrics_t metrics; - - if (!value) - return -EINVAL; - - ret = smu_v11_0_get_metrics_table(smu, &metrics); - if (ret) - return ret; - - switch (sensor) { - case AMDGPU_PP_SENSOR_GPU_LOAD: - *value = metrics.AverageGfxActivity; - break; - case AMDGPU_PP_SENSOR_MEM_LOAD: - *value = metrics.AverageUclkActivity; - break; - default: - pr_err("Invalid sensor for retrieving clock activity\n"); - return -EINVAL; - } - - return 0; -} - -static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, - enum amd_pp_sensors sensor, - uint32_t *value) -{ - struct amdgpu_device *adev = smu->adev; - SmuMetrics_t metrics; - uint32_t temp = 0; - int ret = 0; - - if (!value) - return -EINVAL; - - ret = smu_v11_0_get_metrics_table(smu, &metrics); - if (ret) - return ret; - - switch (sensor) { - case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); - temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> - CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; - - temp = temp & 0x1ff; - temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES; - - *value = temp; - break; - case AMDGPU_PP_SENSOR_EDGE_TEMP: - *value = metrics.TemperatureEdge * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - break; - case AMDGPU_PP_SENSOR_MEM_TEMP: - *value = metrics.TemperatureHBM * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - break; - default: - pr_err("Invalid sensor for retrieving temp\n"); - return -EINVAL; - } - - return 0; -} - -static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value) -{ - int ret = 0; - SmuMetrics_t metrics; - - if (!value) - return -EINVAL; - - ret = smu_v11_0_get_metrics_table(smu, &metrics); - if (ret) - return ret; - - *value = metrics.CurrSocketPower << 8; - - return 0; -} - static uint16_t convert_to_vddc(uint8_t vid) { return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); @@ -1288,60 +1243,33 @@ static int smu_v11_0_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *pptable = table_context->driver_pptable; int ret = 0; switch (sensor) { - case AMDGPU_PP_SENSOR_GPU_LOAD: - case AMDGPU_PP_SENSOR_MEM_LOAD: - ret = smu_v11_0_get_current_activity_percent(smu, - sensor, - (uint32_t *)data); - *size = 4; - break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data); + ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data); - *size = 4; - break; - case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - case AMDGPU_PP_SENSOR_EDGE_TEMP: - case AMDGPU_PP_SENSOR_MEM_TEMP: - ret = smu_v11_0_thermal_get_temperature(smu, sensor, (uint32_t *)data); - *size = 4; - break; - case AMDGPU_PP_SENSOR_GPU_POWER: - ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data); + ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_VDDGFX: ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; - case AMDGPU_PP_SENSOR_UVD_POWER: - *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 
1 : 0; - *size = 4; - break; - case AMDGPU_PP_SENSOR_VCE_POWER: - *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0; - *size = 4; - break; case AMDGPU_PP_SENSOR_MIN_FAN_RPM: *(uint32_t *)data = 0; *size = 4; break; - case AMDGPU_PP_SENSOR_MAX_FAN_RPM: - *(uint32_t *)data = pptable->FanMaximumRpm; - *size = 4; - break; default: ret = smu_common_read_sensor(smu, sensor, data, size); break; } + /* try get sensor data by asic */ + if (ret) + ret = smu_asic_read_sensor(smu, sensor, data, size); + if (ret) *size = 0; @@ -1355,24 +1283,29 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, { enum amd_pp_clock_type clk_type = clock_req->clock_type; int ret = 0; - PPCLK_e clk_select = 0; + enum smu_clk_type clk_select = 0; uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; if (!smu->pm_enabled) return -EINVAL; - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) { + + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { switch (clk_type) { case amd_pp_dcef_clock: - clk_select = PPCLK_DCEFCLK; + clk_select = SMU_DCEFCLK; break; case amd_pp_disp_clock: - clk_select = PPCLK_DISPCLK; + clk_select = SMU_DISPCLK; break; case amd_pp_pixel_clock: - clk_select = PPCLK_PIXCLK; + clk_select = SMU_PIXCLK; break; case amd_pp_phy_clock: - clk_select = PPCLK_PHYCLK; + clk_select = SMU_PHYCLK; + break; + case amd_pp_mem_clock: + clk_select = SMU_UCLK; break; default: pr_info("[%s] Invalid Clock Type!", __func__); @@ -1383,86 +1316,29 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, if (ret) goto failed; + mutex_lock(&smu->mutex); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (clk_select << 16) | clk_freq); + (smu_clk_get_index(smu, clk_select) << 16) | clk_freq); + mutex_unlock(&smu->mutex); } failed: return ret; } -static int smu_v11_0_set_watermarks_table(struct smu_context *smu, - Watermarks_t *table, struct - dm_pp_wm_sets_with_clock_ranges_soc15 - *clock_ranges) -{ - int i; - - if (!table || !clock_ranges) - return -EINVAL; - - if (clock_ranges->num_wm_dmif_sets > 4 || - clock_ranges->num_wm_mcif_sets > 4) - return -EINVAL; - - for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) { - table->WatermarkRow[1][i].MinClock = - cpu_to_le16((uint16_t) - (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz / - 1000)); - table->WatermarkRow[1][i].MaxClock = - cpu_to_le16((uint16_t) - (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz / - 1000)); - table->WatermarkRow[1][i].MinUclk = - cpu_to_le16((uint16_t) - (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz / - 1000)); - table->WatermarkRow[1][i].MaxUclk = - cpu_to_le16((uint16_t) - (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz / - 1000)); - table->WatermarkRow[1][i].WmSetting = (uint8_t) - clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; - } - - for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) { - table->WatermarkRow[0][i].MinClock = - cpu_to_le16((uint16_t) - (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz / - 1000)); - table->WatermarkRow[0][i].MaxClock = - cpu_to_le16((uint16_t) - (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz / - 1000)); - table->WatermarkRow[0][i].MinUclk = - cpu_to_le16((uint16_t) - (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz / - 1000)); - table->WatermarkRow[0][i].MaxUclk = - cpu_to_le16((uint16_t) - (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz / - 
1000)); - table->WatermarkRow[0][i].WmSetting = (uint8_t) - clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; - } - - return 0; -} - static int smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) { int ret = 0; - struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS]; - Watermarks_t *table = watermarks->cpu_addr; + struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; + void *table = watermarks->cpu_addr; if (!smu->disable_watermark && - smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) && - smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) { - smu_v11_0_set_watermarks_table(smu, table, clock_ranges); + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + smu_set_watermarks_table(smu, table, clock_ranges); smu->watermarks_bitmap |= WATERMARKS_EXIST; smu->watermarks_bitmap &= ~WATERMARKS_LOADED; } @@ -1470,393 +1346,31 @@ smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct return ret; } -static int smu_v11_0_get_clock_ranges(struct smu_context *smu, - uint32_t *clock, - PPCLK_e clock_select, - bool max) -{ - int ret; - *clock = 0; - if (max) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, - (clock_select << 16)); - if (ret) { - pr_err("[GetClockRanges] Failed to get max clock from SMC!\n"); - return ret; - } - smu_read_smc_arg(smu, clock); - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, - (clock_select << 16)); - if (ret) { - pr_err("[GetClockRanges] Failed to get min clock from SMC!\n"); - return ret; - } - smu_read_smc_arg(smu, clock); - } - - return 0; -} - -static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low) +static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) { - uint32_t gfx_clk; - int ret; - - if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) { - pr_err("[GetSclks]: gfxclk dpm not enabled!\n"); - return -EPERM; - } - - if (low) { - ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false); - if (ret) { - pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n"); - return ret; - } - } else { - ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true); - if (ret) { - pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n"); - return ret; - } - } - - return (gfx_clk * 100); -} - -static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low) -{ - uint32_t mem_clk; - int ret; - - if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { - pr_err("[GetMclks]: memclk dpm not enabled!\n"); - return -EPERM; - } - - if (low) { - ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false); - if (ret) { - pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n"); - return ret; - } - } else { - ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_GFXCLK, true); - if (ret) { - pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n"); - return ret; - } - } - - return (mem_clk * 100); -} - -static int smu_v11_0_set_od8_default_settings(struct smu_context *smu, - bool initialize) -{ - struct smu_table_context *table_context = &smu->smu_table; - int ret; - - if (initialize) { - if (table_context->overdrive_table) - return -EINVAL; - - table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL); - - if (!table_context->overdrive_table) - return -ENOMEM; - - ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false); - if (ret) { - 
pr_err("Failed to export over drive table!\n"); - return ret; - } - - smu_set_default_od8_settings(smu); - } - - ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true); - if (ret) { - pr_err("Failed to import over drive table!\n"); - return ret; - } - - return 0; -} + int ret = 0; + struct amdgpu_device *adev = smu->adev; -static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile) -{ - int pplib_workload = 0; - - switch (power_profile) { - case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: - pplib_workload = WORKLOAD_DEFAULT_BIT; - break; - case PP_SMC_POWER_PROFILE_FULLSCREEN3D: - pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; - break; - case PP_SMC_POWER_PROFILE_POWERSAVING: - pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; - break; - case PP_SMC_POWER_PROFILE_VIDEO: - pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; - break; - case PP_SMC_POWER_PROFILE_VR: - pplib_workload = WORKLOAD_PPLIB_VR_BIT; - break; - case PP_SMC_POWER_PROFILE_COMPUTE: - pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; - break; - case PP_SMC_POWER_PROFILE_CUSTOM: - pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT; + switch (adev->asic_type) { + case CHIP_VEGA20: + break; + case CHIP_NAVI10: + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) + return 0; + mutex_lock(&smu->mutex); + if (enable) + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + else + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + mutex_unlock(&smu->mutex); + break; + default: break; } - return pplib_workload; -} - -static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf) -{ - DpmActivityMonitorCoeffInt_t activity_monitor; - uint32_t i, size = 0; - uint16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - static const char *title[] = { - "PROFILE_INDEX(NAME)", - "CLOCK_TYPE(NAME)", - "FPS", - "UseRlcBusy", - "MinActiveFreqType", - "MinActiveFreq", - "BoosterFreqType", - "BoosterFreq", - "PD_Data_limit_c", - "PD_Data_error_coeff", - "PD_Data_error_rate_coeff"}; - int result = 0; - - if (!smu->pm_enabled || !buf) - return -EINVAL; - - size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", - title[0], title[1], title[2], title[3], title[4], title[5], - title[6], title[7], title[8], title[9], title[10]); - - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i); - result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF, - workload_type, &activity_monitor, false); - if (result) { - pr_err("[%s] Failed to get activity monitor!", __func__); - return result; - } - - size += sprintf(buf + size, "%2d %14s%s:\n", - i, profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); - - size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", - " ", - 0, - "GFXCLK", - activity_monitor.Gfx_FPS, - activity_monitor.Gfx_UseRlcBusy, - activity_monitor.Gfx_MinActiveFreqType, - activity_monitor.Gfx_MinActiveFreq, - activity_monitor.Gfx_BoosterFreqType, - activity_monitor.Gfx_BoosterFreq, - activity_monitor.Gfx_PD_Data_limit_c, - activity_monitor.Gfx_PD_Data_error_coeff, - activity_monitor.Gfx_PD_Data_error_rate_coeff); - - size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", - " ", - 1, - "SOCCLK", - activity_monitor.Soc_FPS, - activity_monitor.Soc_UseRlcBusy, - activity_monitor.Soc_MinActiveFreqType, - activity_monitor.Soc_MinActiveFreq, - activity_monitor.Soc_BoosterFreqType, - activity_monitor.Soc_BoosterFreq, - activity_monitor.Soc_PD_Data_limit_c, - activity_monitor.Soc_PD_Data_error_coeff, - activity_monitor.Soc_PD_Data_error_rate_coeff); - - size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", - " ", - 2, - "UCLK", - activity_monitor.Mem_FPS, - activity_monitor.Mem_UseRlcBusy, - activity_monitor.Mem_MinActiveFreqType, - activity_monitor.Mem_MinActiveFreq, - activity_monitor.Mem_BoosterFreqType, - activity_monitor.Mem_BoosterFreq, - activity_monitor.Mem_PD_Data_limit_c, - activity_monitor.Mem_PD_Data_error_coeff, - activity_monitor.Mem_PD_Data_error_rate_coeff); - - size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", - " ", - 3, - "FCLK", - activity_monitor.Fclk_FPS, - activity_monitor.Fclk_UseRlcBusy, - activity_monitor.Fclk_MinActiveFreqType, - activity_monitor.Fclk_MinActiveFreq, - activity_monitor.Fclk_BoosterFreqType, - activity_monitor.Fclk_BoosterFreq, - activity_monitor.Fclk_PD_Data_limit_c, - activity_monitor.Fclk_PD_Data_error_coeff, - activity_monitor.Fclk_PD_Data_error_rate_coeff); - } - - return size; -} - -static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) -{ - DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type = 0, ret = 0; - - smu->power_profile_mode = input[size]; - - if (!smu->pm_enabled) - return ret; - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); - return -EINVAL; - } - - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false); - if (ret) { - pr_err("[%s] Failed to get activity monitor!", __func__); - return ret; - } - - switch (input[0]) { - case 0: /* Gfxclk */ - activity_monitor.Gfx_FPS = input[1]; - activity_monitor.Gfx_UseRlcBusy = input[2]; - activity_monitor.Gfx_MinActiveFreqType = input[3]; - activity_monitor.Gfx_MinActiveFreq = input[4]; - activity_monitor.Gfx_BoosterFreqType = input[5]; - activity_monitor.Gfx_BoosterFreq = input[6]; - activity_monitor.Gfx_PD_Data_limit_c = input[7]; - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; - break; - case 1: /* Socclk */ - activity_monitor.Soc_FPS = input[1]; - activity_monitor.Soc_UseRlcBusy = input[2]; - activity_monitor.Soc_MinActiveFreqType = input[3]; - activity_monitor.Soc_MinActiveFreq = input[4]; - activity_monitor.Soc_BoosterFreqType = input[5]; - activity_monitor.Soc_BoosterFreq = input[6]; - activity_monitor.Soc_PD_Data_limit_c = input[7]; - activity_monitor.Soc_PD_Data_error_coeff = input[8]; - 
activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; - break; - case 2: /* Uclk */ - activity_monitor.Mem_FPS = input[1]; - activity_monitor.Mem_UseRlcBusy = input[2]; - activity_monitor.Mem_MinActiveFreqType = input[3]; - activity_monitor.Mem_MinActiveFreq = input[4]; - activity_monitor.Mem_BoosterFreqType = input[5]; - activity_monitor.Mem_BoosterFreq = input[6]; - activity_monitor.Mem_PD_Data_limit_c = input[7]; - activity_monitor.Mem_PD_Data_error_coeff = input[8]; - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; - break; - case 3: /* Fclk */ - activity_monitor.Fclk_FPS = input[1]; - activity_monitor.Fclk_UseRlcBusy = input[2]; - activity_monitor.Fclk_MinActiveFreqType = input[3]; - activity_monitor.Fclk_MinActiveFreq = input[4]; - activity_monitor.Fclk_BoosterFreqType = input[5]; - activity_monitor.Fclk_BoosterFreq = input[6]; - activity_monitor.Fclk_PD_Data_limit_c = input[7]; - activity_monitor.Fclk_PD_Data_error_coeff = input[8]; - activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; - break; - } - - ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF, - WORKLOAD_PPLIB_COMPUTE_BIT, &activity_monitor, true); - if (ret) { - pr_err("[%s] Failed to set activity monitor!", __func__); - return ret; - } - } - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = - smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode); - smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); - return ret; } -static int smu_v11_0_update_od8_settings(struct smu_context *smu, - uint32_t index, - uint32_t value) -{ - struct smu_table_context *table_context = &smu->smu_table; - int ret; - - ret = smu_update_table(smu, TABLE_OVERDRIVE, - table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; - } - - smu_update_specified_od8_value(smu, index, value); - - ret = smu_update_table(smu, TABLE_OVERDRIVE, - table_context->overdrive_table, true); - if (ret) { - pr_err("Failed to import over drive table!\n"); - return ret; - } - - return 0; -} - -static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable) -{ - if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT)) - return 0; - - if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) - return 0; - - return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable); -} - -static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable) -{ - if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT)) - return 0; - - if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT)) - return 0; - - return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable); -} - static int smu_v11_0_get_current_rpm(struct smu_context *smu, uint32_t *current_rpm) { @@ -1877,37 +1391,21 @@ static int smu_v11_0_get_current_rpm(struct smu_context *smu, static uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { - if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT)) + if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) return AMD_FAN_CTRL_MANUAL; else return AMD_FAN_CTRL_AUTO; } static int -smu_v11_0_get_fan_speed_percent(struct smu_context *smu, - uint32_t *speed) -{ - int ret = 0; - uint32_t percent = 0; - uint32_t current_rpm; - PPTable_t *pptable = smu->smu_table.driver_pptable; - - ret = smu_v11_0_get_current_rpm(smu, ¤t_rpm); - percent = current_rpm * 100 / pptable->FanMaximumRpm; - *speed = percent > 100 ? 
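As an aside, the removed smu_v11_0_dpm_set_uvd_enable()/_vce_enable() helpers above follow a small guard pattern: skip the SMC round trip when the feature is unsupported or already in the requested state. A minimal model of that pattern, with made-up names:

#include <stdio.h>
#include <stdbool.h>

struct feature { bool supported; bool enabled; };

static int set_feature(struct feature *f, bool enable)
{
    if (!f->supported)
        return 0;             /* nothing to do on this ASIC */
    if (f->enabled == enable)
        return 0;             /* already in the requested state, skip the message */
    f->enabled = enable;      /* the driver sends an enable/disable message here */
    return 0;
}

int main(void)
{
    struct feature uvd = { .supported = true, .enabled = false };
    int ret = set_feature(&uvd, true);

    printf("ret=%d enabled=%d\n", ret, uvd.enabled);
    return 0;
}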
100 : percent; - - return ret; -} - -static int smu_v11_0_smc_fan_control(struct smu_context *smu, bool start) { int ret = 0; - if (smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT)) + if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) return 0; - ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start); + ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start); if (ret) pr_err("[%s]%s smc FAN CONTROL feature failed!", __func__, (start ? "Start" : "Stop")); @@ -2020,6 +1518,9 @@ set_fan_speed_rpm_failed: return ret; } +#define XGMI_STATE_D0 1 +#define XGMI_STATE_D3 0 + static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { @@ -2032,6 +1533,208 @@ static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, return ret; } +#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ +#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ + +static int smu_v11_0_irq_process(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + + if (client_id == SOC15_IH_CLIENTID_THM) { + switch (src_id) { + case THM_11_0__SRCID__THM_DIG_THERM_L2H: + pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", + PCI_BUS_NUM(adev->pdev->devfn), + PCI_SLOT(adev->pdev->devfn), + PCI_FUNC(adev->pdev->devfn)); + break; + case THM_11_0__SRCID__THM_DIG_THERM_H2L: + pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n", + PCI_BUS_NUM(adev->pdev->devfn), + PCI_SLOT(adev->pdev->devfn), + PCI_FUNC(adev->pdev->devfn)); + break; + default: + pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n", + src_id, + PCI_BUS_NUM(adev->pdev->devfn), + PCI_SLOT(adev->pdev->devfn), + PCI_FUNC(adev->pdev->devfn)); + break; + + } + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = +{ + .process = smu_v11_0_irq_process, +}; + +static int smu_v11_0_register_irq_handler(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct amdgpu_irq_src *irq_src = smu->irq_source; + int ret = 0; + + /* already register */ + if (irq_src) + return 0; + + irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); + if (!irq_src) + return -ENOMEM; + smu->irq_source = irq_src; + + irq_src->funcs = &smu_v11_0_irq_funcs; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_L2H, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_H2L, + irq_src); + if (ret) + return ret; + + return ret; +} + +static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL; + + if (!max_clocks || !table_context->max_sustainable_clocks) + return -EINVAL; + + sustainable_clocks = table_context->max_sustainable_clocks; + + max_clocks->dcfClockInKhz = + (unsigned int) sustainable_clocks->dcef_clock * 1000; + max_clocks->displayClockInKhz = + (unsigned int) sustainable_clocks->display_clock * 1000; + max_clocks->phyClockInKhz = + (unsigned int) sustainable_clocks->phy_clock * 1000; + max_clocks->pixelClockInKhz = + (unsigned int) sustainable_clocks->pixel_clock * 1000; + max_clocks->uClockInKhz = + (unsigned int) 
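The thermal interrupt handler added above keys off the two THM source ids (0 for the low-to-high crossing, 1 for the high-to-low crossing). A small standalone model of that classification, using placeholder names:

#include <stdio.h>
#include <stdint.h>

#define SRCID_THERM_L2H 0  /* temperature rose above the high threshold */
#define SRCID_THERM_H2L 1  /* temperature fell below the low threshold */

static const char *thermal_event_name(uint32_t src_id)
{
    switch (src_id) {
    case SRCID_THERM_L2H: return "over temperature range";
    case SRCID_THERM_H2L: return "under temperature range";
    default:              return "unknown thermal event";
    }
}

int main(void)
{
    for (uint32_t id = 0; id < 3; id++)
        printf("src_id %u -> %s\n", id, thermal_event_name(id));
    return 0;
}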
sustainable_clocks->uclock * 1000; + max_clocks->socClockInKhz = + (unsigned int) sustainable_clocks->soc_clock * 1000; + max_clocks->dscClockInKhz = 0; + max_clocks->dppClockInKhz = 0; + max_clocks->fabricClockInKhz = 0; + + return 0; +} + +static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); + mutex_unlock(&smu->mutex); + + return ret; +} + +static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq) +{ + return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); +} + +static bool smu_v11_0_baco_is_support(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct smu_baco_context *smu_baco = &smu->smu_baco; + uint32_t val; + bool baco_support; + + mutex_lock(&smu_baco->mutex); + baco_support = smu_baco->platform_support; + mutex_unlock(&smu_baco->mutex); + + if (!baco_support) + return false; + + if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) + return true; + + return false; +} + +static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT; + + mutex_lock(&smu_baco->mutex); + baco_state = smu_baco->state; + mutex_unlock(&smu_baco->mutex); + + return baco_state; +} + +static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) +{ + + struct smu_baco_context *smu_baco = &smu->smu_baco; + int ret = 0; + + if (smu_v11_0_baco_get_state(smu) == state) + return 0; + + mutex_lock(&smu_baco->mutex); + + if (state == SMU_BACO_STATE_ENTER) + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO); + else + ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + if (ret) + goto out; + + smu_baco->state = state; +out: + mutex_unlock(&smu_baco->mutex); + return ret; +} + +static int smu_v11_0_baco_reset(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); + if (ret) + return ret; + + ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); + if (ret) + return ret; + + msleep(10); + + ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); + if (ret) + return ret; + + return ret; +} + static const struct smu_funcs smu_v11_0_funcs = { .init_microcode = smu_v11_0_init_microcode, .load_microcode = smu_v11_0_load_microcode, @@ -2040,7 +1743,7 @@ static const struct smu_funcs smu_v11_0_funcs = { .send_smc_msg = smu_v11_0_send_msg, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, .read_smc_arg = smu_v11_0_read_arg, - .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios, + .setup_pptable = smu_v11_0_setup_pptable, .init_smc_tables = smu_v11_0_init_smc_tables, .fini_smc_tables = smu_v11_0_fini_smc_tables, .init_power = smu_v11_0_init_power, @@ -2055,10 +1758,9 @@ static const struct smu_funcs smu_v11_0_funcs = { .write_watermarks_table = smu_v11_0_write_watermarks_table, .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, .set_tool_table_location = smu_v11_0_set_tool_table_location, - .init_display = smu_v11_0_init_display, + .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, - .is_dpm_running = smu_v11_0_is_dpm_running, .system_features_control = 
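The BACO helpers above amount to a small state machine: arm the D3 sequence, enter BACO, wait briefly, then exit, with the cached state short-circuiting redundant messages. A compilable sketch of that sequencing follows; the context struct and helpers are illustrative, not the driver's types.

#include <stdio.h>

enum baco_state { BACO_STATE_EXIT = 0, BACO_STATE_ENTER = 1 };

struct baco_ctx {
    enum baco_state state;    /* cached state, guarded by a mutex in the driver */
};

static int baco_set_state(struct baco_ctx *ctx, enum baco_state state)
{
    if (ctx->state == state)
        return 0;             /* already there, no message needed */
    /* the driver sends SMU_MSG_EnterBaco or SMU_MSG_ExitBaco here */
    ctx->state = state;
    return 0;
}

static int baco_reset(struct baco_ctx *ctx)
{
    int ret;

    /* the driver first arms the D3 sequence (SMU_MSG_ArmD3, BACO_SEQ_BACO) */
    ret = baco_set_state(ctx, BACO_STATE_ENTER);
    if (ret)
        return ret;
    /* the driver sleeps ~10 ms here before leaving BACO again */
    return baco_set_state(ctx, BACO_STATE_EXIT);
}

int main(void)
{
    struct baco_ctx ctx = { .state = BACO_STATE_EXIT };
    int ret = baco_reset(&ctx);

    printf("baco_reset=%d final_state=%d\n", ret, ctx.state);
    return 0;
}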
smu_v11_0_system_features_control, .update_feature_enable_state = smu_v11_0_update_feature_enable_state, .notify_display_change = smu_v11_0_notify_display_change, @@ -2071,22 +1773,20 @@ static const struct smu_funcs smu_v11_0_funcs = { .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_sclk = smu_v11_0_dpm_get_sclk, - .get_mclk = smu_v11_0_dpm_get_mclk, - .set_od8_default_settings = smu_v11_0_set_od8_default_settings, - .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload, - .get_power_profile_mode = smu_v11_0_get_power_profile_mode, - .set_power_profile_mode = smu_v11_0_set_power_profile_mode, - .update_od8_settings = smu_v11_0_update_od8_settings, - .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable, - .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable, .get_current_rpm = smu_v11_0_get_current_rpm, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support = smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, }; void smu_v11_0_set_smu_funcs(struct smu_context *smu) @@ -2098,6 +1798,9 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) case CHIP_VEGA20: vega20_set_ppt_funcs(smu); break; + case CHIP_NAVI10: + navi10_set_ppt_funcs(smu); + break; default: pr_warn("Unknown asic for smu11\n"); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 7184d39dcbee..15590fd86ef4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2705,8 +2705,6 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); - memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); - result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); if (0 == result) @@ -2936,6 +2934,7 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) } const struct pp_smumgr_func ci_smu_funcs = { + .name = "ci_smu", .smu_init = ci_smu_init, .smu_fini = ci_smu_fini, .start_smu = ci_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 0ce85b73338e..da025b1d302d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -2643,6 +2643,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, } const struct pp_smumgr_func fiji_smu_funcs = { + .name = "fiji_smu", .smu_init = &fiji_smu_init, .smu_fini = &smu7_smu_fini, .start_smu = &fiji_start_smu, diff --git 
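The smu_v11_0_set_smu_funcs() hunk above adds Navi10 to a per-ASIC dispatch: pick a ppt-funcs table by chip id and warn on unknown parts. A trimmed-down illustration with invented type and chip names:

#include <stdio.h>

enum chip_sketch { CHIP_SK_VEGA20, CHIP_SK_NAVI10, CHIP_SK_OTHER };

struct ppt_funcs_sketch { const char *name; };

static const struct ppt_funcs_sketch vega20_funcs = { .name = "vega20_ppt" };
static const struct ppt_funcs_sketch navi10_funcs = { .name = "navi10_ppt" };

static const struct ppt_funcs_sketch *select_ppt_funcs(enum chip_sketch chip)
{
    switch (chip) {
    case CHIP_SK_VEGA20: return &vega20_funcs;
    case CHIP_SK_NAVI10: return &navi10_funcs;   /* the case added in this series */
    default:             return NULL;            /* "Unknown asic for smu11" */
    }
}

int main(void)
{
    const struct ppt_funcs_sketch *f = select_ppt_funcs(CHIP_SK_NAVI10);

    printf("%s\n", f ? f->name : "unknown");
    return 0;
}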
a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 73091ac0b647..732005c03a82 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -2634,8 +2634,6 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); - memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); - result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); if (0 == result) @@ -2662,6 +2660,7 @@ static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) } const struct pp_smumgr_func iceland_smu_funcs = { + .name = "iceland_smu", .smu_init = &iceland_smu_init, .smu_fini = &smu7_smu_fini, .start_smu = &iceland_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index d6052e6daef2..dc754447f0dd 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -2094,6 +2094,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) return 0; } + /* use hardware fan control */ + if (hwmgr->thermal_controller.use_hw_fan_control) + return 0; + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. usPWMMin * duty100; do_div(tmp64, 10000); @@ -2552,6 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr, } const struct pp_smumgr_func polaris10_smu_funcs = { + .name = "polaris10_smu", .smu_init = polaris10_smu_init, .smu_fini = smu7_smu_fini, .start_smu = polaris10_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index d409925d1f7d..7fb3e57cfc41 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -293,6 +293,7 @@ static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint1 const struct pp_smumgr_func smu10_smu_funcs = { + .name = "smu10_smu", .smu_init = &smu10_smu_init, .smu_fini = &smu10_smu_fini, .start_smu = &smu10_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index e2787e14a500..8189fe402c6d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -881,6 +881,7 @@ static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr) } const struct pp_smumgr_func smu8_smu_funcs = { + .name = "smu8_smu", .smu_init = smu8_smu_init, .smu_fini = smu8_smu_fini, .start_smu = smu8_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index e4e976b9d64e..f19bac7ef7ba 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -3117,8 +3117,6 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); - memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); - result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); if (!result) @@ -3241,6 +3239,7 @@ static int 
tonga_update_dpm_settings(struct pp_hwmgr *hwmgr, } const struct pp_smumgr_func tonga_smu_funcs = { + .name = "tonga_smu", .smu_init = &tonga_smu_init, .smu_fini = &smu7_smu_fini, .start_smu = &tonga_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 672986e9eecb..967d34b1dc51 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -348,6 +348,7 @@ static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, } const struct pp_smumgr_func vega10_smu_funcs = { + .name = "vega10_smu", .smu_init = &vega10_smu_init, .smu_fini = &vega10_smu_fini, .start_smu = &vega10_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index 1eaf0fa28ef7..bab3df85fdcd 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -386,6 +386,7 @@ static int vega12_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, } const struct pp_smumgr_func vega12_smu_funcs = { + .name = "vega12_smu", .smu_init = &vega12_smu_init, .smu_fini = &vega12_smu_fini, .start_smu = &vega12_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index f301a73f6df1..957446cf467e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -592,6 +592,7 @@ static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, } const struct pp_smumgr_func vega20_smu_funcs = { + .name = "vega20_smu", .smu_init = &vega20_smu_init, .smu_fini = &vega20_smu_fini, .start_smu = &vega20_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index d499204b2184..7c960b07746f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -2279,6 +2279,7 @@ static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) } const struct pp_smumgr_func vegam_smu_funcs = { + .name = "vegam_smu", .smu_init = vegam_smu_init, .smu_fini = smu7_smu_fini, .start_smu = vegam_start_smu, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 4aa8f5a69c4c..bb9bb09cfc7a 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -36,13 +36,29 @@ #include "vega20_pptable.h" #include "vega20_ppsmc.h" #include "nbio/nbio_7_4_sh_mask.h" +#include "asic_reg/thm/thm_11_0_2_offset.h" +#include "asic_reg/thm/thm_11_0_2_sh_mask.h" #define smnPCIE_LC_SPEED_CNTL 0x11140290 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_HBM 5 + #define MSG_MAP(msg) \ [SMU_MSG_##msg] = PPSMC_MSG_##msg +#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \ + FEATURE_DPM_GFXCLK_MASK | \ + FEATURE_DPM_UCLK_MASK | \ + FEATURE_DPM_SOCCLK_MASK | \ + FEATURE_DPM_UVD_MASK | \ + FEATURE_DPM_VCE_MASK | \ + FEATURE_DPM_MP0CLK_MASK | \ + FEATURE_DPM_LINK_MASK | \ + FEATURE_DPM_DCEFCLK_MASK) + static int vega20_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage), MSG_MAP(GetSmuVersion), @@ -129,6 +145,136 @@ static int vega20_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(GetAVFSVoltageByDpm), }; +static int 
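The MSG_MAP() macro defined above (and the CLK_MAP()/FEA_MAP()/TAB_MAP() tables that follow) builds lookup arrays with designated initializers keyed by the generic SMU enum and holding the ASIC-specific id. A self-contained example of the same token-pasting pattern, with made-up enums and values:

#include <stdio.h>

enum generic_msg { SMU_MSG_TestMessage, SMU_MSG_GetSmuVersion, SMU_MSG_MAX_COUNT };
enum asic_msg    { PPSMC_MSG_TestMessage = 0x1, PPSMC_MSG_GetSmuVersion = 0x2 };

#define MSG_MAP(msg) [SMU_MSG_##msg] = PPSMC_MSG_##msg

static const int message_map[SMU_MSG_MAX_COUNT] = {
    MSG_MAP(TestMessage),      /* expands to [SMU_MSG_TestMessage] = PPSMC_MSG_TestMessage */
    MSG_MAP(GetSmuVersion),
};

int main(void)
{
    printf("TestMessage -> 0x%x\n", message_map[SMU_MSG_TestMessage]);
    return 0;
}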
vega20_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(VCLK, PPCLK_VCLK), + CLK_MAP(DCLK, PPCLK_DCLK), + CLK_MAP(ECLK, PPCLK_ECLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(DCEFCLK, PPCLK_DCEFCLK), + CLK_MAP(DISPCLK, PPCLK_DISPCLK), + CLK_MAP(PIXCLK, PPCLK_PIXCLK), + CLK_MAP(PHYCLK, PPCLK_PHYCLK), + CLK_MAP(FCLK, PPCLK_FCLK), +}; + +static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(DPM_PREFETCHER), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_UVD), + FEA_MAP(DPM_VCE), + FEA_MAP(ULV), + FEA_MAP(DPM_MP0CLK), + FEA_MAP(DPM_LINK), + FEA_MAP(DPM_DCEFCLK), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(PPT), + FEA_MAP(TDC), + FEA_MAP(THERMAL), + FEA_MAP(GFX_PER_CU_CG), + FEA_MAP(RM), + FEA_MAP(DS_DCEFCLK), + FEA_MAP(ACDC), + FEA_MAP(VR0HOT), + FEA_MAP(VR1HOT), + FEA_MAP(FW_CTF), + FEA_MAP(LED_DISPLAY), + FEA_MAP(FAN_CONTROL), + FEA_MAP(GFX_EDC), + FEA_MAP(GFXOFF), + FEA_MAP(CG), + FEA_MAP(DPM_FCLK), + FEA_MAP(DS_FCLK), + FEA_MAP(DS_MP1CLK), + FEA_MAP(DS_MP0CLK), + FEA_MAP(XGMI), +}; + +static int vega20_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(WATERMARKS), + TAB_MAP(AVFS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(AVFS_FUSE_OVERRIDE), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(ACTIVITY_MONITOR_COEFF), + TAB_MAP(OVERDRIVE), +}; + +static int vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static int vega20_workload_map[] = { + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), +}; + +static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_TABLE_COUNT) + return -EINVAL; + + val = vega20_table_map[index]; + if (val >= TABLE_COUNT) + return -EINVAL; + + return val; +} + +static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_POWER_SOURCE_COUNT) + return -EINVAL; + + val = vega20_pwr_src_map[index]; + if (val >= POWER_SOURCE_COUNT) + return -EINVAL; + + return val; +} + +static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_FEATURE_COUNT) + return -EINVAL; + + val = vega20_feature_mask_map[index]; + if (val > 64) + return -EINVAL; + + return val; +} + +static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index) +{ + int val; + if (index >= SMU_CLK_COUNT) + return -EINVAL; + + val = vega20_clk_map[index]; + if (val >= PPCLK_COUNT) + return -EINVAL; + + return val; +} + static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index) { int val; @@ -143,6 +289,43 @@ static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index) return val; } +static int vega20_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile) +{ + int val; + if (profile > PP_SMC_POWER_PROFILE_CUSTOM) + return -EINVAL; + + val = vega20_workload_map[profile]; + + return val; +} + +static int 
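Each of the vega20_get_smu_*_index() helpers above follows the same shape: reject an out-of-range generic index, translate through the map, and reject a translated value outside the ASIC's own range. A minimal standalone version of that shape, with placeholder tables and counts:

#include <stdio.h>
#include <errno.h>

#define GENERIC_COUNT 3
#define ASIC_COUNT    4

static const int generic_to_asic[GENERIC_COUNT] = { 0, 2, 3 };

static int get_asic_index(unsigned int generic_index)
{
    int val;

    if (generic_index >= GENERIC_COUNT)   /* caller passed a bogus generic id */
        return -EINVAL;

    val = generic_to_asic[generic_index];
    if (val >= ASIC_COUNT)                /* map entry outside the ASIC range */
        return -EINVAL;

    return val;
}

int main(void)
{
    printf("%d %d\n", get_asic_index(1), get_asic_index(9));  /* 2, -EINVAL */
    return 0;
}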
vega20_tables_init(struct smu_context *smu, struct smu_table *tables) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, + sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + + return 0; +} + static int vega20_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -182,6 +365,7 @@ static int vega20_setup_od8_information(struct smu_context *smu) { ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL; struct smu_table_context *table_context = &smu->smu_table; + struct vega20_od8_settings *od8_settings = (struct vega20_od8_settings *)smu->od_settings; uint32_t od_feature_count, od_feature_array_size, od_setting_count, od_setting_array_size; @@ -202,13 +386,13 @@ static int vega20_setup_od8_information(struct smu_context *smu) od_feature_array_size = sizeof(uint8_t) * od_feature_count; - if (table_context->od_feature_capabilities) + if (od8_settings->od_feature_capabilities) return -EINVAL; - table_context->od_feature_capabilities = kmemdup(&powerplay_table->OverDrive8Table.ODFeatureCapabilities, + od8_settings->od_feature_capabilities = kmemdup(&powerplay_table->OverDrive8Table.ODFeatureCapabilities, od_feature_array_size, GFP_KERNEL); - if (!table_context->od_feature_capabilities) + if (!od8_settings->od_feature_capabilities) return -ENOMEM; /* Setup correct ODSettingCount, and store ODSettingArray from @@ -221,31 +405,31 @@ static int vega20_setup_od8_information(struct smu_context *smu) od_setting_array_size = sizeof(uint32_t) * od_setting_count; - if (table_context->od_settings_max) + if (od8_settings->od_settings_max) return -EINVAL; - table_context->od_settings_max = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMax, + od8_settings->od_settings_max = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMax, od_setting_array_size, GFP_KERNEL); - if (!table_context->od_settings_max) { - kfree(table_context->od_feature_capabilities); - table_context->od_feature_capabilities = NULL; + if (!od8_settings->od_settings_max) { + kfree(od8_settings->od_feature_capabilities); + od8_settings->od_feature_capabilities = NULL; return -ENOMEM; } - if (table_context->od_settings_min) + if (od8_settings->od_settings_min) return -EINVAL; - table_context->od_settings_min = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMin, + od8_settings->od_settings_min = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMin, od_setting_array_size, GFP_KERNEL); - if (!table_context->od_settings_min) { - kfree(table_context->od_feature_capabilities); - table_context->od_feature_capabilities = NULL; - kfree(table_context->od_settings_max); - table_context->od_settings_max = NULL; + if (!od8_settings->od_settings_min) { + kfree(od8_settings->od_feature_capabilities); + 
od8_settings->od_feature_capabilities = NULL; + kfree(od8_settings->od_settings_max); + od8_settings->od_settings_max = NULL; return -ENOMEM; } } @@ -257,7 +441,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) { ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL; struct smu_table_context *table_context = &smu->smu_table; - int ret; if (!table_context->power_play_table) return -EINVAL; @@ -271,9 +454,7 @@ static int vega20_store_powerplay_table(struct smu_context *smu) table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); - ret = vega20_setup_od8_information(smu); - - return ret; + return 0; } static int vega20_append_powerplay_table(struct smu_context *smu) @@ -392,16 +573,42 @@ static int vega20_run_btc_afll(struct smu_context *smu) return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); } +#define FEATURE_MASK(feature) (1ULL << feature) static int -vega20_get_unallowed_feature_mask(struct smu_context *smu, +vega20_get_allowed_feature_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) { if (num > 2) return -EINVAL; - feature_mask[0] = 0xE0041C00; - feature_mask[1] = 0xFFFFFFFE; /* bit32~bit63 is Unsupported */ - + memset(feature_mask, 0, sizeof(uint32_t) * num); + + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) + | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) + | FEATURE_MASK(FEATURE_DPM_UCLK_BIT) + | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) + | FEATURE_MASK(FEATURE_DPM_UVD_BIT) + | FEATURE_MASK(FEATURE_DPM_VCE_BIT) + | FEATURE_MASK(FEATURE_ULV_BIT) + | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) + | FEATURE_MASK(FEATURE_DPM_LINK_BIT) + | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) + | FEATURE_MASK(FEATURE_PPT_BIT) + | FEATURE_MASK(FEATURE_TDC_BIT) + | FEATURE_MASK(FEATURE_THERMAL_BIT) + | FEATURE_MASK(FEATURE_GFX_PER_CU_CG_BIT) + | FEATURE_MASK(FEATURE_RM_BIT) + | FEATURE_MASK(FEATURE_ACDC_BIT) + | FEATURE_MASK(FEATURE_VR0HOT_BIT) + | FEATURE_MASK(FEATURE_VR1HOT_BIT) + | FEATURE_MASK(FEATURE_FW_CTF_BIT) + | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT) + | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT) + | FEATURE_MASK(FEATURE_GFX_EDC_BIT) + | FEATURE_MASK(FEATURE_GFXOFF_BIT) + | FEATURE_MASK(FEATURE_CG_BIT) + | FEATURE_MASK(FEATURE_DPM_FCLK_BIT) + | FEATURE_MASK(FEATURE_XGMI_BIT); return 0; } @@ -502,7 +709,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* socclk */ single_dpm_table = &(dpm_table->soc_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_SOCCLK); if (ret) { @@ -518,7 +725,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* gfxclk */ single_dpm_table = &(dpm_table->gfx_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_GFXCLK); if (ret) { @@ -534,7 +741,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* memclk */ single_dpm_table = &(dpm_table->mem_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_UCLK); if (ret) { @@ -550,7 +757,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* eclk */ 
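vega20_get_allowed_feature_mask() above ORs FEATURE_MASK() bits into a 64-bit value that the two-element uint32_t array is expected to carry. The sketch below models that packing with memcpy instead of the driver's pointer cast; the bit numbers are placeholders, and the low/high word split shown assumes a little-endian target.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FEATURE_MASK(bit) (1ULL << (bit))

int main(void)
{
    uint32_t feature_mask[2];
    uint64_t allowed = FEATURE_MASK(0)    /* e.g. a prefetcher DPM bit */
                     | FEATURE_MASK(1)    /* e.g. a gfxclk DPM bit */
                     | FEATURE_MASK(33);  /* a bit landing in the upper word */

    memset(feature_mask, 0, sizeof(feature_mask));
    memcpy(feature_mask, &allowed, sizeof(allowed));  /* low word first on little-endian */

    printf("low=0x%08x high=0x%08x\n", feature_mask[0], feature_mask[1]);
    return 0;
}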
single_dpm_table = &(dpm_table->eclk_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_ECLK); if (ret) { pr_err("[SetupDefaultDpmTable] failed to get eclk dpm levels!"); @@ -565,7 +772,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* vclk */ single_dpm_table = &(dpm_table->vclk_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_VCLK); if (ret) { pr_err("[SetupDefaultDpmTable] failed to get vclk dpm levels!"); @@ -580,7 +787,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* dclk */ single_dpm_table = &(dpm_table->dclk_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_DCLK); if (ret) { pr_err("[SetupDefaultDpmTable] failed to get dclk dpm levels!"); @@ -595,7 +802,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* dcefclk */ single_dpm_table = &(dpm_table->dcef_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_DCEFCLK); if (ret) { @@ -611,7 +818,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* pixclk */ single_dpm_table = &(dpm_table->pixel_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_PIXCLK); if (ret) { @@ -626,7 +833,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* dispclk */ single_dpm_table = &(dpm_table->display_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_DISPCLK); if (ret) { @@ -641,7 +848,7 @@ static int vega20_set_default_dpm_table(struct smu_context *smu) /* phyclk */ single_dpm_table = &(dpm_table->phy_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_PHYCLK); if (ret) { @@ -719,7 +926,7 @@ static int vega20_get_clk_table(struct smu_context *smu, } static int vega20_print_clk_levels(struct smu_context *smu, - enum pp_clock_type type, char *buf) + enum smu_clk_type type, char *buf) { int i, now, size = 0; int ret = 0; @@ -731,7 +938,7 @@ static int vega20_print_clk_levels(struct smu_context *smu, struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct vega20_dpm_table *dpm_table = NULL; struct vega20_od8_settings *od8_settings = - (struct vega20_od8_settings *)table_context->od8_settings; + (struct vega20_od8_settings *)smu->od_settings; OverDriveTable_t *od_table = (OverDriveTable_t *)(table_context->overdrive_table); PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable; @@ -739,8 +946,8 @@ static int vega20_print_clk_levels(struct smu_context *smu, dpm_table = smu_dpm->dpm_context; switch (type) { - case PP_SCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, &now); + case SMU_SCLK: + ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, &now); if 
(ret) { pr_err("Attempt to get current gfx clk Failed!"); return ret; @@ -760,8 +967,8 @@ static int vega20_print_clk_levels(struct smu_context *smu, ? "*" : ""); break; - case PP_MCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, &now); + case SMU_MCLK: + ret = smu_get_current_clk_freq(smu, SMU_UCLK, &now); if (ret) { pr_err("Attempt to get current mclk Failed!"); return ret; @@ -781,8 +988,8 @@ static int vega20_print_clk_levels(struct smu_context *smu, ? "*" : ""); break; - case PP_SOCCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now); + case SMU_SOCCLK: + ret = smu_get_current_clk_freq(smu, SMU_SOCCLK, &now); if (ret) { pr_err("Attempt to get current socclk Failed!"); return ret; @@ -802,8 +1009,8 @@ static int vega20_print_clk_levels(struct smu_context *smu, ? "*" : ""); break; - case PP_FCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now); + case SMU_FCLK: + ret = smu_get_current_clk_freq(smu, SMU_FCLK, &now); if (ret) { pr_err("Attempt to get current fclk Failed!"); return ret; @@ -817,8 +1024,8 @@ static int vega20_print_clk_levels(struct smu_context *smu, ? "*" : ""); break; - case PP_DCEFCLK: - ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now); + case SMU_DCEFCLK: + ret = smu_get_current_clk_freq(smu, SMU_DCEFCLK, &now); if (ret) { pr_err("Attempt to get current dcefclk Failed!"); return ret; @@ -837,7 +1044,7 @@ static int vega20_print_clk_levels(struct smu_context *smu, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; - case PP_PCIE: + case SMU_PCIE: gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; @@ -862,7 +1069,7 @@ static int vega20_print_clk_levels(struct smu_context *smu, "*" : ""); break; - case OD_SCLK: + case SMU_OD_SCLK: if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) { size = sprintf(buf, "%s:\n", "OD_SCLK"); @@ -874,7 +1081,7 @@ static int vega20_print_clk_levels(struct smu_context *smu, break; - case OD_MCLK: + case SMU_OD_MCLK: if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) { size = sprintf(buf, "%s:\n", "OD_MCLK"); size += sprintf(buf + size, "1: %10uMhz\n", @@ -883,7 +1090,7 @@ static int vega20_print_clk_levels(struct smu_context *smu, break; - case OD_VDDC_CURVE: + case SMU_OD_VDDC_CURVE: if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id && od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id && od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id && @@ -904,7 +1111,7 @@ static int vega20_print_clk_levels(struct smu_context *smu, break; - case OD_RANGE: + case SMU_OD_RANGE: size = sprintf(buf, "%s:\n", "OD_RANGE"); if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id && @@ -971,7 +1178,7 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, dpm_table = smu->smu_dpm.dpm_context; - if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) && + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { single_dpm_table = &(dpm_table->gfx_table); freq = max ? 
single_dpm_table->dpm_state.soft_max_level : @@ -986,7 +1193,7 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT) && + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && (feature_mask & FEATURE_DPM_UCLK_MASK)) { single_dpm_table = &(dpm_table->mem_table); freq = max ? single_dpm_table->dpm_state.soft_max_level : @@ -1001,7 +1208,7 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT) && + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) && (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { single_dpm_table = &(dpm_table->soc_table); freq = max ? single_dpm_table->dpm_state.soft_max_level : @@ -1016,7 +1223,7 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT) && + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT) && (feature_mask & FEATURE_DPM_FCLK_MASK)) { single_dpm_table = &(dpm_table->fclk_table); freq = max ? single_dpm_table->dpm_state.soft_max_level : @@ -1031,7 +1238,7 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) && + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) { single_dpm_table = &(dpm_table->dcef_table); freq = single_dpm_table->dpm_state.hard_min_level; @@ -1050,7 +1257,7 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, } static int vega20_force_clk_levels(struct smu_context *smu, - enum pp_clock_type type, uint32_t mask) + enum smu_clk_type clk_type, uint32_t mask) { struct vega20_dpm_table *dpm_table; struct vega20_single_dpm_table *single_dpm_table; @@ -1070,8 +1277,8 @@ static int vega20_force_clk_levels(struct smu_context *smu, dpm_table = smu->smu_dpm.dpm_context; - switch (type) { - case PP_SCLK: + switch (clk_type) { + case SMU_SCLK: single_dpm_table = &(dpm_table->gfx_table); if (soft_max_level >= single_dpm_table->count) { @@ -1098,7 +1305,7 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; - case PP_MCLK: + case SMU_MCLK: single_dpm_table = &(dpm_table->mem_table); if (soft_max_level >= single_dpm_table->count) { @@ -1125,7 +1332,7 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; - case PP_SOCCLK: + case SMU_SOCCLK: single_dpm_table = &(dpm_table->soc_table); if (soft_max_level >= single_dpm_table->count) { @@ -1152,7 +1359,7 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; - case PP_FCLK: + case SMU_FCLK: single_dpm_table = &(dpm_table->fclk_table); if (soft_max_level >= single_dpm_table->count) { @@ -1179,7 +1386,7 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; - case PP_DCEFCLK: + case SMU_DCEFCLK: hard_min_level = soft_min_level; single_dpm_table = &(dpm_table->dcef_table); @@ -1199,7 +1406,7 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; - case PP_PCIE: + case SMU_PCIE: if (soft_min_level >= NUM_LINK_LEVELS || soft_max_level >= NUM_LINK_LEVELS) { ret = -EINVAL; @@ -1222,7 +1429,7 @@ static int vega20_force_clk_levels(struct smu_context *smu, } static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, - enum amd_pp_clock_type type, + enum smu_clk_type clk_type, struct pp_clock_levels_with_latency *clocks) { int ret; @@ -1234,20 +1441,20 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context 
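vega20_force_clk_levels() above rejects requests whose soft min/max level indices fall outside the DPM table. A tiny model of that validation; the inverted-range check is an added illustration rather than something visible in the hunk.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Validate requested DPM level indices against the table size. */
static int validate_levels(uint32_t count, uint32_t soft_min, uint32_t soft_max)
{
    if (soft_min >= count || soft_max >= count)
        return -EINVAL;       /* requested level does not exist */
    if (soft_min > soft_max)
        return -EINVAL;       /* hypothetical inverted-range check */
    return 0;
}

int main(void)
{
    printf("%d %d\n", validate_levels(8, 0, 7), validate_levels(8, 2, 9));  /* 0, -EINVAL */
    return 0;
}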
*smu, mutex_lock(&smu->mutex); - switch (type) { - case amd_pp_sys_clock: + switch (clk_type) { + case SMU_GFXCLK: single_dpm_table = &(dpm_table->gfx_table); ret = vega20_get_clk_table(smu, clocks, single_dpm_table); break; - case amd_pp_mem_clock: + case SMU_MCLK: single_dpm_table = &(dpm_table->mem_table); ret = vega20_get_clk_table(smu, clocks, single_dpm_table); break; - case amd_pp_dcef_clock: + case SMU_DCEFCLK: single_dpm_table = &(dpm_table->dcef_table); ret = vega20_get_clk_table(smu, clocks, single_dpm_table); break; - case amd_pp_soc_clock: + case SMU_SOCCLK: single_dpm_table = &(dpm_table->soc_table); ret = vega20_get_clk_table(smu, clocks, single_dpm_table); break; @@ -1287,23 +1494,28 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu) PPTable_t *smc_pptable = table_context->driver_pptable; int i, ret; - if (table_context->od8_settings) + if (smu->od_settings) return -EINVAL; - table_context->od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL); + od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL); - if (!table_context->od8_settings) + if (!od8_settings) return -ENOMEM; - memset(table_context->od8_settings, 0, sizeof(struct vega20_od8_settings)); - od8_settings = (struct vega20_od8_settings *)table_context->od8_settings; + smu->od_settings = (void *)od8_settings; + + ret = vega20_setup_od8_information(smu); + if (ret) { + pr_err("Retrieve board OD limits failed!\n"); + return ret; + } - if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) { - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] && - table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 && - table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 && - (table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >= - table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] && + od8_settings->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 && + od8_settings->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 && + (od8_settings->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >= + od8_settings->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) { od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id = OD8_GFXCLK_LIMITS; od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id = @@ -1314,13 +1526,13 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu) od_table->GfxclkFmax; } - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] && - (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >= + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] && + (od8_settings->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >= smc_pptable->MinVoltageGfx / VOLTAGE_SCALE) && - (table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <= + (od8_settings->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <= smc_pptable->MaxVoltageGfx / VOLTAGE_SCALE) && - (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] <= - table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3])) { + (od8_settings->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] <= + od8_settings->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3])) { od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id = OD8_GFXCLK_CURVE; od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id = @@ -1371,12 +1583,12 @@ static int 
vega20_set_default_od8_setttings(struct smu_context *smu) } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && - table_context->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && - table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && - (table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] >= - table_context->od_settings_min[OD8_SETTING_UCLK_FMAX])) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && + od8_settings->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && + od8_settings->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && + (od8_settings->od_settings_max[OD8_SETTING_UCLK_FMAX] >= + od8_settings->od_settings_min[OD8_SETTING_UCLK_FMAX])) { od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX; od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value = @@ -1384,34 +1596,34 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu) } } - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] && - table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 && - table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100 && - table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 && - table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] && + od8_settings->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 && + od8_settings->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100 && + od8_settings->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 && + od8_settings->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100) { od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT; od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value = od_table->OverDrivePct; } - if (smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT)) { - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] && - table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && - table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && - (table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >= - table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] && + od8_settings->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && + od8_settings->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && + (od8_settings->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >= + od8_settings->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) { od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id = OD8_ACOUSTIC_LIMIT_SCLK; od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value = od_table->FanMaximumRpm; } - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] && - table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] > 0 && - table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 && - (table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >= - table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] && + 
od8_settings->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] > 0 && + od8_settings->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 && + (od8_settings->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >= + od8_settings->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) { od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id = OD8_FAN_SPEED_MIN; od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = @@ -1419,23 +1631,23 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu) } } - if (smu_feature_is_enabled(smu, FEATURE_THERMAL_BIT)) { - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] && - table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 && - table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 && - (table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >= - table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_THERMAL_BIT)) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] && + od8_settings->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 && + od8_settings->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 && + (od8_settings->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >= + od8_settings->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) { od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id = OD8_TEMPERATURE_FAN; od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value = od_table->FanTargetTemperature; } - if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] && - table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && - table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && - (table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >= - table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) { + if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] && + od8_settings->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && + od8_settings->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && + (od8_settings->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >= + od8_settings->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) { od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id = OD8_TEMPERATURE_SYSTEM; od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value = @@ -1446,9 +1658,9 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu) for (i = 0; i < OD8_SETTING_COUNT; i++) { if (od8_settings->od8_settings_array[i].feature_id) { od8_settings->od8_settings_array[i].min_value = - table_context->od_settings_min[i]; + od8_settings->od_settings_min[i]; od8_settings->od8_settings_array[i].max_value = - table_context->od_settings_max[i]; + od8_settings->od_settings_max[i]; od8_settings->od8_settings_array[i].current_value = od8_settings->od8_settings_array[i].default_value; } else { @@ -1461,8 +1673,66 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu) return 0; } +static int vega20_get_metrics_table(struct smu_context *smu, + SmuMetrics_t *metrics_table) +{ + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + 
return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + +static int vega20_set_default_od_settings(struct smu_context *smu, + bool initialize) +{ + struct smu_table_context *table_context = &smu->smu_table; + int ret; + + if (initialize) { + if (table_context->overdrive_table) + return -EINVAL; + + table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL); + + if (!table_context->overdrive_table) + return -ENOMEM; + + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, + table_context->overdrive_table, false); + if (ret) { + pr_err("Failed to export over drive table!\n"); + return ret; + } + + ret = vega20_set_default_od8_setttings(smu); + if (ret) + return ret; + } + + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, + table_context->overdrive_table, true); + if (ret) { + pr_err("Failed to import over drive table!\n"); + return ret; + } + + return 0; +} + static int vega20_get_od_percentage(struct smu_context *smu, - enum pp_clock_type type) + enum smu_clk_type clk_type) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct vega20_dpm_table *dpm_table = NULL; @@ -1474,12 +1744,12 @@ static int vega20_get_od_percentage(struct smu_context *smu, dpm_table = smu_dpm->dpm_context; golden_table = smu_dpm->golden_dpm_context; - switch (type) { - case OD_SCLK: + switch (clk_type) { + case SMU_OD_SCLK: single_dpm_table = &(dpm_table->gfx_table); golden_dpm_table = &(golden_table->gfx_table); break; - case OD_MCLK: + case SMU_OD_MCLK: single_dpm_table = &(dpm_table->mem_table); golden_dpm_table = &(golden_table->mem_table); break; @@ -1497,6 +1767,201 @@ static int vega20_get_od_percentage(struct smu_context *smu, return value; } +static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf) +{ + DpmActivityMonitorCoeffInt_t activity_monitor; + uint32_t i, size = 0; + uint16_t workload_type = 0; + static const char *profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "UseRlcBusy", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int result = 0; + + if (!smu->pm_enabled || !buf) + return -EINVAL; + + size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9], title[10]); + + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_workload_get_type(smu, i); + result = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, + (void *)(&activity_monitor), false); + if (result) { + pr_err("[%s] Failed to get activity monitor!", __func__); + return result; + } + + size += sprintf(buf + size, "%2d %14s%s:\n", + i, profile_name[i], (i == smu->power_profile_mode) ? 
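vega20_get_metrics_table() above caches the SMU metrics table and only re-reads it from the firmware when the cached copy is older than roughly a millisecond (jiffies plus HZ/1000). The sketch below reproduces that time-based caching with a monotonic clock and a stand-in refresh function.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <time.h>

struct metrics { uint32_t gfx_activity; };

static struct metrics cached;
static struct timespec cached_at;
static int have_cache;

static void refresh_from_hw(struct metrics *m) { m->gfx_activity = 42; }  /* placeholder read */

static int64_t elapsed_ns(const struct timespec *a, const struct timespec *b)
{
    return (int64_t)(b->tv_sec - a->tv_sec) * 1000000000 + (b->tv_nsec - a->tv_nsec);
}

static void get_metrics(struct metrics *out)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    if (!have_cache || elapsed_ns(&cached_at, &now) > 1000000) {  /* cache older than ~1 ms */
        refresh_from_hw(&cached);
        cached_at = now;
        have_cache = 1;
    }
    memcpy(out, &cached, sizeof(*out));   /* hand back the cached copy */
}

int main(void)
{
    struct metrics m;

    get_metrics(&m);
    printf("gfx_activity=%u\n", m.gfx_activity);
    return 0;
}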
"*" : " "); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor.Gfx_FPS, + activity_monitor.Gfx_UseRlcBusy, + activity_monitor.Gfx_MinActiveFreqType, + activity_monitor.Gfx_MinActiveFreq, + activity_monitor.Gfx_BoosterFreqType, + activity_monitor.Gfx_BoosterFreq, + activity_monitor.Gfx_PD_Data_limit_c, + activity_monitor.Gfx_PD_Data_error_coeff, + activity_monitor.Gfx_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "SOCCLK", + activity_monitor.Soc_FPS, + activity_monitor.Soc_UseRlcBusy, + activity_monitor.Soc_MinActiveFreqType, + activity_monitor.Soc_MinActiveFreq, + activity_monitor.Soc_BoosterFreqType, + activity_monitor.Soc_BoosterFreq, + activity_monitor.Soc_PD_Data_limit_c, + activity_monitor.Soc_PD_Data_error_coeff, + activity_monitor.Soc_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 2, + "UCLK", + activity_monitor.Mem_FPS, + activity_monitor.Mem_UseRlcBusy, + activity_monitor.Mem_MinActiveFreqType, + activity_monitor.Mem_MinActiveFreq, + activity_monitor.Mem_BoosterFreqType, + activity_monitor.Mem_BoosterFreq, + activity_monitor.Mem_PD_Data_limit_c, + activity_monitor.Mem_PD_Data_error_coeff, + activity_monitor.Mem_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 3, + "FCLK", + activity_monitor.Fclk_FPS, + activity_monitor.Fclk_UseRlcBusy, + activity_monitor.Fclk_MinActiveFreqType, + activity_monitor.Fclk_MinActiveFreq, + activity_monitor.Fclk_BoosterFreqType, + activity_monitor.Fclk_BoosterFreq, + activity_monitor.Fclk_PD_Data_limit_c, + activity_monitor.Fclk_PD_Data_error_coeff, + activity_monitor.Fclk_PD_Data_error_rate_coeff); + } + + return size; +} + +static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +{ + DpmActivityMonitorCoeffInt_t activity_monitor; + int workload_type = 0, ret = 0; + + smu->power_profile_mode = input[size]; + + if (!smu->pm_enabled) + return ret; + if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), false); + if (ret) { + pr_err("[%s] Failed to get activity monitor!", __func__); + return ret; + } + + switch (input[0]) { + case 0: /* Gfxclk */ + activity_monitor.Gfx_FPS = input[1]; + activity_monitor.Gfx_UseRlcBusy = input[2]; + activity_monitor.Gfx_MinActiveFreqType = input[3]; + activity_monitor.Gfx_MinActiveFreq = input[4]; + activity_monitor.Gfx_BoosterFreqType = input[5]; + activity_monitor.Gfx_BoosterFreq = input[6]; + activity_monitor.Gfx_PD_Data_limit_c = input[7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; + break; + case 1: /* Socclk */ + activity_monitor.Soc_FPS = input[1]; + activity_monitor.Soc_UseRlcBusy = input[2]; + activity_monitor.Soc_MinActiveFreqType = input[3]; + activity_monitor.Soc_MinActiveFreq = input[4]; + activity_monitor.Soc_BoosterFreqType = input[5]; + activity_monitor.Soc_BoosterFreq = input[6]; + activity_monitor.Soc_PD_Data_limit_c = input[7]; + activity_monitor.Soc_PD_Data_error_coeff = input[8]; + 
activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; + break; + case 2: /* Uclk */ + activity_monitor.Mem_FPS = input[1]; + activity_monitor.Mem_UseRlcBusy = input[2]; + activity_monitor.Mem_MinActiveFreqType = input[3]; + activity_monitor.Mem_MinActiveFreq = input[4]; + activity_monitor.Mem_BoosterFreqType = input[5]; + activity_monitor.Mem_BoosterFreq = input[6]; + activity_monitor.Mem_PD_Data_limit_c = input[7]; + activity_monitor.Mem_PD_Data_error_coeff = input[8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; + break; + case 3: /* Fclk */ + activity_monitor.Fclk_FPS = input[1]; + activity_monitor.Fclk_UseRlcBusy = input[2]; + activity_monitor.Fclk_MinActiveFreqType = input[3]; + activity_monitor.Fclk_MinActiveFreq = input[4]; + activity_monitor.Fclk_BoosterFreqType = input[5]; + activity_monitor.Fclk_BoosterFreq = input[6]; + activity_monitor.Fclk_PD_Data_limit_c = input[7]; + activity_monitor.Fclk_PD_Data_error_coeff = input[8]; + activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; + break; + } + + ret = smu_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor), true); + if (ret) { + pr_err("[%s] Failed to set activity monitor!", __func__); + return ret; + } + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_workload_get_type(smu, smu->power_profile_mode); + smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + 1 << workload_type); + + return ret; +} + static int vega20_get_profiling_clk_mask(struct smu_context *smu, enum amd_dpm_forced_level level, @@ -1550,7 +2015,7 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu, if (!smu_dpm_ctx->dpm_context) return -EINVAL; - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { if (dpm_table->count <= 0) { pr_err("[%s] Dpm table has no entry!", __func__); return -EINVAL; @@ -1594,17 +2059,9 @@ static int vega20_display_config_changed(struct smu_context *smu) { int ret = 0; - if (!smu->funcs) - return -EINVAL; - - if (!smu->smu_dpm.dpm_context || - !smu->smu_table.tables || - !smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr) - return -EINVAL; - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { - ret = smu->funcs->write_watermarks_table(smu); + ret = smu_write_watermarks_table(smu); if (ret) { pr_err("Failed to update WMTABLE!"); return ret; @@ -1613,8 +2070,8 @@ static int vega20_display_config_changed(struct smu_context *smu) } if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT) && - smu_feature_is_supported(smu, FEATURE_DPM_SOCCLK_BIT)) { + smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && + smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, smu->display_config->num_display); @@ -1783,11 +2240,11 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu) min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk; min_clocks.memory_clock = smu->display_config->min_mem_set_clock; - if (smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT)) { + if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) { - if (smu_feature_is_supported(smu, FEATURE_DS_DCEFCLK_BIT)) { + if 
(smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, min_clocks.dcef_clock_in_sr/100); @@ -1801,7 +2258,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu) } } - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, @@ -1939,13 +2396,13 @@ static int vega20_unforce_dpm_levels(struct smu_context *smu) dpm_table->soc_table.dpm_state.soft_max_level = dpm_table->soc_table.dpm_levels[soft_max_level].value; - ret = smu_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = vega20_upload_dpm_level(smu, false, 0xFFFFFFFF); if (ret) { pr_err("Failed to upload DPM Bootup Levels!"); return ret; } - ret = smu_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = vega20_upload_dpm_level(smu, true, 0xFFFFFFFF); if (ret) { pr_err("Failed to upload DPM Max Levels!"); return ret; @@ -1954,46 +2411,6 @@ static int vega20_unforce_dpm_levels(struct smu_context *smu) return ret; } -static enum amd_dpm_forced_level vega20_get_performance_level(struct smu_context *smu) -{ - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - if (!smu_dpm_ctx->dpm_context) - return -EINVAL; - - if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) { - mutex_lock(&(smu->mutex)); - smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; - mutex_unlock(&(smu->mutex)); - } - return smu_dpm_ctx->dpm_level; -} - -static int -vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) -{ - int ret = 0; - int i; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - - if (!smu_dpm_ctx->dpm_context) - return -EINVAL; - - for (i = 0; i < smu->adev->num_ip_blocks; i++) { - if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) - break; - } - - mutex_lock(&smu->mutex); - - smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level); - ret = smu_handle_task(smu, level, - AMD_PP_TASK_READJUST_POWER_STATE); - - mutex_unlock(&smu->mutex); - - return ret; -} - static int vega20_update_specified_od8_value(struct smu_context *smu, uint32_t index, uint32_t value) @@ -2002,7 +2419,7 @@ static int vega20_update_specified_od8_value(struct smu_context *smu, OverDriveTable_t *od_table = (OverDriveTable_t *)(table_context->overdrive_table); struct vega20_od8_settings *od8_settings = - (struct vega20_od8_settings *)table_context->od8_settings; + (struct vega20_od8_settings *)smu->od_settings; switch (index) { case OD8_SETTING_GFXCLK_FMIN: @@ -2071,8 +2488,36 @@ static int vega20_update_specified_od8_value(struct smu_context *smu, return 0; } +static int vega20_update_od8_settings(struct smu_context *smu, + uint32_t index, + uint32_t value) +{ + struct smu_table_context *table_context = &smu->smu_table; + int ret; + + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, + table_context->overdrive_table, false); + if (ret) { + pr_err("Failed to export over drive table!\n"); + return ret; + } + + ret = vega20_update_specified_od8_value(smu, index, value); + if (ret) + return ret; + + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, + table_context->overdrive_table, true); + if (ret) { + pr_err("Failed to import over drive table!\n"); + return ret; + } + + return 0; +} + static int vega20_set_od_percentage(struct smu_context *smu, - enum pp_clock_type type, + enum smu_clk_type clk_type, uint32_t value) { struct 
smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -2090,18 +2535,18 @@ static int vega20_set_od_percentage(struct smu_context *smu, dpm_table = smu_dpm->dpm_context; golden_table = smu_dpm->golden_dpm_context; - switch (type) { - case OD_SCLK: + switch (clk_type) { + case SMU_OD_SCLK: single_dpm_table = &(dpm_table->gfx_table); golden_dpm_table = &(golden_table->gfx_table); - feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT); + feature_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT); clk_id = PPCLK_GFXCLK; index = OD8_SETTING_GFXCLK_FMAX; break; - case OD_MCLK: + case SMU_OD_MCLK: single_dpm_table = &(dpm_table->mem_table); golden_dpm_table = &(golden_table->mem_table); - feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT); + feature_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT); clk_id = PPCLK_UCLK; index = OD8_SETTING_UCLK_FMAX; break; @@ -2117,7 +2562,7 @@ static int vega20_set_od_percentage(struct smu_context *smu, od_clk /= 100; od_clk += golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value; - ret = smu_update_od8_settings(smu, index, od_clk); + ret = vega20_update_od8_settings(smu, index, od_clk); if (ret) { pr_err("[Setoverdrive] failed to set od clk!\n"); goto set_od_failed; @@ -2155,7 +2600,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, struct vega20_dpm_table *dpm_table = NULL; struct vega20_single_dpm_table *single_dpm_table; struct vega20_od8_settings *od8_settings = - (struct vega20_od8_settings *)table_context->od8_settings; + (struct vega20_od8_settings *)smu->od_settings; struct pp_clock_levels_with_latency clocks; int32_t input_index, input_clk, input_vol, i; int od8_id; @@ -2202,10 +2647,10 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, if (input_index == 0 && od_table->GfxclkFmin != input_clk) { od_table->GfxclkFmin = input_clk; - table_context->od_gfxclk_update = true; + od8_settings->od_gfxclk_update = true; } else if (input_index == 1 && od_table->GfxclkFmax != input_clk) { od_table->GfxclkFmax = input_clk; - table_context->od_gfxclk_update = true; + od8_settings->od_gfxclk_update = true; } } @@ -2250,7 +2695,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, } if (input_index == 1 && od_table->UclkFmax != input_clk) { - table_context->od_gfxclk_update = true; + od8_settings->od_gfxclk_update = true; od_table->UclkFmax = input_clk; } } @@ -2325,7 +2770,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, break; case PP_OD_RESTORE_DEFAULT_TABLE: - ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false); + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); if (ret) { pr_err("Failed to export over drive table!\n"); return ret; @@ -2334,18 +2779,18 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, break; case PP_OD_COMMIT_DPM_TABLE: - ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true); + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); if (ret) { pr_err("Failed to import over drive table!\n"); return ret; } /* retrieve updated gfxclk table */ - if (table_context->od_gfxclk_update) { - table_context->od_gfxclk_update = false; + if (od8_settings->od_gfxclk_update) { + od8_settings->od_gfxclk_update = false; single_dpm_table = &(dpm_table->gfx_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 
ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_GFXCLK); if (ret) { @@ -2374,6 +2819,28 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, return ret; } +static int vega20_dpm_set_uvd_enable(struct smu_context *smu, bool enable) +{ + if (!smu_feature_is_supported(smu, SMU_FEATURE_DPM_UVD_BIT)) + return 0; + + if (enable == smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) + return 0; + + return smu_feature_set_enabled(smu, SMU_FEATURE_DPM_UVD_BIT, enable); +} + +static int vega20_dpm_set_vce_enable(struct smu_context *smu, bool enable) +{ + if (!smu_feature_is_supported(smu, SMU_FEATURE_DPM_VCE_BIT)) + return 0; + + if (enable == smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT)) + return 0; + + return smu_feature_set_enabled(smu, SMU_FEATURE_DPM_VCE_BIT, enable); +} + static int vega20_get_enabled_smc_features(struct smu_context *smu, uint64_t *features_enabled) { @@ -2525,14 +2992,279 @@ static int vega20_set_ppfeature_status(struct smu_context *smu, uint64_t new_ppf return 0; } +static bool vega20_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint32_t feature_mask[2]; + unsigned long feature_enabled; + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | + ((uint64_t)feature_mask[1] << 32)); + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static int vega20_set_thermal_fan_table(struct smu_context *smu) +{ + int ret; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget, + (uint32_t)pptable->FanTargetTemperature); + + return ret; +} + +static int vega20_get_fan_speed_percent(struct smu_context *smu, + uint32_t *speed) +{ + int ret = 0; + uint32_t current_rpm = 0, percent = 0; + PPTable_t *pptable = smu->smu_table.driver_pptable; + + ret = smu_get_current_rpm(smu, ¤t_rpm); + if (ret) + return ret; + + percent = current_rpm * 100 / pptable->FanMaximumRpm; + *speed = percent > 100 ? 
100 : percent; + + return 0; +} + +static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + ret = vega20_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + *value = metrics.CurrSocketPower << 8; + + return 0; +} + +static int vega20_get_current_activity_percent(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + ret = vega20_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + *value = metrics.AverageGfxActivity; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + *value = metrics.AverageUclkActivity; + break; + default: + pr_err("Invalid sensor for retrieving clock activity\n"); + return -EINVAL; + } + + return 0; +} + +static int vega20_thermal_get_temperature(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + struct amdgpu_device *adev = smu->adev; + SmuMetrics_t metrics; + uint32_t temp = 0; + int ret = 0; + + if (!value) + return -EINVAL; + + ret = vega20_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); + temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; + + temp = temp & 0x1ff; + temp *= SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + + *value = temp; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + *value = metrics.TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + *value = metrics.TemperatureHBM * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + default: + pr_err("Invalid sensor for retrieving temp\n"); + return -EINVAL; + } + + return 0; +} +static int vega20_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint32_t *)data = pptable->FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = vega20_get_current_activity_percent(smu, + sensor, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = vega20_get_gpu_power(smu, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + case AMDGPU_PP_SENSOR_EDGE_TEMP: + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = vega20_thermal_get_temperature(smu, sensor, (uint32_t *)data); + *size = 4; + break; + default: + return -EINVAL; + } + + return ret; +} + +static int vega20_set_watermarks_table(struct smu_context *smu, + void *watermarks, struct + dm_pp_wm_sets_with_clock_ranges_soc15 + *clock_ranges) +{ + int i; + Watermarks_t *table = watermarks; + + if (!table || !clock_ranges) + return -EINVAL; + + if (clock_ranges->num_wm_dmif_sets > 4 || + clock_ranges->num_wm_mcif_sets > 4) + return -EINVAL; + + for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) { + table->WatermarkRow[1][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].MinUclk = + 
cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].MaxUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz / + 1000)); + table->WatermarkRow[1][i].WmSetting = (uint8_t) + clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; + } + + for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) { + table->WatermarkRow[0][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].MinUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].MaxUclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz / + 1000)); + table->WatermarkRow[0][i].WmSetting = (uint8_t) + clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; + } + + return 0; +} + +static const struct smu_temperature_range vega20_thermal_policy[] = +{ + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +static int vega20_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + + PPTable_t *pptable = smu->smu_table.driver_pptable; + + if (!range) + return -EINVAL; + + memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); + + range->max = pptable->TedgeLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_crit_max = pptable->ThotspotLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_crit_max = pptable->ThbmLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + + + return 0; +} + static const struct pptable_funcs vega20_ppt_funcs = { + .tables_init = vega20_tables_init, .alloc_dpm_context = vega20_allocate_dpm_context, .store_powerplay_table = vega20_store_powerplay_table, .check_powerplay_table = vega20_check_powerplay_table, .append_powerplay_table = vega20_append_powerplay_table, .get_smu_msg_index = vega20_get_smu_msg_index, + .get_smu_clk_index = vega20_get_smu_clk_index, + .get_smu_feature_index = vega20_get_smu_feature_index, + .get_smu_table_index = vega20_get_smu_table_index, + .get_smu_power_index = vega20_get_pwr_src_index, + .get_workload_type = vega20_get_workload_type, .run_afll_btc = vega20_run_btc_afll, - .get_unallowed_feature_mask = vega20_get_unallowed_feature_mask, + .get_allowed_feature_mask = vega20_get_allowed_feature_mask, .get_current_power_state = vega20_get_current_power_state, .set_default_dpm_table = vega20_set_default_dpm_table, .set_power_state = NULL, @@ -2540,27 +3272,36 @@ static const struct pptable_funcs vega20_ppt_funcs = { .print_clk_levels = vega20_print_clk_levels, .force_clk_levels = vega20_force_clk_levels, .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency, - .set_default_od8_settings = vega20_set_default_od8_setttings, .get_od_percentage = vega20_get_od_percentage, - .get_performance_level = 
vega20_get_performance_level, - .force_performance_level = vega20_force_performance_level, - .update_specified_od8_value = vega20_update_specified_od8_value, + .get_power_profile_mode = vega20_get_power_profile_mode, + .set_power_profile_mode = vega20_set_power_profile_mode, .set_od_percentage = vega20_set_od_percentage, + .set_default_od_settings = vega20_set_default_od_settings, .od_edit_dpm_table = vega20_odn_edit_dpm_table, + .dpm_set_uvd_enable = vega20_dpm_set_uvd_enable, + .dpm_set_vce_enable = vega20_dpm_set_vce_enable, + .read_sensor = vega20_read_sensor, .pre_display_config_changed = vega20_pre_display_config_changed, .display_config_changed = vega20_display_config_changed, .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules, .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config, .force_dpm_limit_value = vega20_force_dpm_limit_value, .unforce_dpm_levels = vega20_unforce_dpm_levels, - .upload_dpm_level = vega20_upload_dpm_level, .get_profiling_clk_mask = vega20_get_profiling_clk_mask, .set_ppfeature_status = vega20_set_ppfeature_status, .get_ppfeature_status = vega20_get_ppfeature_status, + .is_dpm_running = vega20_is_dpm_running, + .set_thermal_fan_table = vega20_set_thermal_fan_table, + .get_fan_speed_percent = vega20_get_fan_speed_percent, + .set_watermarks_table = vega20_set_watermarks_table, + .get_thermal_temperature_range = vega20_get_thermal_temperature_range }; void vega20_set_ppt_funcs(struct smu_context *smu) { + struct smu_table_context *smu_table = &smu->smu_table; + smu->ppt_funcs = &vega20_ppt_funcs; smu->smc_if_version = SMU11_DRIVER_IF_VERSION; + smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h index 87f3a8303645..2dc10e47b767 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h @@ -166,6 +166,12 @@ struct vega20_od8_single_setting { struct vega20_od8_settings { struct vega20_od8_single_setting od8_settings_array[OD8_SETTING_COUNT]; + uint8_t *od_feature_capabilities; + uint32_t *od_settings_max; + uint32_t *od_settings_min; + void *od8_settings; + bool od_gfxclk_update; + bool od_memclk_update; }; extern void vega20_set_ppt_funcs(struct smu_context *smu); |
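Editorial note (not part of the patch): the fan-speed and thermal-range hunks above reduce to two small pieces of arithmetic — a percentage of FanMaximumRpm clamped to 100, and pptable limits in degrees C scaled to millidegrees, with a CTF offset added for the emergency thresholds. The standalone sketch below mirrors that arithmetic outside the driver; the demo_* helper names, the sample RPM/temperature values, and the assumption that SMU_TEMPERATURE_UNITS_PER_CENTIGRADES equals 1000 are illustrative only and are not taken from the patch.

/*
 * Standalone C sketch of the arithmetic in vega20_get_fan_speed_percent()
 * and vega20_get_thermal_temperature_range() (hypothetical names/values).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TEMP_UNITS_PER_CENTIGRADE 1000 /* assumed millidegree scale */

/* Fan duty as a percentage of the pptable maximum, clamped to 100. */
static uint32_t demo_fan_speed_percent(uint32_t current_rpm, uint32_t fan_max_rpm)
{
	uint32_t percent = current_rpm * 100 / fan_max_rpm;

	return percent > 100 ? 100 : percent;
}

/* Emergency threshold: pptable limit in degrees C plus a CTF offset,
 * reported in millidegrees. */
static int demo_edge_emergency_max(int tedge_limit_c, int ctf_offset_edge_c)
{
	return (tedge_limit_c + ctf_offset_edge_c) * DEMO_TEMP_UNITS_PER_CENTIGRADE;
}

int main(void)
{
	/* 3400 RPM against an assumed 3100 RPM maximum clamps to 100%. */
	printf("fan: %u%%\n", demo_fan_speed_percent(3400, 3100));
	/* An assumed 100 C edge limit with a 5 C offset reports 105000. */
	printf("edge emergency max: %d millidegrees C\n",
	       demo_edge_emergency_max(100, 5));
	return 0;
}

Any C99 compiler builds this as-is; it only demonstrates the clamping and scaling, not the SMU messaging that surrounds them in the driver.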