Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
41 files changed, 1433 insertions, 1568 deletions
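For orientation before the diff body: this series drops the separate powergate_uvd/powergate_vce entries from amd_pm_funcs and routes all block powergating through a single set_powergating_by_smu hook keyed by IP block type. The sketch below is not part of the patch; it assumes the usual adev->powerplay.pp_funcs / pp_handle glue of this kernel generation, and the wrapper name is hypothetical.

/*
 * Minimal caller sketch (illustrative only, not from this patch):
 * gate or ungate one IP block through the new unified hook.
 */
static int example_powergate_block(struct amdgpu_device *adev,
				   uint32_t block_type, bool gate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_powergating_by_smu)
		return pp_funcs->set_powergating_by_smu(pp_handle,
							block_type, gate);

	return 0;
}

/* e.g. example_powergate_block(adev, AMD_IP_BLOCK_TYPE_GFX, true); */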
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index d567be49c31b..7a646f94b478 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle) static int pp_set_powergating_state(void *handle, enum amd_powergating_state state) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret; - - if (!hwmgr || !hwmgr->pm_en) - return 0; - - if (hwmgr->hwmgr_func->gfx_off_control) { - /* Enable/disable GFX off through SMU */ - ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, - state == AMD_PG_STATE_GATE); - if (ret) - pr_err("gfx off control failed!\n"); - } - - if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { - pr_debug("%s was not implemented.\n", __func__); - return 0; - } - - /* Enable/disable GFX per cu powergating through SMU */ - return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr, - state == AMD_PG_STATE_GATE); + return 0; } static int pp_suspend(void *handle) @@ -1020,7 +998,7 @@ static int pp_get_display_power_level(void *handle, static int pp_get_current_clocks(void *handle, struct amd_pp_clock_info *clocks) { - struct amd_pp_simple_clock_info simple_clocks; + struct amd_pp_simple_clock_info simple_clocks = { 0 }; struct pp_clock_info hw_clocks; struct pp_hwmgr *hwmgr = handle; int ret = 0; @@ -1056,7 +1034,10 @@ static int pp_get_current_clocks(void *handle, clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; - clocks->max_clocks_state = simple_clocks.level; + if (simple_clocks.level == 0) + clocks->max_clocks_state = PP_DAL_POWERLEVEL_7; + else + clocks->max_clocks_state = simple_clocks.level; if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) { clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; @@ -1118,17 +1099,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle, } static int pp_set_watermarks_for_clocks_ranges(void *handle, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_ranges) { struct pp_hwmgr *hwmgr = handle; int ret = 0; - if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges) + if (!hwmgr || !hwmgr->pm_en || !clock_ranges) return -EINVAL; mutex_lock(&hwmgr->smu_lock); ret = phm_set_watermarks_for_clocks_ranges(hwmgr, - wm_with_clock_ranges); + clock_ranges); mutex_unlock(&hwmgr->smu_lock); return ret; @@ -1159,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle, if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; + clocks->level = PP_DAL_POWERLEVEL_7; + mutex_lock(&hwmgr->smu_lock); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) @@ -1168,19 +1151,78 @@ static int pp_get_display_mode_validation_clocks(void *handle, return ret; } -static int pp_set_mmhub_powergating_by_smu(void *handle) +static int pp_dpm_powergate_mmhub(void *handle) { struct pp_hwmgr *hwmgr = handle; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) { + if (hwmgr->hwmgr_func->powergate_mmhub == NULL) { + pr_info("%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->powergate_mmhub(hwmgr); +} + +static int pp_dpm_powergate_gfx(void *handle, bool gate) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return 0; + + if 
(hwmgr->hwmgr_func->powergate_gfx == NULL) { pr_info("%s was not implemented.\n", __func__); return 0; } - return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr); + return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate); +} + +static int pp_set_powergating_by_smu(void *handle, + uint32_t block_type, bool gate) +{ + int ret = 0; + + switch (block_type) { + case AMD_IP_BLOCK_TYPE_UVD: + case AMD_IP_BLOCK_TYPE_VCN: + pp_dpm_powergate_uvd(handle, gate); + break; + case AMD_IP_BLOCK_TYPE_VCE: + pp_dpm_powergate_vce(handle, gate); + break; + case AMD_IP_BLOCK_TYPE_GMC: + pp_dpm_powergate_mmhub(handle); + break; + case AMD_IP_BLOCK_TYPE_GFX: + ret = pp_dpm_powergate_gfx(handle, gate); + break; + default: + break; + } + return ret; +} + +static int pp_notify_smu_enable_pwe(void *handle) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) { + pr_info("%s was not implemented.\n", __func__); + return -EINVAL;; + } + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->smus_notify_pwe(hwmgr); + mutex_unlock(&hwmgr->smu_lock); + + return 0; } static const struct amd_pm_funcs pp_dpm_funcs = { @@ -1189,8 +1231,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .force_performance_level = pp_dpm_force_performance_level, .get_performance_level = pp_dpm_get_performance_level, .get_current_power_state = pp_dpm_get_current_power_state, - .powergate_vce = pp_dpm_powergate_vce, - .powergate_uvd = pp_dpm_powergate_uvd, .dispatch_tasks = pp_dpm_dispatch_tasks, .set_fan_control_mode = pp_dpm_set_fan_control_mode, .get_fan_control_mode = pp_dpm_get_fan_control_mode, @@ -1210,6 +1250,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .get_vce_clock_state = pp_dpm_get_vce_clock_state, .switch_power_profile = pp_dpm_switch_power_profile, .set_clockgating_by_smu = pp_set_clockgating_by_smu, + .set_powergating_by_smu = pp_set_powergating_by_smu, .get_power_profile_mode = pp_get_power_profile_mode, .set_power_profile_mode = pp_set_power_profile_mode, .odn_edit_dpm_table = pp_odn_edit_dpm_table, @@ -1227,5 +1268,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges, .display_clock_voltage_request = pp_display_clock_voltage_request, .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks, - .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu, + .notify_smu_enable_pwe = pp_notify_smu_enable_pwe, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index a0bb921fac22..53207e76b0f3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_ranges) { PHM_FUNC_CHECK(hwmgr); @@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, return -EINVAL; return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, - wm_with_clock_ranges); + clock_ranges); } int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index e63bc47dc715..8994aa5c8cf8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) return -EINVAL; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; - hwmgr->power_source = PP_PowerSource_AC; hwmgr->pp_table_version = PP_TABLE_V1; hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; @@ -148,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) smu7_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_AI: - hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_VEGA10: case CHIP_VEGA20: + hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); break; @@ -236,6 +235,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) ret = hwmgr->hwmgr_func->backend_init(hwmgr); if (ret) goto err1; + /* make sure dc limits are valid */ + if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0)) + hwmgr->dyn_state.max_clock_voltage_on_dc = + hwmgr->dyn_state.max_clock_voltage_on_ac; ret = psm_init_power_state_table(hwmgr); if (ret) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 7047e29755c3..01dc46dc9c8a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, switch (hwmgr->chip_id) { case CHIP_TONGA: case CHIP_FIJI: - *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4); - *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4); + *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4; + *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4; return; case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: - *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100); - *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100); + *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100; + *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100; return; default: break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 35bd9870ab10..4e1fd5393845 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -183,10 +183,10 @@ static int get_vddc_lookup_table( ATOM_Tonga_Voltage_Lookup_Record, entries, vddc_lookup_pp_tables, i); record->us_calculated = 0; - record->us_vdd = atom_record->usVdd; - record->us_cac_low = atom_record->usCACLow; - record->us_cac_mid = atom_record->usCACMid; - record->us_cac_high = atom_record->usCACHigh; + record->us_vdd = le16_to_cpu(atom_record->usVdd); + record->us_cac_low = le16_to_cpu(atom_record->usCACLow); + record->us_cac_mid = le16_to_cpu(atom_record->usCACMid); + record->us_cac_high = le16_to_cpu(atom_record->usCACHigh); } *lookup_table = table; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index d4bc83e81389..a63e00653324 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -993,7 +993,7 @@ static int 
smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; + clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; clocks->data[i].latency_in_us = latency_required ? smu10_get_mem_latency(hwmgr, pclk_vol_table->entries[i].clk) : @@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; + clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; clocks->num_levels++; } @@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, } static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_ranges) { struct smu10_hwmgr *data = hwmgr->backend; + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; Watermarks_t *table = &(data->water_marks_table); int result = 0; @@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister); } -static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) +static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr) { return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); } @@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .asic_setup = smu10_setup_asic_task, .power_state_set = smu10_set_power_state_tasks, .dynamic_state_management_disable = smu10_disable_dpm_tasks, - .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, + .powergate_mmhub = smu10_powergate_mmhub, .smus_notify_pwe = smu10_smus_notify_pwe, .gfx_off_control = smu10_gfx_off_control, .display_clock_voltage_request = smu10_display_clock_voltage_request, + .powergate_gfx = smu10_gfx_off_control, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 6d72a5600917..683b29a99366 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) PPSMC_MSG_VCEDPM_Disable); } -static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr, enable ? 
- PPSMC_MSG_SAMUDPM_Enable : - PPSMC_MSG_SAMUDPM_Disable); -} - static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) { if (!bgate) @@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) return smu7_enable_disable_vce_dpm(hwmgr, !bgate); } -static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - if (!bgate) - smum_update_smc_table(hwmgr, SMU_SAMU_TABLE); - return smu7_enable_disable_samu_dpm(hwmgr, !bgate); -} - int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) @@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) return 0; } -static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SAMPowerOFF); - return 0; -} - -static int smu7_powerup_samu(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SAMPowerON); - return 0; -} - int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); data->uvd_power_gated = false; data->vce_power_gated = false; - data->samu_power_gated = false; smu7_powerup_uvd(hwmgr); smu7_powerup_vce(hwmgr); - smu7_powerup_samu(hwmgr); return 0; } @@ -195,26 +161,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) } } -int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - - if (data->samu_power_gated == bgate) - return 0; - - data->samu_power_gated = bgate; - - if (bgate) { - smu7_update_samu_dpm(hwmgr, true); - smu7_powerdown_samu(hwmgr); - } else { - smu7_powerup_samu(hwmgr); - smu7_update_samu_dpm(hwmgr, false); - } - - return 0; -} - int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id) { @@ -470,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, * Powerplay will only control the static per CU Power Gating. * Dynamic per CU Power Gating will be done in gfx. 
*/ -int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) +int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable) { struct amdgpu_device *adev = hwmgr->adev; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h index 1ddce023218a..fc8f8a6acc72 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -29,11 +29,10 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); -int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr); int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); -int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable); +int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f8e866ceda02..052e60dfaf9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -48,6 +48,8 @@ #include "processpptables.h" #include "pp_thermal.h" +#include "ivsrcid/ivsrcid_vislands30.h" + #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b #define MC_CG_ARB_FREQ_F2 0x0c @@ -885,6 +887,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr) data->odn_dpm_table.max_vddc = max_vddc; } +static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; + + if (table_info == NULL) + return; + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + if (odn_table->odn_core_clock_dpm_levels.entries[i].clock != + data->dpm_table.sclk_table.dpm_levels[i].value) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + break; + } + } + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock != + data->dpm_table.mclk_table.dpm_levels[i].value) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + break; + } + } + + dep_table = table_info->vdd_dep_on_mclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk); + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; + } + } + + dep_table = table_info->vdd_dep_on_sclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; + } + } + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + data->need_update_smu7_dpm_table &= 
~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + } +} + static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -904,10 +960,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) /* initialize ODN table */ if (hwmgr->od_enabled) { - smu7_setup_voltage_range_from_vbios(hwmgr); - smu7_odn_initial_default_setting(hwmgr); + if (data->odn_dpm_table.max_vddc) { + smu7_check_dpm_table_updated(hwmgr); + } else { + smu7_setup_voltage_range_from_vbios(hwmgr); + smu7_odn_initial_default_setting(hwmgr); + } } - return 0; } @@ -1521,7 +1580,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->current_profile_setting.sclk_up_hyst = 0; data->current_profile_setting.sclk_down_hyst = 100; data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; - data->current_profile_setting.bupdate_sclk = 1; + data->current_profile_setting.bupdate_mclk = 1; data->current_profile_setting.mclk_up_hyst = 0; data->current_profile_setting.mclk_down_hyst = 100; data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT; @@ -2820,7 +2879,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *request_ps, const struct pp_power_state *current_ps) { - + struct amdgpu_device *adev = hwmgr->adev; struct smu7_power_state *smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware); uint32_t sclk; @@ -2843,12 +2902,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, "VI should always have 2 performance levels", ); - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + max_limits = adev->pm.ac_power ? &(hwmgr->dyn_state.max_clock_voltage_on_ac) : &(hwmgr->dyn_state.max_clock_voltage_on_dc); /* Cap clock DPM tables at DC MAX if it is in DC. 
*/ - if (PP_PowerSource_DC == hwmgr->power_source) { + if (!adev->pm.ac_power) { for (i = 0; i < smu7_ps->performance_level_count; i++) { if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; @@ -3126,7 +3185,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, state_entry->ucPCIEGenLow); performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); + state_entry->ucPCIELaneLow); performance_level = &(smu7_power_state->performance_levels [smu7_power_state->performance_level_count++]); @@ -3717,8 +3776,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, uint32_t i; for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) - || (dpm_table->dpm_levels[i].value > high_limit)) + /*skip the trim if od is enabled*/ + if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit + || dpm_table->dpm_levels[i].value > high_limit)) dpm_table->dpm_levels[i].enabled = false; else dpm_table->dpm_levels[i].enabled = true; @@ -3762,10 +3822,8 @@ static int smu7_generate_dpm_level_enable_mask( const struct smu7_power_state *smu7_ps = cast_const_phw_smu7_power_state(states->pnew_state); - /*skip the trim if od is enabled*/ - if (!hwmgr->od_enabled) - result = smu7_trim_dpm_states(hwmgr, smu7_ps); + result = smu7_trim_dpm_states(hwmgr, smu7_ps); if (result) return result; @@ -4049,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), AMDGPU_IH_CLIENTID_LEGACY, - 230, + VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, source); amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), AMDGPU_IH_CLIENTID_LEGACY, - 231, + VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, source); /* Register CTF(GPIO_19) interrupt */ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), AMDGPU_IH_CLIENTID_LEGACY, - 83, + VISLANDS30_IV_SRCID_GPIO_19, source); return 0; @@ -4244,7 +4302,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = false; data->vce_power_gated = false; - data->samu_power_gated = false; return 0; } @@ -4555,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) return -EINVAL; dep_sclk_table = table_info->vdd_dep_on_sclk; for (i = 0; i < dep_sclk_table->count; i++) - clocks->clock[i] = dep_sclk_table->entries[i].clk; + clocks->clock[i] = dep_sclk_table->entries[i].clk * 10; clocks->count = dep_sclk_table->count; } else if (hwmgr->pp_table_version == PP_TABLE_V0) { sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; for (i = 0; i < sclk_table->count; i++) - clocks->clock[i] = sclk_table->entries[i].clk; + clocks->clock[i] = sclk_table->entries[i].clk * 10; clocks->count = sclk_table->count; } @@ -4592,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) return -EINVAL; dep_mclk_table = table_info->vdd_dep_on_mclk; for (i = 0; i < dep_mclk_table->count; i++) { - clocks->clock[i] = dep_mclk_table->entries[i].clk; + clocks->clock[i] = dep_mclk_table->entries[i].clk * 10; clocks->latency[i] = smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk); } @@ -4600,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) } else if (hwmgr->pp_table_version == PP_TABLE_V0) { mclk_table = 
hwmgr->dyn_state.vddc_dependency_on_mclk; for (i = 0; i < mclk_table->count; i++) - clocks->clock[i] = mclk_table->entries[i].clk; + clocks->clock[i] = mclk_table->entries[i].clk * 10; clocks->count = mclk_table->count; } return 0; @@ -4739,60 +4796,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, return true; } -static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) -{ - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; - struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; - - if (table_info == NULL) - return; - - for (i=0; i<data->dpm_table.sclk_table.count; i++) { - if (odn_table->odn_core_clock_dpm_levels.entries[i].clock != - data->dpm_table.sclk_table.dpm_levels[i].value) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - break; - } - } - - for (i=0; i<data->dpm_table.mclk_table.count; i++) { - if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock != - data->dpm_table.mclk_table.dpm_levels[i].value) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - break; - } - } - - dep_table = table_info->vdd_dep_on_mclk; - odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk); - - for (i=0; i < dep_table->count; i++) { - if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; - return; - } - } - - dep_table = table_info->vdd_dep_on_sclk; - odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); - for (i=0; i < dep_table->count; i++) { - if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; - return; - } - } - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { - data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; - } -} - static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, enum PP_OD_DPM_TABLE_COMMAND type, long *input, uint32_t size) @@ -5043,7 +5046,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_fan_control_mode = smu7_get_fan_control_mode, .force_clock_level = smu7_force_clock_level, .print_clock_levels = smu7_print_clock_levels, - .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating, + .powergate_gfx = smu7_powergate_gfx, .get_sclk_od = smu7_get_sclk_od, .set_sclk_od = smu7_set_sclk_od, .get_mclk_od = smu7_get_mclk_od, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index c91e75db6a8e..3784ce6e50ab 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -310,7 +310,6 @@ struct smu7_hwmgr { /* ---- Power Gating States ---- */ bool uvd_power_gated; bool vce_power_gated; - bool samu_power_gated; bool need_long_memory_training; /* Application power optimization parameters */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index c952845833d7..5e19f5977eb1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c 
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, 
DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 50690c72b2ea..0adfc5392cd3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) return 0; } +/* convert form 8bit vid to real voltage in mV*4 */ static uint32_t smu8_convert_8Bit_index_to_voltage( struct pp_hwmgr *hwmgr, uint16_t voltage) { @@ -1604,17 +1605,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type switch (type) { case amd_pp_disp_clock: for (i = 0; i < clocks->count; i++) - clocks->clock[i] = data->sys_info.display_clock[i]; + clocks->clock[i] = data->sys_info.display_clock[i] * 10; break; case amd_pp_sys_clock: table = hwmgr->dyn_state.vddc_dependency_on_sclk; for (i = 0; i < clocks->count; i++) - clocks->clock[i] = table->entries[i].clk; + clocks->clock[i] = table->entries[i].clk * 10; break; case amd_pp_mem_clock: clocks->count = SMU8_NUM_NBPMEMORYCLOCK; for (i = 0; i < clocks->count; i++) - clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; + clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10; break; default: return -1; @@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_VDDNB: tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; - vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp); + vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4; *((uint32_t *)value) = vddnb; 
return 0; case AMDGPU_PP_SENSOR_VDDGFX: tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; - vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); + vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4; *((uint32_t *)value) = vddgfx; return 0; case AMDGPU_PP_SENSOR_UVD_VCLK: diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 93a3d022ba47..2aab1b475945 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -25,6 +25,9 @@ #include "ppatomctrl.h" #include "ppsmc.h" #include "atom.h" +#include "ivsrcid/thm/irqsrcs_thm_9_0.h" +#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h" +#include "ivsrcid/ivsrcid_vislands30.h" uint8_t convert_to_vid(uint16_t vddc) { @@ -543,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev, uint32_t src_id = entry->src_id; if (client_id == AMDGPU_IH_CLIENTID_LEGACY) { - if (src_id == 230) + if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); - else if (src_id == 231) + else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); - else if (src_id == 83) + else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), @@ -594,17 +597,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr) amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), SOC15_IH_CLIENTID_THM, - 0, + THM_9_0__SRCID__THM_DIG_THERM_L2H, source); amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), SOC15_IH_CLIENTID_THM, - 1, + THM_9_0__SRCID__THM_DIG_THERM_H2L, source); /* Register CTF(GPIO_19) interrupt */ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), SOC15_IH_CLIENTID_ROM_SMUIO, - 83, + SMUIO_9_0__SRCID__SMUIO_GPIO19, source); return 0; @@ -652,7 +655,7 @@ int smu_get_voltage_dependency_table_ppt_v1( } int smu_set_watermarks_for_clocks_ranges(void *wt_table, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) { uint32_t i; struct watermarks *table = wt_table; @@ -660,49 +663,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, if (!table || !wm_with_clock_ranges) return -EINVAL; - if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4) + if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4) return -EINVAL; - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { + for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) { table->WatermarkRow[1][i].MinClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) / + 1000); table->WatermarkRow[1][i].MaxClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / 100); table->WatermarkRow[1][i].MinUclk = cpu_to_le16((uint16_t) - 
(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / + 1000); table->WatermarkRow[1][i].MaxUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) / + 1000); table->WatermarkRow[1][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; + wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; } - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { + for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) { table->WatermarkRow[0][i].MinClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) / + 1000); table->WatermarkRow[0][i].MaxClock = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) / + 1000); table->WatermarkRow[0][i].MinUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) / + 1000); table->WatermarkRow[0][i].MaxUclk = cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / - 100); + (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) / + 1000); table->WatermarkRow[0][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; + wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h index 916cc01e7652..5454289d5226 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h @@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1( struct phm_ppt_v1_clock_voltage_dependency_table *dep_table); int smu_set_watermarks_for_clocks_ranges(void *wt_table, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 05e680d55dbb..fb86c24394ff 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -55,12 +55,6 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; -#define MEM_FREQ_LOW_LATENCY 25000 -#define MEM_FREQ_HIGH_LATENCY 80000 -#define MEM_LATENCY_HIGH 245 -#define MEM_LATENCY_LOW 35 -#define MEM_LATENCY_ERR 0xFFFF - #define mmDF_CS_AON0_DramBaseAddress0 0x0044 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 @@ -295,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3]; struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3]; + struct pp_atomfwctrl_avfs_parameters avfs_params = {0}; uint32_t i; + int result; + + result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); + if (!result) { + data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc; + 
data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc; + } od_lookup_table = &odn_table->vddc_lookup_table; vddc_lookup_table = table_info->vddc_lookup_table; @@ -2078,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_AVFS].supported) { result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); if (!result) { - data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc; - data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc; - pp_table->MinVoltageVid = (uint8_t) convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); pp_table->MaxVoltageVid = (uint8_t) @@ -2414,6 +2413,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) return result; } +static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; + uint32_t i; + + dep_table = table_info->vdd_dep_on_mclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk); + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; + } + } + + dep_table = table_info->vdd_dep_on_sclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk); + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; + } + } + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + } +} + /** * Initializes the SMC table and uploads it * @@ -2430,6 +2463,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct pp_atomfwctrl_voltage_table voltage_table; struct pp_atomfwctrl_bios_boot_up_values boot_up_values; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); result = vega10_setup_default_dpm_tables(hwmgr); PP_ASSERT_WITH_CODE(!result, @@ -2437,8 +2471,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) return result); /* initialize ODN table */ - if (hwmgr->od_enabled) - vega10_odn_initial_default_setting(hwmgr); + if (hwmgr->od_enabled) { + if (odn_table->max_vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + vega10_check_dpm_table_updated(hwmgr); + } else { + vega10_odn_initial_default_setting(hwmgr); + } + } pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, &voltage_table); @@ -2861,11 +2901,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) vega10_enable_disable_PCC_limit_feature(hwmgr, true); - if ((hwmgr->smu_version == 0x001c2c00) || - (hwmgr->smu_version == 0x001c2d00)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_UpdatePkgPwrPidAlpha, 1); - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); @@ -3061,6 +3096,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state 
*request_ps, const struct pp_power_state *current_ps) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_power_state *vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware); uint32_t sclk; @@ -3086,12 +3122,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, if (vega10_ps->performance_level_count != 2) pr_info("VI should always have 2 performance levels"); - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + max_limits = adev->pm.ac_power ? &(hwmgr->dyn_state.max_clock_voltage_on_ac) : &(hwmgr->dyn_state.max_clock_voltage_on_dc); /* Cap clock DPM tables at DC MAX if it is in DC. */ - if (PP_PowerSource_DC == hwmgr->power_source) { + if (!adev->pm.ac_power) { for (i = 0; i < vega10_ps->performance_level_count; i++) { if (vega10_ps->performance_levels[i].mem_clock > max_limits->mclk) @@ -3181,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, /* Find the lowest MCLK frequency that is within * the tolerable latency defined in DAL */ - latency = 0; + latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; for (i = 0; i < data->mclk_latency_table.count; i++) { if ((data->mclk_latency_table.entries[i].latency <= latency) && (data->mclk_latency_table.entries[i].frequency >= @@ -3223,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( { int result = 0; struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_dpm_table *dpm_table = &data->dpm_table; + struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table; + struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk; + int count; if (!data->need_update_dpm_table) return 0; + if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + for (count = 0; count < dpm_table->gfx_table.count; count++) + dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk; + } + + odn_clk_table = &odn_table->vdd_dep_on_mclk; + if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + for (count = 0; count < dpm_table->mem_table.count; count++) + dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk; + } + if (data->need_update_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) { result = vega10_populate_all_graphic_levels(hwmgr); @@ -3674,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, - has_disp ? 0 : 1); + has_disp ? 
1 : 0); } int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, @@ -3749,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( uint32_t i; struct pp_display_clock_request clock_req; - if (hwmgr->display_config->num_display > 1) + if ((hwmgr->display_config->num_display > 1) && + !hwmgr->display_config->multi_monitor_in_sync && + !hwmgr->display_config->nb_pstate_switch_disable) vega10_notify_smc_display_change(hwmgr, false); else vega10_notify_smc_display_change(hwmgr, true); @@ -3765,7 +3818,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( if (i < dpm_table->count) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; + clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10; if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -4022,28 +4075,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr, table_info->vdd_dep_on_sclk; uint32_t i; + clocks->num_levels = 0; for (i = 0; i < dep_table->count; i++) { if (dep_table->entries[i].clk) { clocks->data[clocks->num_levels].clocks_in_khz = - dep_table->entries[i].clk; + dep_table->entries[i].clk * 10; clocks->num_levels++; } } } -static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr, - uint32_t clock) -{ - if (clock >= MEM_FREQ_LOW_LATENCY && - clock < MEM_FREQ_HIGH_LATENCY) - return MEM_LATENCY_HIGH; - else if (clock >= MEM_FREQ_HIGH_LATENCY) - return MEM_LATENCY_LOW; - else - return MEM_LATENCY_ERR; -} - static void vega10_get_memclocks(struct pp_hwmgr *hwmgr, struct pp_clock_levels_with_latency *clocks) { @@ -4052,26 +4094,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_mclk; struct vega10_hwmgr *data = hwmgr->backend; + uint32_t j = 0; uint32_t i; - clocks->num_levels = 0; - data->mclk_latency_table.count = 0; - for (i = 0; i < dep_table->count; i++) { if (dep_table->entries[i].clk) { - clocks->data[clocks->num_levels].clocks_in_khz = - data->mclk_latency_table.entries - [data->mclk_latency_table.count].frequency = - dep_table->entries[i].clk; - clocks->data[clocks->num_levels].latency_in_us = - data->mclk_latency_table.entries - [data->mclk_latency_table.count].latency = - vega10_get_mem_latency(hwmgr, - dep_table->entries[i].clk); - clocks->num_levels++; - data->mclk_latency_table.count++; + + clocks->data[j].clocks_in_khz = + dep_table->entries[i].clk * 10; + data->mclk_latency_table.entries[j].frequency = + dep_table->entries[i].clk; + clocks->data[j].latency_in_us = + data->mclk_latency_table.entries[j].latency = 25; + j++; } } + clocks->num_levels = data->mclk_latency_table.count = j; } static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr, @@ -4084,7 +4122,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr, uint32_t i; for (i = 0; i < dep_table->count; i++) { - clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; clocks->data[i].latency_in_us = 0; clocks->num_levels++; } @@ -4100,7 +4138,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr, uint32_t i; for (i = 0; i < dep_table->count; i++) { - clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; clocks->data[i].latency_in_us = 0; clocks->num_levels++; } @@ -4160,7 +4198,7 @@ static int 
vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } for (i = 0; i < dep_table->count; i++) { - clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table-> entries[dep_table->entries[i].vddInd].us_vdd); clocks->num_levels++; @@ -4173,9 +4211,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_range) { struct vega10_hwmgr *data = hwmgr->backend; + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range; Watermarks_t *table = &(data->smc_state_table.water_marks_table); int result = 0; @@ -4695,40 +4734,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, return true; } -static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr) -{ - struct vega10_hwmgr *data = hwmgr->backend; - struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); - struct phm_ppt_v2_information *table_info = hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; - struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; - uint32_t i; - - dep_table = table_info->vdd_dep_on_mclk; - odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk); - - for (i = 0; i < dep_table->count; i++) { - if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; - return; - } - } - - dep_table = table_info->vdd_dep_on_sclk; - odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk); - for (i = 0; i < dep_table->count; i++) { - if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; - return; - } - } - - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { - data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; - } -} - static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, enum PP_OD_DPM_TABLE_COMMAND type) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index aadd6cbc7e85..339820da9e6a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -370,7 +370,6 @@ struct vega10_hwmgr { /* ---- Power Gating States ---- */ bool uvd_power_gated; bool vce_power_gated; - bool samu_power_gated; bool need_long_memory_training; /* Internal settings to apply the application power optimization parameters */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index c98e5de777cd..0789d64246ca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -423,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit * hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; + if (hwmgr->feature_mask & PP_GFXOFF_MASK) + data->gfxoff_controlled_by_driver = true; + else + data->gfxoff_controlled_by_driver = false; + return result; } @@ 
-454,43 +459,36 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr) */ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state) { - dpm_state->soft_min_level = 0xff; - dpm_state->soft_max_level = 0xff; - dpm_state->hard_min_level = 0xff; - dpm_state->hard_max_level = 0xff; + dpm_state->soft_min_level = 0x0; + dpm_state->soft_max_level = 0xffff; + dpm_state->hard_min_level = 0x0; + dpm_state->hard_max_level = 0xffff; } -static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr, - PPCLK_e clkID, uint32_t *num_dpm_level) +static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, + PPCLK_e clk_id, uint32_t *num_of_levels) { - int result; - /* - * SMU expects the Clock ID to be in the top 16 bits. - * Lower 16 bits specify the level however 0xFF is a - * special argument the returns the total number of levels - */ - PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0, - "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!", - return -EINVAL); - - result = vega12_read_arg_from_smc(hwmgr, num_dpm_level); + int ret = 0; - PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER, - "[GetNumberDPMLevel] Number of DPM levels is greater than limit", - return -EINVAL); + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmFreqByIndex, + (clk_id << 16 | 0xFF)); + PP_ASSERT_WITH_CODE(!ret, + "[GetNumOfDpmLevel] failed to get dpm levels!", + return ret); - PP_ASSERT_WITH_CODE(*num_dpm_level != 0, - "[GetNumberDPMLevel] Number of CLK Levels is zero!", - return -EINVAL); + *num_of_levels = smum_get_argument(hwmgr); + PP_ASSERT_WITH_CODE(*num_of_levels > 0, + "[GetNumOfDpmLevel] number of clk levels is invalid!", + return -EINVAL); - return result; + return ret; } static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, PPCLK_e clkID, uint32_t index, uint32_t *clock) { - int result; + int result = 0; /* *SMU expects the Clock ID to be in the top 16 bits. @@ -501,15 +499,36 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!", return -EINVAL); - result = vega12_read_arg_from_smc(hwmgr, clock); - - PP_ASSERT_WITH_CODE(*clock != 0, - "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!", - return -EINVAL); + *clock = smum_get_argument(hwmgr); return result; } +static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr, + struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id) +{ + int ret = 0; + uint32_t i, num_of_levels, clk; + + ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels); + PP_ASSERT_WITH_CODE(!ret, + "[SetupSingleDpmTable] failed to get clk levels!", + return ret); + + dpm_table->count = num_of_levels; + + for (i = 0; i < num_of_levels; i++) { + ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk); + PP_ASSERT_WITH_CODE(!ret, + "[SetupSingleDpmTable] failed to get clk of specific level!", + return ret); + dpm_table->dpm_levels[i].value = clk; + dpm_table->dpm_levels[i].enabled = true; + } + + return ret; +} + /* * This function is to initialize all DPM state tables * for SMU based on the dependency table. 
@@ -520,224 +539,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, */ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) { - uint32_t num_levels, i, clock; struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - struct vega12_single_dpm_table *dpm_table; + int ret = 0; memset(&data->dpm_table, 0, sizeof(data->dpm_table)); - /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */ + /* socclk */ dpm_table = &(data->dpm_table.soc_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_SOCCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get socclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_GFXCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); - /* Initialize Mclk DPM table based on allow Mclk values */ - dpm_table = &(data->dpm_table.mem_table); - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_UCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + /* memclk */ + dpm_table = &(data->dpm_table.mem_table); + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get memclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* eclk */ dpm_table = 
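Each clock domain in vega12_setup_default_dpm_tables() now follows the same shape: if the matching SMU feature is enabled, populate the table from firmware via vega12_setup_single_dpm_table(); otherwise fall back to a single level taken from the VBIOS boot state divided by 100, which is consistent with the boot clocks being kept in 10 kHz units, though that unit is an assumption here. A condensed sketch of the pattern with stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct dpm_level { uint32_t value; bool enabled; };                     /* stand-ins for */
struct dpm_table { uint32_t count; struct dpm_level dpm_levels[16]; };  /* the vega12 types */

/* Per-domain fallback: one boot-state level when the DPM feature is off. */
static int setup_table_or_boot_level(struct dpm_table *t,
				     bool feature_enabled,
				     uint32_t boot_clock_10khz,
				     int (*query_smu)(struct dpm_table *t))
{
	if (feature_enabled)
		return query_smu(t);   /* i.e. vega12_setup_single_dpm_table() */

	t->count = 1;
	t->dpm_levels[0].value = boot_clock_10khz / 100;  /* 10 kHz -> MHz (assumed) */
	return 0;
}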
&(data->dpm_table.eclk_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_ECLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + if (data->smu_features[GNLD_DPM_VCE].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get eclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* vclk */ dpm_table = &(data->dpm_table.vclk_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_VCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + if (data->smu_features[GNLD_DPM_UVD].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get vclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* dclk */ dpm_table = &(data->dpm_table.dclk_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_DCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + if (data->smu_features[GNLD_DPM_UVD].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); - /* Assume there is no headless Vega12 for now */ + /* dcefclk */ dpm_table = &(data->dpm_table.dcef_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_DCEFCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_DCEFCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; + if 
(data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* pixclk */ dpm_table = &(data->dpm_table.pixel_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_PIXCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_PIXCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; - } - + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get pixclk dpm levels!", + return ret); + } else + dpm_table->count = 0; vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* dispclk */ dpm_table = &(data->dpm_table.display_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_DISPCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_DISPCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; - } - + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dispclk dpm levels!", + return ret); + } else + dpm_table->count = 0; vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* phyclk */ dpm_table = &(data->dpm_table.phy_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_PHYCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_PHYCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - dpm_table->dpm_levels[i].enabled = true; - } - + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get phyclk dpm levels!", + return ret); + } else + dpm_table->count = 0; vega12_init_dpm_state(&(dpm_table->dpm_state)); /* save a copy of the default DPM table */ @@ -848,6 +779,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) return 0; } +static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = true; + data->vce_power_gated = true; + + if (data->smu_features[GNLD_DPM_UVD].enabled) + 
data->uvd_power_gated = false; + + if (data->smu_features[GNLD_DPM_VCE].enabled) + data->vce_power_gated = false; +} + static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = @@ -866,12 +812,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr) enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false; data->smu_features[i].enabled = enabled; data->smu_features[i].supported = enabled; - PP_ASSERT( - !data->smu_features[i].allowed || enabled, - "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!"); } } + vega12_init_powergate_state(hwmgr); + return 0; } @@ -927,6 +872,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr) return result; } +static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr, + PPCLK_e clkid, struct vega12_clock_range *clock) +{ + /* AC Max */ + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0, + "[GetClockRanges] Failed to get max ac clock from SMC!", + return -EINVAL); + clock->ACMax = smum_get_argument(hwmgr); + + /* AC Min */ + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0, + "[GetClockRanges] Failed to get min ac clock from SMC!", + return -EINVAL); + clock->ACMin = smum_get_argument(hwmgr); + + /* DC Max */ + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0, + "[GetClockRanges] Failed to get max dc clock from SMC!", + return -EINVAL); + clock->DCMax = smum_get_argument(hwmgr); + + return 0; +} + +static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + uint32_t i; + + for (i = 0; i < PPCLK_COUNT; i++) + PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr, + i, &(data->clk_range[i])), + "Failed to get clk range from SMC!", + return -EINVAL); + + return 0; +} + static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -954,6 +941,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to power control set level!", result = tmp_result); + result = vega12_get_all_clock_ranges(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to get all clock ranges!", + return result); + result = vega12_odn_initialize_default_settings(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to power control set level!", @@ -982,76 +974,172 @@ static uint32_t vega12_find_lowest_dpm_level( break; } + if (i >= table->count) { + i = 0; + table->dpm_levels[i].enabled = true; + } + return i; } static uint32_t vega12_find_highest_dpm_level( struct vega12_single_dpm_table *table) { - uint32_t i = 0; + int32_t i = 0; + PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, + "[FindHighestDPMLevel] DPM Table has too many entries!", + return MAX_REGULAR_DPM_NUMBER - 1); - if (table->count <= MAX_REGULAR_DPM_NUMBER) { - for (i = table->count; i > 0; i--) { - if (table->dpm_levels[i - 1].enabled) - return i - 1; - } - } else { - pr_info("DPM Table Has Too Many Entries!"); - return MAX_REGULAR_DPM_NUMBER - 1; + for (i = table->count - 1; i >= 0; i--) { + if (table->dpm_levels[i].enabled) + break; } - return i; + if (i < 0) { + i = 0; + table->dpm_levels[i].enabled = true; + } + + return (uint32_t)i; } static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = hwmgr->backend; - if 
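vega12_get_all_clock_ranges() is called once from vega12_enable_dpm_tasks() and caches, per PPCLK domain, the AC max/min and DC max frequencies returned by the SMU; later queries answer from that cache instead of messaging the firmware every time. A sketch of the cache shape and lookup, mirroring the vega12_clock_range struct added to the header later in this patch (names below are stand-ins):

#include <stdint.h>

struct clock_range {            /* mirrors vega12_clock_range: ACMax / ACMin / DCMax */
	uint32_t ac_max;
	uint32_t ac_min;
	uint32_t dc_max;
};

#define NUM_PPCLK_DOMAINS 16    /* stand-in for PPCLK_COUNT */

static struct clock_range clk_range[NUM_PPCLK_DOMAINS];   /* filled once at DPM enable */

/* Later range queries read the cached AC limits instead of asking the SMU again. */
static uint32_t get_clock_limit(uint32_t clk_id, int want_max)
{
	return want_max ? clk_range[clk_id].ac_max : clk_range[clk_id].ac_min;
}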
(data->smc_state_table.gfx_boot_level != - data->dpm_table.gfx_table.dpm_state.soft_min_level) { - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMinByFreq, - PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value); - data->dpm_table.gfx_table.dpm_state.soft_min_level = - data->smc_state_table.gfx_boot_level; + uint32_t min_freq; + int ret = 0; + + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min gfxclk !", + return ret); } - if (data->smc_state_table.mem_boot_level != - data->dpm_table.mem_table.dpm_state.soft_min_level) { - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMinByFreq, - PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value); - data->dpm_table.mem_table.dpm_state.soft_min_level = - data->smc_state_table.mem_boot_level; + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min memclk !", + return ret); + + min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + "Failed to set hard min memclk !", + return ret); } - return 0; + if (data->smu_features[GNLD_DPM_UVD].enabled) { + min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_VCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min vclk!", + return ret); + + min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_DCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min dclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_VCE].enabled) { + min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_ECLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min eclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min socclk!", + return ret); + } + + return ret; } static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = hwmgr->backend; - if (data->smc_state_table.gfx_max_level != - data->dpm_table.gfx_table.dpm_state.soft_max_level) { - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMaxByFreq, - /* plus the vale by 1 to align the resolution */ - PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1)); - data->dpm_table.gfx_table.dpm_state.soft_max_level = - data->smc_state_table.gfx_max_level; + uint32_t max_freq; + int ret = 0; + + if 
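vega12_upload_dpm_min_level() now walks every DPM-capable domain and, only when that domain's SMU feature is enabled, sends SetSoftMinByFreq (plus SetHardMinByFreq for UCLK) with the clock ID in the upper halfword and the frequency in the lower one. A hypothetical consolidation of the repeated send, written in driver context and assuming the frequencies fit in 16 bits as the "& 0xffff" masking implies; set_min_freq is not a function in the driver:

/* Sketch of the repeated "set a per-clock minimum" message. */
static int set_min_freq(struct pp_hwmgr *hwmgr, uint16_t msg,
			uint32_t clk_id, uint32_t freq_mhz)
{
	/* msg is PPSMC_MSG_SetSoftMinByFreq or PPSMC_MSG_SetHardMinByFreq;
	 * clock ID in bits 31..16, frequency (MHz, assumed) in bits 15..0.
	 */
	return smum_send_msg_to_smc_with_parameter(hwmgr, msg,
			(clk_id << 16) | (freq_mhz & 0xffff));
}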
(data->smu_features[GNLD_DPM_GFXCLK].enabled) { + max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max gfxclk!", + return ret); } - if (data->smc_state_table.mem_max_level != - data->dpm_table.mem_table.dpm_state.soft_max_level) { - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMaxByFreq, - /* plus the vale by 1 to align the resolution */ - PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1)); - data->dpm_table.mem_table.dpm_state.soft_max_level = - data->smc_state_table.mem_max_level; + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_UCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max memclk!", + return ret); } - return 0; + if (data->smu_features[GNLD_DPM_UVD].enabled) { + max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_VCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max vclk!", + return ret); + + max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_DCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max dclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_VCE].enabled) { + max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_ECLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max eclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max socclk!", + return ret); + } + + return ret; } int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) @@ -1127,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query) "Failed to get current package power!", return -EINVAL); - vega12_read_arg_from_smc(hwmgr, &value); + value = smum_get_argument(hwmgr); /* power value is an integer */ *query = value << 8; #endif @@ -1140,14 +1228,11 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx *gfx_freq = 0; - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", - return -1); - PP_ASSERT_WITH_CODE( - vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0, - "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed", - return -1); + return -EINVAL); + gfx_clk = smum_get_argument(hwmgr); *gfx_freq = gfx_clk * 100; @@ -1163,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f PP_ASSERT_WITH_CODE( 
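The reads that used to go through vega12_read_arg_from_smc() are replaced by smum_get_argument(), whose prototype is changed later in this patch to return uint32_t, so callers consume the SMU response value directly. The pattern is always send-then-read, as in this driver-context sketch (error handling trimmed; the gfx clock appears to come back in MHz, so the "* 100" yields 10 kHz units for the caller, which is an inference from the hunk):

/* Send-then-read pattern used by the current-clock queries. */
static int read_current_gfxclk(struct pp_hwmgr *hwmgr, uint32_t *freq_10khz)
{
	int ret;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmClockFreq, PPCLK_GFXCLK << 16);
	if (ret)
		return ret;

	/* smum_get_argument() now hands back the raw uint32_t SMU response */
	*freq_10khz = smum_get_argument(hwmgr) * 100;   /* MHz -> 10 kHz (assumed) */
	return 0;
}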
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0, "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", - return -1); - PP_ASSERT_WITH_CODE( - vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0, - "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed", - return -1); + return -EINVAL); + mem_clk = smum_get_argument(hwmgr); *mclk_freq = mem_clk * 100; @@ -1184,16 +1266,12 @@ static int vega12_get_current_activity_percent( #if 0 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); if (!ret) { - ret = vega12_read_arg_from_smc(hwmgr, ¤t_activity); - if (!ret) { - if (current_activity > 100) { - PP_ASSERT(false, - "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!"); - current_activity = 100; - } - } else + current_activity = smum_get_argument(hwmgr); + if (current_activity > 100) { PP_ASSERT(false, - "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!"); + "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!"); + current_activity = 100; + } } else PP_ASSERT(false, "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!"); @@ -1256,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_UCLK].enabled) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, - has_disp ? 0 : 1); + has_disp ? 1 : 0); return 0; } @@ -1274,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { switch (clk_type) { case amd_pp_dcef_clock: - clk_freq = clock_req->clock_freq_in_khz / 100; clk_select = PPCLK_DCEFCLK; break; case amd_pp_disp_clock: @@ -1310,9 +1387,10 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( (struct vega12_hwmgr *)(hwmgr->backend); struct PP_Clocks min_clocks = {0}; struct pp_display_clock_request clock_req; - uint32_t clk_request; - if (hwmgr->display_config->num_display > 1) + if ((hwmgr->display_config->num_display > 1) && + !hwmgr->display_config->multi_monitor_in_sync && + !hwmgr->display_config->nb_pstate_switch_disable) vega12_notify_smc_display_change(hwmgr, false); else vega12_notify_smc_display_change(hwmgr, true); @@ -1323,7 +1401,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = min_clocks.dcefClock; + clock_req.clock_freq_in_khz = min_clocks.dcefClock/10; if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) { if (data->smu_features[GNLD_DS_DCEFCLK].supported) PP_ASSERT_WITH_CODE( @@ -1337,15 +1415,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( } } - if (data->smu_features[GNLD_DPM_UCLK].enabled) { - clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100; - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0, - "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!", - return -1); - data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock; - } - return 0; } @@ -1354,12 +1423,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr) struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - data->smc_state_table.gfx_boot_level = - data->smc_state_table.gfx_max_level = - 
vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - data->smc_state_table.mem_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.mem_table)); + uint32_t soft_level; + + soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_level].value; + + soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_level].value; PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), "Failed to upload boot level to highest!", @@ -1376,13 +1452,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + uint32_t soft_level; + + soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_level].value; - data->smc_state_table.gfx_boot_level = - data->smc_state_table.gfx_max_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - data->smc_state_table.mem_max_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_level].value; PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), "Failed to upload boot level to highest!", @@ -1398,17 +1480,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { - struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - - data->smc_state_table.gfx_boot_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.gfx_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table)); - data->smc_state_table.mem_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.mem_table)); - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), "Failed to upload DPM Bootup Levels!", return -1); @@ -1416,22 +1487,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr), "Failed to upload DPM Max Levels!", return -1); + return 0; } -#if 0 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) { - struct phm_ppt_v2_information *table_info = - (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); + struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); + struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table); - if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL && - table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL && 
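Forcing the highest or lowest performance level no longer goes through the smc_state_table boot/max indices; instead the chosen level's frequency is written into both the soft min and soft max of the table's dpm_state, and the regular vega12_upload_dpm_min_level()/_max_level() paths push it to the SMU. A small sketch of the "pin to one level" step with stand-in types:

#include <stdint.h>

struct level_entry { uint32_t value; };                       /* stand-in for a DPM level */
struct level_state { uint32_t soft_min_level, soft_max_level; };

/* Pin a DPM table to a single level: soft min == soft max == that level's frequency. */
static void pin_to_level(struct level_state *st, const struct level_entry *levels,
			 uint32_t idx)
{
	st->soft_min_level = st->soft_max_level = levels[idx].value;
	/* the upload helpers then send these values to the SMU */
}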
- table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) { + *sclk_mask = 0; + *mclk_mask = 0; + *soc_mask = 0; + + if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL && + mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL && + soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) { *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL; - *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL; *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL; + *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL; } if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { @@ -1439,13 +1516,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { *mclk_mask = 0; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; - *soc_mask = table_info->vdd_dep_on_socclk->count - 1; - *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; + *sclk_mask = gfx_dpm_table->count - 1; + *mclk_mask = mem_dpm_table->count - 1; + *soc_mask = soc_dpm_table->count - 1; } + return 0; } -#endif static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { @@ -1469,11 +1546,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { int ret = 0; -#if 0 uint32_t sclk_mask = 0; uint32_t mclk_mask = 0; uint32_t soc_mask = 0; -#endif switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1489,27 +1564,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: -#if 0 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); if (ret) return ret; - vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); - vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); -#endif + vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); + vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } -#if 0 - if (!ret) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); - } -#endif + return ret; } @@ -1543,24 +1609,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr, PPCLK_e clock_select, bool max) { - int result; - *clock = 0; + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - if (max) { - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0, - "[GetClockRanges] Failed to get max clock from SMC!", - return -1); - result = vega12_read_arg_from_smc(hwmgr, clock); - } else { - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0, - "[GetClockRanges] Failed to get min clock from SMC!", - return -1); - result = vega12_read_arg_from_smc(hwmgr, clock); - } + if (max) + *clock = data->clk_range[clock_select].ACMax; + else + *clock = data->clk_range[clock_select].ACMin; - return result; + return 0; } static int vega12_get_sclks(struct pp_hwmgr *hwmgr, @@ -1575,12 +1631,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr, return -1; dpm_table = &(data->dpm_table.gfx_table); - 
ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? + MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -1607,13 +1663,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr, return -1; dpm_table = &(data->dpm_table.mem_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? + MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { - clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; - + clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000; + data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100; clocks->data[i].latency_in_us = data->mclk_latency_table.entries[i].latency = vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); @@ -1637,12 +1692,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr, dpm_table = &(data->dpm_table.dcef_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? + MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -1665,12 +1720,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr, dpm_table = &(data->dpm_table.soc_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? 
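The clock-reporting helpers now multiply each per-level value by 1000, which works out if the DPM level values are megahertz and the amd_pp interface wants kilohertz; the parallel mclk latency table keeps a "* 100" copy, presumably in 10 kHz units. A one-screen sketch of the two conversions, with the unit assumptions as stated:

#include <stdint.h>

/* Assumed unit of a DPM level value: MHz. */
static inline uint32_t mhz_to_khz(uint32_t mhz)   { return mhz * 1000; } /* amd_pp clocks_in_khz */
static inline uint32_t mhz_to_10khz(uint32_t mhz) { return mhz * 100;  } /* mclk latency table   */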
+ MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -1717,99 +1772,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_ranges) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); Watermarks_t *table = &(data->smc_state_table.water_marks_table); - int result = 0; - uint32_t i; + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; if (!data->registry_data.disable_water_mark && data->smu_features[GNLD_DPM_DCEFCLK].supported && data->smu_features[GNLD_DPM_SOCCLK].supported) { - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { - table->WatermarkRow[WM_DCEFCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; - } - - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { - table->WatermarkRow[WM_SOCCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; - } + smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); data->water_marks_bitmap |= WaterMarksExist; data->water_marks_bitmap &= ~WaterMarksLoaded; } - return result; + return 0; } static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - - if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | - AMD_DPM_FORCED_LEVEL_LOW | - AMD_DPM_FORCED_LEVEL_HIGH)) - return -EINVAL; + uint32_t soft_min_level, soft_max_level; + int ret = 0; switch (type) { case PP_SCLK: - data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? 
(fls(mask) - 1) : 0; + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), + ret = vega12_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to lowest!", - return -EINVAL); + return ret); - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr), + ret = vega12_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", - return -EINVAL); + return ret); break; case PP_MCLK: - data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0; + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_levels[soft_min_level].value; + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_max_level].value; + + ret = vega12_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to lowest!", - return -EINVAL); + return ret); - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr), + ret = vega12_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", - return -EINVAL); + return ret); break; @@ -1842,8 +1867,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, return -1); for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, - (clocks.data[i].clocks_in_khz == now) ? "*" : ""); + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; case PP_MCLK: @@ -1858,8 +1883,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, return -1); for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, - (clocks.data[i].clocks_in_khz == now) ? "*" : ""); + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? 
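vega12_force_clock_level() translates the user-supplied level bitmask with ffs()/fls(): the lowest set bit becomes the soft minimum level, the highest set bit the soft maximum. The reworked print path marks the current level by comparing megahertz on both sides, dividing the table value by 1000 (kHz) and the live reading by 100 (10 kHz, assumed). A standalone sketch of the mask handling and the comparison; fls_u32 stands in for the kernel's fls():

#include <stdint.h>
#include <strings.h>   /* ffs() */

/* Highest set bit (1-based), a userspace stand-in for the kernel's fls(). */
static int fls_u32(uint32_t x)
{
	int n = 0;
	while (x) { n++; x >>= 1; }
	return n;
}

static void mask_to_levels(uint32_t mask, uint32_t *min_lvl, uint32_t *max_lvl)
{
	*min_lvl = mask ? (uint32_t)(ffs(mask) - 1) : 0;     /* lowest requested level  */
	*max_lvl = mask ? (uint32_t)(fls_u32(mask) - 1) : 0; /* highest requested level */
}

/* Current-level marker: both sides reduced to MHz before comparing. */
static int is_current_level(uint32_t table_khz, uint32_t now_10khz)
{
	return table_khz / 1000 == now_10khz / 100;
}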
"*" : ""); break; case PP_PCIE: @@ -1871,6 +1896,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + struct vega12_single_dpm_table *dpm_table; + bool vblank_too_short = false; + bool disable_mclk_switching; + uint32_t i, latency; + + disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && + !hwmgr->display_config->multi_monitor_in_sync) || + vblank_too_short; + latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; + + /* gfxclk */ + dpm_table = &(data->dpm_table.gfx_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* memclk */ + dpm_table = &(data->dpm_table.mem_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* honour DAL's UCLK Hardmin */ + if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100)) + dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100; + + /* Hardmin is dependent on displayconfig */ + if (disable_mclk_switching) { + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + for (i = 0; i < data->mclk_latency_table.count - 1; i++) { + if (data->mclk_latency_table.entries[i].latency <= latency) { + if (dpm_table->dpm_levels[i].value >= 
(hwmgr->display_config->min_mem_set_clock / 100)) { + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value; + break; + } + } + } + } + + if (hwmgr->display_config->nb_pstate_switch_disable) + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + /* vclk */ + dpm_table = &(data->dpm_table.vclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* dclk */ + dpm_table = &(data->dpm_table.dclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* socclk */ + dpm_table = &(data->dpm_table.soc_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* eclk */ + dpm_table = &(data->dpm_table.eclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if 
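When mclk switching has to stay off (multiple unsynchronized displays), vega12_apply_clocks_adjust_rules() first raises the UCLK hard minimum to the top level and then walks the mclk latency table for the lowest level whose switch latency is tolerable and whose frequency still satisfies DAL's minimum memory clock. A condensed sketch of that selection; the driver keeps frequency and latency in two parallel tables, which are merged into one stand-in struct here:

#include <stdint.h>

struct mclk_entry { uint32_t freq_mhz; uint32_t latency_us; };

/* Pick the lowest acceptable UCLK hard-min level; fall back to the top level. */
static uint32_t pick_uclk_hard_min(const struct mclk_entry *e, uint32_t count,
				   uint32_t tolerable_latency_us,
				   uint32_t dal_min_mhz)
{
	uint32_t i;

	if (!count)
		return 0;

	for (i = 0; i + 1 < count; i++)          /* mirrors the "count - 1" loop bound */
		if (e[i].latency_us <= tolerable_latency_us &&
		    e[i].freq_mhz >= dal_min_mhz)
			return e[i].freq_mhz;

	return e[count - 1].freq_mhz;            /* default: highest level */
}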
(PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + return 0; +} + +static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, + struct vega12_single_dpm_table *dpm_table) +{ + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + PP_ASSERT_WITH_CODE(dpm_table->count > 0, + "[SetUclkToHightestDpmLevel] Dpm table has no entry!", + return -EINVAL); + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS, + "[SetUclkToHightestDpmLevel] Dpm table has too many entries!", + return -EINVAL); + + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", + return ret); + } + + return ret; +} + +static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_NumOfDisplays, 0); + + ret = vega12_set_uclk_to_highest_dpm_level(hwmgr, + &data->dpm_table.mem_table); + + return ret; +} + static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); @@ -1915,6 +2139,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + if (data->vce_power_gated == bgate) + return; + data->vce_power_gated = bgate; vega12_enable_disable_vce_dpm(hwmgr, !bgate); } @@ -1923,6 +2150,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + if (data->uvd_power_gated == bgate) + return; + data->uvd_power_gated = bgate; vega12_enable_disable_uvd_dpm(hwmgr, !bgate); } @@ -2090,6 +2320,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, return 0; } +static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->gfxoff_controlled_by_driver) + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff); + + return ret; +} + +static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->gfxoff_controlled_by_driver) + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff); + + return ret; +} + +static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) +{ + if (enable) + return vega12_enable_gfx_off(hwmgr); + else + return vega12_disable_gfx_off(hwmgr); +} + static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .backend_init = vega12_hwmgr_backend_init, .backend_fini = vega12_hwmgr_backend_fini, @@ -2117,6 +2379,10 
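GFXOFF is only driver-controlled when PP_GFXOFF_MASK was set in the feature mask at backend init; in that case vega12_gfx_off_control() forwards AllowGfxOff or DisallowGfxOff to the SMU, and the generic powergate_gfx hook wired up just below dispatches into it. A driver-context sketch of the enable/disable split (the helper name and the explicit flag parameter are illustrative):

/* Gate the AllowGfxOff/DisallowGfxOff messages on driver ownership of GFXOFF. */
static int gfx_off_control_sketch(struct pp_hwmgr *hwmgr, bool enable,
				  bool controlled_by_driver)
{
	if (!controlled_by_driver)
		return 0;      /* firmware owns GFXOFF, nothing to send */

	return smum_send_msg_to_smc(hwmgr, enable ? PPSMC_MSG_AllowGfxOff
						  : PPSMC_MSG_DisallowGfxOff);
}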
@@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .display_clock_voltage_request = vega12_display_clock_voltage_request, .force_clock_level = vega12_force_clock_level, .print_clock_levels = vega12_print_clock_levels, + .apply_clocks_adjust_rules = + vega12_apply_clocks_adjust_rules, + .pre_display_config_changed = + vega12_pre_display_configuration_changed_task, .display_config_changed = vega12_display_configuration_changed_task, .powergate_uvd = vega12_power_gate_uvd, .powergate_vce = vega12_power_gate_vce, @@ -2135,6 +2401,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .get_thermal_temperature_range = vega12_get_thermal_temperature_range, .register_irq_handlers = smu9_register_irq_handlers, .start_thermal_controller = vega12_start_thermal_controller, + .powergate_gfx = vega12_gfx_off_control, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index 49b38df8c7f2..b3e424d28994 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h @@ -304,6 +304,12 @@ struct vega12_odn_fan_table { bool force_fan_pwm; }; +struct vega12_clock_range { + uint32_t ACMax; + uint32_t ACMin; + uint32_t DCMax; +}; + struct vega12_hwmgr { struct vega12_dpm_table dpm_table; struct vega12_dpm_table golden_dpm_table; @@ -385,6 +391,11 @@ struct vega12_hwmgr { uint32_t smu_version; struct smu_features smu_features[GNLD_FEATURES_MAX]; struct vega12_smc_state_table smc_state_table; + + struct vega12_clock_range clk_range[PPCLK_COUNT]; + + /* ---- Gfxoff ---- */ + bool gfxoff_controlled_by_driver; }; #define VEGA12_DPM2_NEAR_TDP_DEC 10 @@ -435,6 +446,8 @@ struct vega12_hwmgr { #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3 #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3 #define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2 +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3 +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index 29914700ee82..cb3a5b1737c8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -224,11 +224,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent; ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq; - /* 0xFFFF will disable the ACG feature */ - if (!(hwmgr->feature_mask & PP_ACG_MASK)) { - ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF; - ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; - } + ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index cfd9e6ccb790..904eb2c9155b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm), "Attempt to get current RPM from SMC Failed!", - return -1); - PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr, - current_rpm), - "Attempt to read 
current RPM from SMC Failed!", - return -1); + return -EINVAL); + *current_rpm = smum_get_argument(hwmgr); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index a202247c9894..429c9c4322da 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct pp_clock_levels_with_voltage *clocks); extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + void *clock_ranges); extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index b99fb8ac822c..d3d96260f440 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -26,7 +26,6 @@ #include <linux/seq_file.h> #include "amd_powerplay.h" #include "hardwaremanager.h" -#include "pp_power_source.h" #include "hwmgr_ppt.h" #include "ppatomctrl.h" #include "hwmgr_ppt.h" @@ -195,7 +194,7 @@ struct pp_smumgr_func { int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, uint32_t firmware); - int (*get_argument)(struct pp_hwmgr *hwmgr); + uint32_t (*get_argument)(struct pp_hwmgr *hwmgr); int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); @@ -294,8 +293,7 @@ struct pp_hwmgr_func { int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct pp_clock_levels_with_voltage *clocks); - int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges); int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); @@ -303,7 +301,7 @@ struct pp_hwmgr_func { int (*power_off_asic)(struct pp_hwmgr *hwmgr); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); - int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable); + int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable); int (*get_sclk_od)(struct pp_hwmgr *hwmgr); int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*get_mclk_od)(struct pp_hwmgr *hwmgr); @@ -328,7 +326,7 @@ struct pp_hwmgr_func { enum PP_OD_DPM_TABLE_COMMAND type, long *input, uint32_t size); int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); - int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr); + int (*powergate_mmhub)(struct pp_hwmgr *hwmgr); int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); }; @@ -741,7 +739,6 @@ struct pp_hwmgr { const struct pp_table_func *pptable_func; struct pp_power_state *ps; - enum pp_power_source power_source; uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 
6c22ed9249bf..82550a8a3a3f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -29,7 +29,6 @@ enum SMU_TABLE { SMU_UVD_TABLE = 0, SMU_VCE_TABLE, - SMU_SAMU_TABLE, SMU_BIF_TABLE, }; @@ -47,7 +46,6 @@ enum SMU_MEMBER { UcodeLoadStatus, UvdBootLevel, VceBootLevel, - SamuBootLevel, LowSclkInterruptThreshold, DRAM_LOG_ADDR_H, DRAM_LOG_ADDR_L, @@ -82,7 +80,7 @@ enum SMU10_TABLE_ID { SMU10_CLOCKTABLE, }; -extern int smum_get_argument(struct pp_hwmgr *hwmgr); +extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr); extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index b08526fd1619..b6ffd08784e7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,10 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; - uint16_t MinVoltageUlvGfx; - uint16_t MinVoltageUlvSoc; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[14]; + uint32_t Reserved[14]; @@ -483,9 +483,9 @@ typedef struct { uint8_t padding8_4; - uint8_t PllGfxclkSpreadEnabled; - uint8_t PllGfxclkSpreadPercent; - uint16_t PllGfxclkSpreadFreq; + uint8_t PllGfxclkSpreadEnabled; + uint8_t PllGfxclkSpreadPercent; + uint16_t PllGfxclkSpreadFreq; uint8_t UclkSpreadEnabled; uint8_t UclkSpreadPercent; @@ -495,9 +495,9 @@ typedef struct { uint8_t SocclkSpreadPercent; uint16_t SocclkSpreadFreq; - uint8_t AcgGfxclkSpreadEnabled; - uint8_t AcgGfxclkSpreadPercent; - uint16_t AcgGfxclkSpreadFreq; + uint8_t AcgGfxclkSpreadEnabled; + uint8_t AcgGfxclkSpreadPercent; + uint16_t AcgGfxclkSpreadFreq; uint8_t Vr2_I2C_address; uint8_t padding_vr2[3]; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 0a200406a1ec..8d557accaef2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -26,7 +26,7 @@ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \ polaris10_smumgr.o iceland_smumgr.o \ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \ - vega12_smumgr.o vegam_smumgr.o + vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 2d4ec8ac3a08..fbe3ef4ee45c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return result; } -static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU7_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_samu_clock_voltage_dependency_table *samu_table = - hwmgr->dyn_state.samu_clock_voltage_dependency_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(samu_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - table->SamuLevel[count].Frequency = samu_table->entries[count].samclk; - table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE; - table->SamuLevel[count].MinPhases = 1; - - /* retrieve divider value for VBIOS */ - result = 
atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage); - } - return result; -} - static int ci_populate_memory_timing_parameters( struct pp_hwmgr *hwmgr, uint32_t engine_clock, @@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize ACP Level!", return result); - result = ci_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ /* need to populate the ARB settings for the initial state. */ result = ci_program_memory_timing_parameters(hwmgr); @@ -2881,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr, return 0; } +static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + struct smu7_hwmgr *data = hwmgr->backend; + struct ci_smumgr *smu_data = hwmgr->smu_backend; + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc : + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc; + int32_t i; + + if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0) + smu_data->smc_state_table.UvdBootLevel = 0; + else + smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1; + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475, + UvdBootLevel, smu_data->smc_state_table.UvdBootLevel); + + data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; + + for (i = uvd_table->count - 1; i >= 0; i--) { + if (uvd_table->entries[i].v <= max_vddc) + data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; + if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM)) + break; + } + ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, + data->dpm_level_enable_mask.uvd_dpm_enable_mask); + + return 0; +} + +static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + struct smu7_hwmgr *data = hwmgr->backend; + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + uint32_t max_vddc = adev->pm.ac_power ? 
hwmgr->dyn_state.max_clock_voltage_on_ac.vddc : + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc; + int32_t i; + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475, + VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/ + + data->dpm_level_enable_mask.vce_dpm_enable_mask = 0; + + for (i = vce_table->count - 1; i >= 0; i--) { + if (vce_table->entries[i].v <= max_vddc) + data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; + if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM)) + break; + } + ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, + data->dpm_level_enable_mask.vce_dpm_enable_mask); + + return 0; +} + +static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + ci_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + ci_update_vce_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + const struct pp_smumgr_func ci_smu_funcs = { .smu_init = ci_smu_init, .smu_fini = ci_smu_fini, @@ -2903,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = { .initialize_mc_reg_table = ci_initialize_mc_reg_table, .is_dpm_running = ci_is_dpm_running, .update_dpm_settings = ci_update_dpm_settings, + .update_smc_table = ci_update_smc_table, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 53df9405f43a..18048f8e2f13 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return result; } -static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU73_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].MinVoltage = 0; - table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, int32_t eng_clock, int32_t mem_clock, struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) @@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize ACP Level!", return result); - result = fiji_populate_smc_samu_level(hwmgr, table); - 
PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - /* Since only the initial state is completely set up at this point * (the other states are just copies of the boot state) we only * need to populate the ARB settings for the initial state. @@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); case VceBootLevel: return offsetof(SMU73_Discrete_DpmTable, VceBootLevel); - case SamuBootLevel: - return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); case LowSclkInterruptThreshold: return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } @@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) return 0; } -static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) -{ - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - - smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); - - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); - return 0; -} - static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { switch (type) { @@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) case SMU_VCE_TABLE: fiji_update_vce_smc_table(hwmgr); break; - case SMU_SAMU_TABLE: - fiji_update_samu_smc_table(hwmgr); - break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 415f691c3fa9..9299b93aa09a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return 0; } -static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - return 0; -} - static int iceland_populate_memory_timing_parameters( struct pp_hwmgr *hwmgr, uint32_t engine_clock, @@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize ACP Level!", return result;); - result = iceland_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result;); - /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ /* need to populate the ARB settings for the initial state. 
*/ result = iceland_program_memory_timing_parameters(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index a8c6524f07e4..1276f168ff68 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, (struct phm_ppt_v1_information *)(hwmgr->pptable); SMIO_Pattern vol_level; uint32_t mvdd; - uint16_t us_mvdd; table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; @@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, "in Clock Dependency Table", ); - us_mvdd = 0; - if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || - (data->mclk_dpm_key_disabled)) - us_mvdd = data->vbios_boot_state.mvdd_bootup_value; - else { - if (!polaris10_populate_mvdd_value(hwmgr, + if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + (data->mclk_dpm_key_disabled))) + polaris10_populate_mvdd_value(hwmgr, data->dpm_table.mclk_table.dpm_levels[0].value, - &vol_level)) - us_mvdd = vol_level.Voltage; - } + &vol_level); if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level)) table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); @@ -1337,55 +1331,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, return result; } - -static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].MinVoltage = 0; - table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - - if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, int32_t eng_clock, int32_t mem_clock, SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs) @@ -1566,7 +1511,7 @@ static int 
polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); - uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + uint8_t i, stretch_amount, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1617,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { + if (stretch_amount == 0 || stretch_amount > 5) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher); PP_ASSERT_WITH_CODE(false, @@ -1865,10 +1806,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize VCE Level!", return result); - result = polaris10_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - /* Since only the initial state is completely set up at this point * (the other states are just copies of the boot state) we only * need to populate the ARB settings for the initial state. @@ -2222,34 +2159,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) return 0; } -static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) -{ - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - - smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); - - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); - return 0; -} - - static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); @@ -2276,9 +2185,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) case SMU_VCE_TABLE: polaris10_update_vce_smc_table(hwmgr); break; - case SMU_SAMU_TABLE: - polaris10_update_samu_smc_table(hwmgr); - break; case SMU_BIF_TABLE: polaris10_update_bif_smc_table(hwmgr); default: @@ -2357,8 +2263,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); case VceBootLevel: return offsetof(SMU74_Discrete_DpmTable, VceBootLevel); - case SamuBootLevel: - return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); case LowSclkInterruptThreshold: return 
offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 0a563f6fe9ea..bb07d43f3874 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, return 0; } -static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr) +static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 9f407c48d4f0..a029e47c2319 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -379,9 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_to_load; - int result = 0; - struct SMU_DRAMData_TOC *toc; - uint32_t num_entries = 0; + int r = 0; if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); @@ -422,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) + UCODE_ID_CP_MEC_JT2_MASK; } - toc = (struct SMU_DRAMData_TOC *)smu_data->header; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_RLC_G, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_CE, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_PFP, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_ME, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA0, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA1, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - if (!hwmgr->not_vf) - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); + if (!smu_data->toc) { + struct SMU_DRAMData_TOC *toc; - toc->num_entries = num_entries; + smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL); + if (!smu_data->toc) + return -ENOMEM; + toc = smu_data->toc; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware 
Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + if (!hwmgr->not_vf) + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + } + memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc, + sizeof(struct SMU_DRAMData_TOC)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) pr_err("Fail to Request SMU Load uCode"); - return result; + return r; + +failed: + kfree(smu_data->toc); + smu_data->toc = NULL; + return r; } /* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ @@ -571,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr) int smu7_init(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data; - uint64_t mc_addr = 0; int r; /* Allocate memory for backend private data */ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); @@ -585,15 +595,12 @@ int smu7_init(struct pp_hwmgr *hwmgr) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &smu_data->header_buffer.handle, - &mc_addr, + &smu_data->header_buffer.mc_addr, &smu_data->header_buffer.kaddr); if (r) return -EINVAL; - smu_data->header = smu_data->header_buffer.kaddr; - smu_data->header_buffer.mc_addr = mc_addr; - if (!hwmgr->not_vf) return 0; @@ -603,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &smu_data->smu_buffer.handle, - &mc_addr, + &smu_data->smu_buffer.mc_addr, &smu_data->smu_buffer.kaddr); if (r) { @@ -612,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr) &smu_data->header_buffer.kaddr); return -EINVAL; } - smu_data->smu_buffer.mc_addr = mc_addr; if (smum_is_hw_avfs_present(hwmgr)) hwmgr->avfs_supported = true; @@ -634,6 +640,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr) &smu_data->smu_buffer.mc_addr, &smu_data->smu_buffer.kaddr); + + kfree(smu_data->toc); + smu_data->toc = NULL; kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 39c9bfda0ab4..01f0538fba6b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -37,10 +37,9 @@ struct smu7_buffer_entry { }; struct smu7_smumgr { - uint8_t *header; - uint8_t *mec_image; struct smu7_buffer_entry smu_buffer; struct smu7_buffer_entry header_buffer; + struct SMU_DRAMData_TOC *toc; uint32_t soft_regs_start; uint32_t dpm_table_start; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index c861d3023474..f7e3bc22bb93 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = { SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, }; -static int smu8_get_argument(struct pp_hwmgr *hwmgr) +static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr) { if (hwmgr == NULL || hwmgr->device == NULL) - return -EINVAL; + return 0; return cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c new file mode 100644 index 000000000000..079fc8e8f709 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -0,0 +1,150 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "smumgr.h" +#include "vega10_inc.h" +#include "soc15_common.h" +#include "pp_debug.h" + + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +#define smnMP1_FIRMWARE_FLAGS 0x3010028 + +bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t mp1_fw_flags; + + WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, + (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); + + mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + + if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) + return true; + + return false; +} + +/* + * Check if SMC has responded to previous message. + * + * @param smumgr the address of the powerplay hardware manager. + * @return TRUE SMC has responded, FALSE otherwise. + */ +static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t reg; + uint32_t ret; + + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + + ret = phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + if (ret) + pr_err("No response from smu\n"); + + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); +} + +/* + * Send a message to the SMC, and do not wait for its response. + * @param smumgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return Always return 0. + */ +static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, + uint16_t msg) +{ + struct amdgpu_device *adev = hwmgr->adev; + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); + + return 0; +} + +/* + * Send a message to the SMC, and wait for its response. + * @param hwmgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return Always return 0. + */ +int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t ret; + + smu9_wait_for_response(hwmgr); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + + smu9_send_msg_to_smc_without_waiting(hwmgr, msg); + + ret = smu9_wait_for_response(hwmgr); + if (ret != 1) + pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret); + + return 0; +} + +/* + * Send a message to the SMC with parameter + * @param hwmgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: the parameter to send + * @return Always return 0. 
+ */ +int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t ret; + + smu9_wait_for_response(hwmgr); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + + smu9_send_msg_to_smc_without_waiting(hwmgr, msg); + + ret = smu9_wait_for_response(hwmgr); + if (ret != 1) + pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret); + + return 0; +} + +uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); +} diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h index b43315cc5d58..1462279ca128 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h @@ -1,5 +1,5 @@ /* - * Copyright 2015 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,17 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#ifndef _SMU9_SMUMANAGER_H_ +#define _SMU9_SMUMANAGER_H_ -#ifndef PP_POWERSOURCE_H -#define PP_POWERSOURCE_H - -enum pp_power_source { - PP_PowerSource_AC = 0, - PP_PowerSource_DC, - PP_PowerSource_LimitedPower, - PP_PowerSource_LimitedPower_2, - PP_PowerSource_Max -}; - +bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); +uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c9837935f0f5..99d5e4f98f49 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr) return 0; } -int smum_get_argument(struct pp_hwmgr *hwmgr) +uint32_t smum_get_argument(struct pp_hwmgr *hwmgr) { if (NULL != hwmgr->smumgr_funcs->get_argument) return hwmgr->smumgr_funcs->get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 782b19fc2e70..7dabc6c456e1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return result; } -static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - pptable_info->mm_dep_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t) (mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].Frequency = - pptable_info->mm_dep_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage.Vddc = - 
phm_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->SamuLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? - phm_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->SamuLevel[count].MinVoltage.Vddci = - phm_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - table->SamuLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((!result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - } - - return result; -} - static int tonga_populate_memory_timing_parameters( struct pp_hwmgr *hwmgr, uint32_t engine_clock, @@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!result, "Failed to initialize ACP Level !", return result); - result = tonga_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(!result, - "Failed to initialize SAMU Level !", return result); - /* Since only the initial state is completely set up at this * point (the other states are just copies of the boot state) we only * need to populate the ARB settings for the initial state. @@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); case VceBootLevel: return offsetof(SMU72_Discrete_DpmTable, VceBootLevel); - case SamuBootLevel: - return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); case LowSclkInterruptThreshold: return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } @@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) -{ - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + - offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); - - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); - return 0; -} - static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { switch (type) { @@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) case SMU_VCE_TABLE: tonga_update_vce_smc_table(hwmgr); break; - case SMU_SAMU_TABLE: - tonga_update_samu_smc_table(hwmgr); - break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index e84669c448a3..5d19115f410c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -28,142 +28,11 @@ #include "vega10_hwmgr.h" #include "vega10_ppsmc.h" #include "smu9_driver_if.h" +#include "smu9_smumgr.h" #include "ppatomctrl.h" #include "pp_debug.h" -#define AVFS_EN_MSB 1568 -#define AVFS_EN_LSB 1568 - -/* Microcode file is stored in this buffer */ -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /* 128 *1024 */ - -/* MP Apertures */ -#define MP0_Public 0x03800000 -#define MP0_SRAM 0x03900000 -#define MP1_Public 0x03b00000 -#define MP1_SRAM 0x03c00004 - -#define smnMP1_FIRMWARE_FLAGS 0x3010028 -#define smnMP0_FW_INTF 0x3010104 -#define smnMP1_PUB_CTRL 0x3010b14 - -static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) -{ - struct amdgpu_device *adev = hwmgr->adev; - uint32_t mp1_fw_flags; - - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); - - if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) - return true; - - return false; -} - -/* - * Check if SMC has responded to previous message. - * - * @param smumgr the address of the powerplay hardware manager. - * @return TRUE SMC has responded, FALSE otherwise. - */ -static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) -{ - struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; - uint32_t ret; - - reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); - - ret = phm_wait_for_register_unequal(hwmgr, reg, - 0, MP1_C2PMSG_90__CONTENT_MASK); - - if (ret) - pr_err("No response from smu\n"); - - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); -} - -/* - * Send a message to the SMC, and do not wait for its response. - * @param smumgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, - uint16_t msg) -{ - struct amdgpu_device *adev = hwmgr->adev; - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); - - return 0; -} - -/* - * Send a message to the SMC, and wait for its response. - * @param hwmgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) -{ - struct amdgpu_device *adev = hwmgr->adev; - uint32_t ret; - - vega10_wait_for_response(hwmgr); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - - ret = vega10_wait_for_response(hwmgr); - if (ret != 1) - pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret); - - return 0; -} - -/* - * Send a message to the SMC with parameter - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return Always return 0. 
- */ -static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter) -{ - struct amdgpu_device *adev = hwmgr->adev; - uint32_t ret; - - vega10_wait_for_response(hwmgr); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); - - vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - - ret = vega10_wait_for_response(hwmgr); - if (ret != 1) - pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret); - - return 0; -} - -static int vega10_get_argument(struct pp_hwmgr *hwmgr) -{ - struct amdgpu_device *adev = hwmgr->adev; - - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); -} - static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { @@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id); @@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id); @@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr, if (features_enabled == NULL) return -EINVAL; - vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); - *features_enabled = vega10_get_argument(hwmgr); + smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); + *features_enabled = smu9_get_argument(hwmgr); return 0; } @@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr) struct vega10_smumgr *priv = hwmgr->smu_backend; if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) { - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); } @@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) uint32_t dev_id; uint32_t rev_id; - PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr, + PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - smc_driver_if_version 
= vega10_get_argument(hwmgr); + smc_driver_if_version = smu9_get_argument(hwmgr); dev_id = adev->pdev->device; rev_id = adev->pdev->revision; @@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr) static int vega10_start_smu(struct pp_hwmgr *hwmgr) { - if (!vega10_is_smc_ram_running(hwmgr)) + if (!smu9_is_smc_ram_running(hwmgr)) return -EINVAL; PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), @@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr) return 0; } -static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw) +static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, + uint16_t table_id, bool rw) { int ret; @@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = { .smu_fini = &vega10_smu_fini, .start_smu = &vega10_start_smu, .request_smu_load_specific_fw = NULL, - .send_msg_to_smc = &vega10_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter, + .send_msg_to_smc = &smu9_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .is_dpm_running = vega10_is_dpm_running, - .get_argument = vega10_get_argument, + .get_argument = smu9_get_argument, .smc_table_manager = vega10_smc_table_manager, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index 7d9b40e8b1bf..7f0e2109f40d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -24,157 +24,14 @@ #include "smumgr.h" #include "vega12_inc.h" #include "soc15_common.h" +#include "smu9_smumgr.h" #include "vega12_smumgr.h" #include "vega12_ppsmc.h" #include "vega12/smu9_driver_if.h" - #include "ppatomctrl.h" #include "pp_debug.h" -/* MP Apertures */ -#define MP0_Public 0x03800000 -#define MP0_SRAM 0x03900000 -#define MP1_Public 0x03b00000 -#define MP1_SRAM 0x03c00004 - -#define smnMP1_FIRMWARE_FLAGS 0x3010028 -#define smnMP0_FW_INTF 0x3010104 -#define smnMP1_PUB_CTRL 0x3010b14 - -static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr) -{ - struct amdgpu_device *adev = hwmgr->adev; - uint32_t mp1_fw_flags; - - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); - - if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> - MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) - return true; - - return false; -} - -/* - * Check if SMC has responded to previous message. - * - * @param smumgr the address of the powerplay hardware manager. - * @return TRUE SMC has responded, FALSE otherwise. - */ -static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr) -{ - struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; - - reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); - - phm_wait_for_register_unequal(hwmgr, reg, - 0, MP1_C2PMSG_90__CONTENT_MASK); - - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); -} - -/* - * Send a message to the SMC, and do not wait for its response. - * @param smumgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. 
- */ -int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, - uint16_t msg) -{ - struct amdgpu_device *adev = hwmgr->adev; - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); - - return 0; -} - -/* - * Send a message to the SMC, and wait for its response. - * @param hwmgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) -{ - struct amdgpu_device *adev = hwmgr->adev; - - vega12_wait_for_response(hwmgr); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - vega12_send_msg_to_smc_without_waiting(hwmgr, msg); - - if (vega12_wait_for_response(hwmgr) != 1) - pr_err("Failed to send message: 0x%x\n", msg); - - return 0; -} - -/* - * Send a message to the SMC with parameter - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return Always return 0. - */ -int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter) -{ - struct amdgpu_device *adev = hwmgr->adev; - - vega12_wait_for_response(hwmgr); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); - - vega12_send_msg_to_smc_without_waiting(hwmgr, msg); - - if (vega12_wait_for_response(hwmgr) != 1) - pr_err("Failed to send message: 0x%x\n", msg); - - return 0; -} - - -/* - * Send a message to the SMC with parameter, do not wait for response - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return The response that came from the SMC. - */ -int vega12_send_msg_to_smc_with_parameter_without_waiting( - struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) -{ - struct amdgpu_device *adev = hwmgr->adev; - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter); - - return vega12_send_msg_to_smc_without_waiting(hwmgr, msg); -} - -/* - * Retrieve an argument from SMC. - * @param hwmgr the address of the powerplay hardware manager. - * @param arg pointer to store the argument from SMC. - * @return Always return 0. 
- */
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
-	struct amdgpu_device *adev = hwmgr->adev;
-
-	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
-	return 0;
-}
-
 /*
  * Copy table from SMC into driver FB
  * @param hwmgr the address of the HW manager
@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			"Invalid SMU Table version!", return -EINVAL);
 	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
 			"Invalid SMU Table Length!", return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
 			return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrLow,
 			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
 			return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_TransferTableSmu2Dram,
 			table_id) == 0,
 			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	memcpy(priv->smu_tables.entry[table_id].table, table,
 			priv->smu_tables.entry[table_id].size);
 
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
 			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
 			return -EINVAL;);
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrLow,
 			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
 			return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_TransferTableDram2Smu,
 			table_id) == 0,
 			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
 	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
 
 	if (enable) {
-		PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
 				"[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
 				return -EINVAL);
-		PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
 				"[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
 				return -EINVAL);
 	} else {
-		PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
 				"[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
 				return -EINVAL);
-		PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+		PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
 				"[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
 				return -EINVAL);
@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 	if (features_enabled == NULL)
 		return -EINVAL;
 
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
 			"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
 			return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
-			&smc_features_low) == 0,
-			"[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
-			return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+	smc_features_low = smu9_get_argument(hwmgr);
+
+	PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
 			"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
 			return -EINVAL);
-	PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
-			&smc_features_high) == 0,
-			"[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
-			return -EINVAL);
+	smc_features_high = smu9_get_argument(hwmgr);
 
 	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
 			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -333,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
 			(struct vega12_smumgr *)(hwmgr->smu_backend);
 
 	if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
-		if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
+		if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetToolsDramAddrHigh,
 				upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
-			vega12_send_msg_to_smc_with_parameter(hwmgr,
+			smu9_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_SetToolsDramAddrLow,
 					lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
 	}
 	return 0;
 }
 
-#if 0 /* tentatively remove */
-static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
-	uint32_t smc_driver_if_version;
-
-	PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_GetDriverIfVersion),
-			"Attempt to get SMC IF Version Number Failed!",
-			return -EINVAL);
-	vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
-
-	if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
-		pr_err("Your firmware(0x%x) doesn't match \
-			SMU9_DRIVER_IF_VERSION(0x%x). \
-			Please update your firmware!\n",
-			smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-#endif
-
 static int vega12_smu_init(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_smumgr *priv;
@@ -513,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
 
 static int vega12_start_smu(struct pp_hwmgr *hwmgr)
 {
-	PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
+	PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
 			"SMC is not running!",
 			return -EINVAL);
 
-#if 0 /* tentatively remove */
-	PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
-			"Failed to verify SMC interface!",
-			return -EINVAL);
-#endif
-
 	vega12_set_tools_address(hwmgr);
 
 	return 0;
@@ -533,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
 	.smu_fini = &vega12_smu_fini,
 	.start_smu = &vega12_start_smu,
 	.request_smu_load_specific_fw = NULL,
-	.send_msg_to_smc = &vega12_send_msg_to_smc,
-	.send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
+	.send_msg_to_smc = &smu9_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
 	.download_pptable_settings = NULL,
 	.upload_pptable_settings = NULL,
 	.is_dpm_running = vega12_is_dpm_running,
+	.get_argument = smu9_get_argument,
 };
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index 2810d387b611..b285cbc04019 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,7 +48,6 @@ struct vega12_smumgr {
 #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
 #define SMU_FEATURES_HIGH_SHIFT 32
 
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
 int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			uint8_t *table, int16_t table_id);
 int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 2de48959ac93..57420d7caa4e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
-	uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-	smu_data->smc_state_table.SamuBootLevel = 0;
-	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-			offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
-
-	mm_boot_level_offset /= 4;
-	mm_boot_level_offset *= 4;
-	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC, mm_boot_level_offset);
-	mm_boot_level_value &= 0xFFFFFF00;
-	mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-	cgs_write_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-			PHM_PlatformCaps_StablePState))
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_SAMUDPM_SetEnabledMask,
-				(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-	return 0;
-}
-
-
 static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
 {
 	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
 	case SMU_VCE_TABLE:
 		vegam_update_vce_smc_table(hwmgr);
 		break;
-	case SMU_SAMU_TABLE:
-		vegam_update_samu_smc_table(hwmgr);
-		break;
 	case SMU_BIF_TABLE:
 		vegam_update_bif_smc_table(hwmgr);
 		break;
@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-		SMU75_Discrete_DpmTable *table)
-{
-	int result = -EINVAL;
-	uint8_t count;
-	struct pp_atomctrl_clock_dividers_vi dividers;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-			table_info->mm_dep_table;
-	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-	uint32_t vddci;
-
-	table->SamuBootLevel = 0;
-	table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-	for (count = 0; count < table->SamuLevelCount; count++) {
-		/* not sure whether we need evclk or not */
-		table->SamuLevel[count].MinVoltage = 0;
-		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
-
-		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-				mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-		else
-			vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-		table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value for VBIOS */
-		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-				table->SamuLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"can not find divide id for samu clock", return result);
-
-		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-	}
-	return result;
-}
-
 static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
 		int32_t eng_clock, int32_t mem_clock,
 		SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to initialize VCE Level!", return result);
 
-	result = vegam_populate_smc_samu_level(hwmgr, table);
-	PP_ASSERT_WITH_CODE(!result,
-			"Failed to initialize SAMU Level!", return result);
-
 	/* Since only the initial state is completely set up at this point
 	 * (the other states are just copies of the boot state) we only
 	 * need to populate the ARB settings for the initial state.
@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
 			return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
 		case VceBootLevel:
 			return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
-		case SamuBootLevel:
-			return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
 	}
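Note on the copy-table hunks above: they reduce to a three-message handshake with the SMU, publish the high and low halves of the driver's table address, then request the transfer in the wanted direction. The sketch below is illustrative only; the helper name and the to_smu flag are invented for the example, while the message IDs, smu9_send_msg_to_smc_with_parameter(), and upper_32_bits()/lower_32_bits() are the ones used in the diff, with a zero return taken to mean success as the "== 0" checks imply.

/*
 * Hedged sketch of the table-transfer handshake used by
 * vega12_copy_table_from_smc()/vega12_copy_table_to_smc().
 */
static int transfer_table_sketch(struct pp_hwmgr *hwmgr, uint64_t mc_addr,
				 uint16_t table_id, bool to_smu)
{
	/* Tell the SMU where the driver's table buffer lives. */
	if (smu9_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(mc_addr)))
		return -EINVAL;
	if (smu9_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(mc_addr)))
		return -EINVAL;

	/* Direction is selected purely by the message ID. */
	return smu9_send_msg_to_smc_with_parameter(hwmgr,
			to_smu ? PPSMC_MSG_TransferTableDram2Smu
			       : PPSMC_MSG_TransferTableSmu2Dram,
			table_id);
}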
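Note on the feature-mask hunks: vega12_enable_smc_features() and vega12_get_enabled_smc_features() split a 64-bit mask into two 32-bit message parameters and rebuild it the same way. Below is a standalone, userspace worked example of that round trip; the SMU_FEATURES_LOW_* values are assumptions chosen to mirror the HIGH variants shown in vega12_smumgr.h, and the ULL suffixes are added for portability outside the kernel.

#include <stdint.h>
#include <stdio.h>

#define SMU_FEATURES_LOW_MASK   0x00000000FFFFFFFFULL  /* assumed */
#define SMU_FEATURES_LOW_SHIFT  0                       /* assumed */
#define SMU_FEATURES_HIGH_MASK  0xFFFFFFFF00000000ULL
#define SMU_FEATURES_HIGH_SHIFT 32

int main(void)
{
	uint64_t feature_mask = 0x123456789ABCDEF0ULL;

	/* Split: each 32-bit half rides in its own SMU message parameter. */
	uint32_t low  = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK)  >> SMU_FEATURES_LOW_SHIFT);
	uint32_t high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

	/* Reassemble: mirrors the *features_enabled expression in the hunk above. */
	uint64_t rebuilt = (((uint64_t)low  << SMU_FEATURES_LOW_SHIFT)  & SMU_FEATURES_LOW_MASK) |
			   (((uint64_t)high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK);

	printf("low=0x%08x high=0x%08x rebuilt=0x%016llx\n",
	       low, high, (unsigned long long)rebuilt);
	return rebuilt == feature_mask ? 0 : 1;
}

Compiled with any C99 compiler, this prints the two halves and verifies that the rebuilt mask equals the original.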
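Note on the smu9_get_argument() switch: on the caller side it mostly turns an out-parameter into a return value; the removed vega12_read_arg_from_smc() shows the response being read back from mmMP1_SMN_C2PMSG_82. The wrapper below is hypothetical and for illustration only; both call patterns are taken from the hunks above.

static uint32_t read_smu_response_sketch(struct pp_hwmgr *hwmgr)
{
	uint32_t val;

	/* Old vega12-private helper: result returned through an out-parameter. */
	/* vega12_read_arg_from_smc(hwmgr, &val); */

	/* New shared helper: the response register value is returned directly. */
	val = smu9_get_argument(hwmgr);

	return val;
}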