author		Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2018-03-01 12:14:24 +0300
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2018-03-01 12:14:24 +0300
commit		bba73071b6f71be0a101658d7c13866e30b264a6 (patch)
tree		2a0ea1fc5fd975f1c2e9e50de5bb3cb2cb3cb5f7 /drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
parent		c71b53cc66c5053ff3524a6132f8fc8199d618c3 (diff)
parent		f073d78eeb8efd85718e611c15f9a78647751dea (diff)
download	linux-bba73071b6f71be0a101658d7c13866e30b264a6.tar.xz
Merge drm-next into drm-intel-next-queued (this time for real)
To pull in the HDCP changes, especially wait_for changes to drm/i915
that Chris wants to build on top of.
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c')
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c	562
1 file changed, 414 insertions(+), 148 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 41e42beff213..0202841ae639 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
 #include "smu7_thermal.h"
 #include "smu7_clockpowergating.h"
 #include "processpptables.h"
+#include "pp_thermal.h"
 
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -90,7 +91,6 @@ enum DPM_EVENT_SRC {
 	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
 };
 
-static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable);
 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask);
@@ -792,6 +792,76 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int smu7_get_voltage_dependency_table(
+		const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+	uint8_t i = 0;
+	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
+			"Voltage Lookup Table empty",
+			return -EINVAL);
+
+	dep_table->count = allowed_dep_table->count;
+	for (i=0; i<dep_table->count; i++) {
+		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
+		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
+		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
+		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
+		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
+		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
+		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
+		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
+		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
+		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
+	}
+
+	return 0;
+}
+
+static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i;
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+
+	if (table_info == NULL)
+		return -EINVAL;
+
+	dep_sclk_table = table_info->vdd_dep_on_sclk;
+	dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+	odn_table->odn_core_clock_dpm_levels.num_of_pl =
+			data->golden_dpm_table.sclk_table.count;
+	for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
+		odn_table->odn_core_clock_dpm_levels.entries[i].clock =
+				data->golden_dpm_table.sclk_table.dpm_levels[i].value;
+		odn_table->odn_core_clock_dpm_levels.entries[i].enabled = true;
+		odn_table->odn_core_clock_dpm_levels.entries[i].vddc = dep_sclk_table->entries[i].vddc;
+	}
+
+	smu7_get_voltage_dependency_table(dep_sclk_table,
+		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
+
+	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
+			data->golden_dpm_table.mclk_table.count;
+	for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
+		odn_table->odn_memory_clock_dpm_levels.entries[i].clock =
+				data->golden_dpm_table.mclk_table.dpm_levels[i].value;
+		odn_table->odn_memory_clock_dpm_levels.entries[i].enabled = true;
+		odn_table->odn_memory_clock_dpm_levels.entries[i].vddc = dep_mclk_table->entries[i].vddc;
+	}
+
+	smu7_get_voltage_dependency_table(dep_mclk_table,
+		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
+
+	return 0;
+}
+
 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -808,6 +878,11 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 	/* save a copy of the default DPM table */
 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
 			sizeof(struct smu7_dpm_table));
+
+	/* initialize ODN table */
+	if (hwmgr->od_enabled)
+		smu7_odn_initial_default_setting(hwmgr);
+
 	return 0;
 }
 
@@ -1275,6 +1350,58 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+	if (smu_data == NULL)
+		return -EINVAL;
+
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+		return 0;
+
+	if (enable) {
+		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
+					hwmgr, PPSMC_MSG_EnableAvfs),
+					"Failed to enable AVFS!",
+					return -EINVAL);
+		}
+	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
+				hwmgr, PPSMC_MSG_DisableAvfs),
+				"Failed to disable AVFS!",
+				return -EINVAL);
+	}
+
+	return 0;
+}
+
+static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (smu_data == NULL)
+		return -EINVAL;
+
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+		return 0;
+
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+		smu7_avfs_control(hwmgr, false);
+	} else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+		smu7_avfs_control(hwmgr, false);
+		smu7_avfs_control(hwmgr, true);
+	} else {
+		smu7_avfs_control(hwmgr, true);
+	}
+
+	return 0;
+}
+
 int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
@@ -1357,7 +1484,6 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 
 	data->dll_default_on = false;
 	data->mclk_dpm0_activity_target = 0xa;
-	data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
 	data->vddc_vddgfx_delta = 300;
 	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
 	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
@@ -1381,6 +1507,14 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->enable_pkg_pwr_tracking_feature = true;
 	data->force_pcie_gen = PP_PCIEGenInvalid;
 	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+	data->current_profile_setting.bupdate_sclk = 1;
+	data->current_profile_setting.sclk_up_hyst = 0;
+	data->current_profile_setting.sclk_down_hyst = 100;
+	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
+	data->current_profile_setting.bupdate_sclk = 1;
+	data->current_profile_setting.mclk_up_hyst = 0;
+	data->current_profile_setting.mclk_down_hyst = 100;
+	data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
 
 	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
 		uint8_t tmp1, tmp2;
@@ -2266,14 +2400,18 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
 	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table =
 			hwmgr->dyn_state.vddci_dependency_on_mclk;
 
 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
-		"VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+		"VDDC dependency on SCLK table is missing. This table is mandatory",
+		return -EINVAL);
 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
-		"VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+		"VDDC dependency on SCLK table has to have is missing. This table is mandatory",
+		return -EINVAL);
 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
-		"VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+		"VDDC dependency on MCLK table is missing. This table is mandatory",
+		return -EINVAL);
 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
-		"VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+		"VDD dependency on MCLK table has to have is missing. This table is mandatory",
+		return -EINVAL);
 
 	data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
 	data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
@@ -2574,8 +2712,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
 				break;
 			}
 		}
-		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
 			*sclk_mask = 0;
+			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
+		}
 
 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -2590,8 +2730,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
 				break;
 			}
 		}
-		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
 			*sclk_mask = 0;
+			tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+		}
 
 		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
 			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -2603,6 +2745,9 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
 		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
 
 	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+	hwmgr->pstate_sclk = tmp_sclk;
+	hwmgr->pstate_mclk = tmp_mclk;
+
 	return 0;
 }
 
@@ -2614,6 +2759,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 	uint32_t mclk_mask = 0;
 	uint32_t pcie_mask = 0;
 
+	if (hwmgr->pstate_sclk == 0)
+		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
+
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
 		ret = smu7_force_dpm_highest(hwmgr);
@@ -2756,10 +2904,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-	disable_mclk_switching = ((1 < info.display_count) ||
-				  disable_mclk_switching_for_frame_lock ||
-				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
-				  (mode_info.refresh_rate > 120));
+	if (info.display_count == 0)
+		disable_mclk_switching = false;
+	else
+		disable_mclk_switching = ((1 < info.display_count) ||
+					  disable_mclk_switching_for_frame_lock ||
+					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -3312,7 +3462,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 			    void *value, int *size)
 {
 	uint32_t sclk, mclk, activity_percent;
-	uint32_t offset;
+	uint32_t offset, val_vid;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	/* size must be at least 4 bytes for all sensors */
@@ -3360,6 +3510,16 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 			return -EINVAL;
 		*size = sizeof(struct pp_gpu_power);
 		return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
+	case AMDGPU_PP_SENSOR_VDDGFX:
+		if ((data->vr_config & 0xff) == 0x2)
+			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
+					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
+		else
+			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
+					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
+
+		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -3382,8 +3542,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 	uint32_t i;
 	struct cgs_display_info info = {0};
 
-	data->need_update_smu7_dpm_table = 0;
-
 	for (i = 0; i < sclk_table->count; i++) {
 		if (sclk == sclk_table->dpm_levels[i].value)
 			break;
@@ -3525,108 +3683,27 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
 		struct pp_hwmgr *hwmgr, const void *input)
 {
 	int result = 0;
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct smu7_power_state *smu7_ps =
-			cast_const_phw_smu7_power_state(states->pnew_state);
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-	uint32_t sclk = smu7_ps->performance_levels
-			[smu7_ps->performance_level_count - 1].engine_clock;
-	uint32_t mclk = smu7_ps->performance_levels
-			[smu7_ps->performance_level_count - 1].memory_clock;
 	struct smu7_dpm_table *dpm_table = &data->dpm_table;
-
-	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
-	uint32_t dpm_count, clock_percent;
-	uint32_t i;
+	uint32_t count;
+	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
+	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
 
 	if (0 == data->need_update_smu7_dpm_table)
 		return 0;
 
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
-		dpm_table->sclk_table.dpm_levels
-		[dpm_table->sclk_table.count - 1].value = sclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-		/* Need to do calculation based on the golden DPM table
-		 * as the Heatmap GPU Clock axis is also based on the default values
-		 */
-			PP_ASSERT_WITH_CODE(
-				(golden_dpm_table->sclk_table.dpm_levels
-						[golden_dpm_table->sclk_table.count - 1].value != 0),
-				"Divide by 0!",
-				return -EINVAL);
-			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
-
-			for (i = dpm_count; i > 1; i--) {
-				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
-					clock_percent =
-					      ((sclk
-						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
-						) * 100)
-						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value +
-							(golden_dpm_table->sclk_table.dpm_levels[i].value *
-								clock_percent)/100;
-
-				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
-					clock_percent =
-						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
-						- sclk) * 100)
-						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value -
-							(golden_dpm_table->sclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-				} else
-					dpm_table->sclk_table.dpm_levels[i].value =
-							golden_dpm_table->sclk_table.dpm_levels[i].value;
-			}
+	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+		for (count = 0; count < dpm_table->sclk_table.count; count++) {
+			dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
+			dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
 		}
 	}
 
-	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
-		dpm_table->mclk_table.dpm_levels
-			[dpm_table->mclk_table.count - 1].value = mclk;
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
-		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
-			PP_ASSERT_WITH_CODE(
-					(golden_dpm_table->mclk_table.dpm_levels
-						[golden_dpm_table->mclk_table.count-1].value != 0),
-					"Divide by 0!",
-					return -EINVAL);
-			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
-			for (i = dpm_count; i > 1; i--) {
-				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
-					clock_percent = ((mclk -
-					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
-					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value +
-							(golden_dpm_table->mclk_table.dpm_levels[i].value *
-							clock_percent) / 100;
-
-				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
-					clock_percent = (
-					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
-					* 100)
-					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value -
-							(golden_dpm_table->mclk_table.dpm_levels[i].value *
-									clock_percent) / 100;
-				} else
-					dpm_table->mclk_table.dpm_levels[i].value =
-							golden_dpm_table->mclk_table.dpm_levels[i].value;
-			}
+	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+		for (count = 0; count < dpm_table->mclk_table.count; count++) {
+			dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
+			dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
 		}
 	}
 
@@ -3748,7 +3825,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 	}
 
-	data->need_update_smu7_dpm_table = 0;
+	data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
 
 	return 0;
 }
@@ -3825,6 +3902,11 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
 			"Failed to populate and upload SCLK MCLK DPM levels!",
 			result = tmp_result);
 
+	tmp_result = smu7_update_avfs(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to update avfs voltages!",
+			result = tmp_result);
+
 	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to generate DPM level enabled mask!",
@@ -4016,6 +4098,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
 	const struct smu7_power_state *psa;
 	const struct smu7_power_state *psb;
 	int i;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
 		return -EINVAL;
@@ -4040,6 +4123,10 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
 	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
 	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
 	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+	/* For OD call, set value based on flag */
+	*equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
+							DPMTABLE_OD_UPDATE_MCLK |
+							DPMTABLE_OD_UPDATE_VDDC));
 
 	return 0;
 }
@@ -4211,9 +4298,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
+	if (mask == 0)
 		return -EINVAL;
 
 	switch (type) {
@@ -4232,15 +4317,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 	case PP_PCIE:
 	{
 		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
 
-		while (tmp >>= 1)
-			level++;
-
-		if (!data->pcie_dpm_key_disabled)
-			smum_send_msg_to_smc_with_parameter(hwmgr,
+		if (!data->pcie_dpm_key_disabled) {
+			if (fls(tmp) != ffs(tmp))
+				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+			else
+				smum_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
+					fls(tmp) - 1);
+		}
 		break;
 	}
 	default:
@@ -4257,6 +4342,9 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
 	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
+	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
 	int i, now, size = 0;
 	uint32_t clock, pcie_speed;
 
@@ -4309,6 +4397,24 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
 					(i == now) ? "*" : "");
 		break;
+	case OD_SCLK:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s: \n", "OD_SCLK");
+			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
+				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
+					i, odn_sclk_table->entries[i].clock / 100,
+					odn_sclk_table->entries[i].vddc);
+		}
+		break;
+	case OD_MCLK:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s: \n", "OD_MCLK");
+			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
+				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
+					i, odn_mclk_table->entries[i].clock / 100,
+					odn_mclk_table->entries[i].vddc);
+		}
+		break;
 	default:
 		break;
 	}
@@ -4583,33 +4689,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
-{
-	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
-	if (smu_data == NULL)
-		return -EINVAL;
-
-	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-		return 0;
-
-	if (enable) {
-		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
-			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
-					hwmgr, PPSMC_MSG_EnableAvfs),
-					"Failed to enable AVFS!",
-					return -EINVAL);
-	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
-		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
-				hwmgr, PPSMC_MSG_DisableAvfs),
-				"Failed to disable AVFS!",
-				return -EINVAL);
-
-	return 0;
-}
-
 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
 					uint32_t virtual_addr_low,
 					uint32_t virtual_addr_hi,
@@ -4670,6 +4749,192 @@ static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
+static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+		struct PP_TemperatureRange *thermal_data)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+
+	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
+
+	if (hwmgr->pp_table_version == PP_TABLE_V1)
+		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
+			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	else if (hwmgr->pp_table_version == PP_TABLE_V0)
+		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
+			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return 0;
+}
+
+static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
+					enum PP_OD_DPM_TABLE_COMMAND type,
+					uint32_t clk,
+					uint32_t voltage)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t min_vddc;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+
+	if (table_info == NULL)
+		return -EINVAL;
+
+	dep_sclk_table = table_info->vdd_dep_on_sclk;
+	min_vddc = dep_sclk_table->entries[0].vddc;
+
+	if (voltage < min_vddc || voltage > 2000) {
+		pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
+		return false;
+	}
+
+	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+		if (data->vbios_boot_state.sclk_bootup_value > clk ||
+			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
+			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
+				data->vbios_boot_state.sclk_bootup_value,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
+			return false;
+		}
+	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
+		if (data->vbios_boot_state.mclk_bootup_value > clk ||
+			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
+			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
+				data->vbios_boot_state.mclk_bootup_value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
+			return false;
+		}
+	} else {
+		return false;
+	}
+
+	return true;
+}
+
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i;
+
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+
+	if (table_info == NULL)
+		return;
+
+	for (i=0; i<data->dpm_table.sclk_table.count; i++) {
+		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+				data->dpm_table.sclk_table.dpm_levels[i].value) {
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+			break;
+		}
+	}
+
+	for (i=0; i<data->dpm_table.sclk_table.count; i++) {
+		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+				data->dpm_table.mclk_table.dpm_levels[i].value) {
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+			break;
+		}
+	}
+
+	dep_table = table_info->vdd_dep_on_mclk;
+	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+	for (i=0; i < dep_table->count; i++) {
+		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+			break;
+		}
+	}
+	if (i == dep_table->count)
+		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+
+	dep_table = table_info->vdd_dep_on_sclk;
+	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+	for (i=0; i < dep_table->count; i++) {
+		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+			break;
+		}
+	}
+	if (i == dep_table->count)
+		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+}
+
+static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+					enum PP_OD_DPM_TABLE_COMMAND type,
+					long *input, uint32_t size)
+{
+	uint32_t i;
+	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
+	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	uint32_t input_clk;
+	uint32_t input_vol;
+	uint32_t input_level;
+
+	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
+				return -EINVAL);
+
+	if (!hwmgr->od_enabled) {
+		pr_info("OverDrive feature not enabled\n");
+		return -EINVAL;
+	}
+
+	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
+		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
+		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
+		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
+				"Failed to get ODN SCLK and Voltage tables",
+				return -EINVAL);
+	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
+		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
+		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
+
+		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
+				"Failed to get ODN MCLK and Voltage tables",
+				return -EINVAL);
+	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
+		smu7_odn_initial_default_setting(hwmgr);
+		return 0;
+	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
+		smu7_check_dpm_table_updated(hwmgr);
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i += 3) {
+		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
+			pr_info("invalid clock voltage input \n");
+			return 0;
+		}
+		input_level = input[i];
+		input_clk = input[i+1] * 100;
+		input_vol = input[i+2];
+
+		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
+			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
+			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
+			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
+			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+
 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.backend_init = &smu7_hwmgr_backend_init,
 	.backend_fini = &smu7_hwmgr_backend_fini,
@@ -4693,7 +4958,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.display_config_changed = smu7_display_configuration_changed_task,
 	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
 	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
-	.get_temperature = smu7_thermal_get_temperature,
 	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
 	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
 	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
@@ -4723,6 +4987,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.start_thermal_controller = smu7_start_thermal_controller,
 	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
 	.get_max_high_clocks = smu7_get_max_high_clocks,
+	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
+	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
+	.set_power_limit = smu7_set_power_limit,
};
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
@@ -4754,4 +5021,3 @@ int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
 
 	return ret;
 }
-
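Note on the new ODN edit path: smu7_odn_edit_dpm_table() consumes (level, clock in MHz, voltage in mV) triples, stages them in the backend ODN tables, and a subsequent PP_OD_COMMIT_DPM_TABLE call recomputes the need_update_smu7_dpm_table flags via smu7_check_dpm_table_updated(). In mainline amdgpu this is driven from userspace through the pp_od_clk_voltage sysfs file. The following is a minimal, hypothetical in-kernel caller sketch (not part of the patch) showing the calling convention; example_edit_sclk_level, the chosen level/clock/voltage values, and the assumption of an initialized pp_hwmgr with od_enabled set are illustrative only:

	/* Hypothetical sketch: drive the ODN edit path directly.
	 * Assumes 'hwmgr' is an initialized struct pp_hwmgr with
	 * hwmgr->od_enabled set and a populated ODN DPM table.
	 */
	static int example_edit_sclk_level(struct pp_hwmgr *hwmgr)
	{
		/* one triple: edit SCLK DPM level 1 to 500 MHz at 820 mV */
		long input[3] = { 1, 500, 820 };
		int ret;

		/* stage the point; the handler scales the clock by 100
		 * (MHz -> 10 kHz units) before range-checking it against
		 * the VBIOS boot clock and the overdrive limit */
		ret = smu7_odn_edit_dpm_table(hwmgr, PP_OD_EDIT_SCLK_VDDC_TABLE,
					      input, 3);
		if (ret)
			return ret;

		/* commit: this branch returns before the triple loop, so
		 * 'input' only needs to be non-NULL; it just re-diffs the
		 * ODN tables against the live DPM tables to set the
		 * DPMTABLE_OD_UPDATE_* flags */
		return smu7_odn_edit_dpm_table(hwmgr, PP_OD_COMMIT_DPM_TABLE,
					       input, 0);
	}

The same sequence from userspace would be writing "s 1 500 820" followed by "c" to pp_od_clk_voltage, with the staged tables readable back through the new OD_SCLK/OD_MCLK cases in smu7_print_clock_levels().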