Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
71 files changed, 19426 insertions, 24450 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/Kconfig b/drivers/gpu/drm/amd/powerplay/Kconfig deleted file mode 100644 index af380335b425..000000000000 --- a/drivers/gpu/drm/amd/powerplay/Kconfig +++ /dev/null @@ -1,6 +0,0 @@ -config DRM_AMD_POWERPLAY - bool "Enable AMD powerplay component" - depends on DRM_AMDGPU - default n - help - select this option will enable AMD powerplay component. diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index abbb658bdc1e..7174f7a68266 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -31,6 +31,7 @@ #include "eventmanager.h" #include "pp_debug.h" + #define PP_CHECK(handle) \ do { \ if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \ @@ -162,12 +163,12 @@ static int pp_hw_fini(void *handle) pp_handle = (struct pp_instance *)handle; eventmgr = pp_handle->eventmgr; - if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL) + if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL) eventmgr->pp_eventmgr_fini(eventmgr); smumgr = pp_handle->smu_mgr; - if (smumgr != NULL || smumgr->smumgr_funcs != NULL || + if (smumgr != NULL && smumgr->smumgr_funcs != NULL && smumgr->smumgr_funcs->smu_fini != NULL) smumgr->smumgr_funcs->smu_fini(smumgr); @@ -190,11 +191,9 @@ static int pp_sw_reset(void *handle) } -static int pp_set_clockgating_state(void *handle, - enum amd_clockgating_state state) +int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id) { struct pp_hwmgr *hwmgr; - uint32_t msg_id, pp_state; if (handle == NULL) return -EINVAL; @@ -208,76 +207,7 @@ static int pp_set_clockgating_state(void *handle, return 0; } - if (state == AMD_CG_STATE_UNGATE) - pp_state = 0; - else - pp_state = PP_STATE_CG | PP_STATE_LS; - - /* Enable/disable GFX blocks clock gating through SMU */ - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_CG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_3D, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_RLC, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_CP, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_MG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - - /* Enable/disable System blocks clock gating through SMU */ - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_BIF, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_BIF, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_MC, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_ROM, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_DRM, - 
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_HDP, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_SDMA, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); - - return 0; + return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id); } static int pp_set_powergating_state(void *handle, @@ -361,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = { .is_idle = pp_is_idle, .wait_for_idle = pp_wait_for_idle, .soft_reset = pp_sw_reset, - .set_clockgating_state = pp_set_clockgating_state, + .set_clockgating_state = NULL, .set_powergating_state = pp_set_powergating_state, }; @@ -537,7 +467,6 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); break; case AMD_PP_EVENT_READJUST_POWER_STATE: - pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps; ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); break; default: @@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) } } -static void -pp_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct pp_hwmgr *hwmgr; - - if (handle == NULL) - return; - - hwmgr = ((struct pp_instance *)handle)->hwmgr; - - if (hwmgr == NULL || hwmgr->hwmgr_func == NULL) - return; - - if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return; - } - - hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m); -} - static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr; @@ -764,15 +671,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) PP_CHECK_HW(hwmgr); if (!hwmgr->hardcode_pp_table) { - hwmgr->hardcode_pp_table = - kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size, + GFP_KERNEL); if (!hwmgr->hardcode_pp_table) return -ENOMEM; - - /* to avoid powerplay crash when hardcode pptable is empty */ - memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size); } memcpy(hwmgr->hardcode_pp_table, buf, size); @@ -897,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); } +static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->read_sensor == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -909,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .powergate_vce = pp_dpm_powergate_vce, .powergate_uvd = pp_dpm_powergate_uvd, .dispatch_tasks = pp_dpm_dispatch_tasks, - .print_current_performance_level = pp_debugfs_print_current_performance_level, .set_fan_control_mode = pp_dpm_set_fan_control_mode, .get_fan_control_mode = pp_dpm_get_fan_control_mode, .set_fan_speed_percent = 
pp_dpm_set_fan_speed_percent, @@ -923,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .set_sclk_od = pp_dpm_set_sclk_od, .get_mclk_od = pp_dpm_get_mclk_od, .set_mclk_od = pp_dpm_set_mclk_od, + .read_sensor = pp_dpm_read_sensor, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index 635fc4b48184..92b117843875 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = { unblock_adjust_power_state_tasks, set_cpu_power_state, notify_hw_power_source_tasks, + get_2d_performance_state_tasks, + set_performance_state_tasks, /* updateDALConfigurationTasks, variBrightDisplayConfigurationChangeTasks, */ adjust_power_state_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c index a46225c0fc01..489908887e9c 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c @@ -70,11 +70,12 @@ int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id) int i; table_entries = hwmgr->num_ps; + state = hwmgr->ps; for (i = 0; i < table_entries; i++) { if (state->id == *state_id) { - hwmgr->request_ps = state; + memcpy(hwmgr->request_ps, state, hwmgr->ps_size); return 0; } state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size); @@ -100,13 +101,14 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip) if (requested == NULL) return 0; + phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); + if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) equal = false; if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { - phm_apply_state_adjust_rules(hwmgr, requested, pcurrent); phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); - hwmgr->current_ps = requested; + memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index f7ce4cb71346..5fff1d636ab7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -3,14 +3,12 @@ # It provides the hardware management services for the driver. 
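The psm.c hunks above stop storing bare pointers into the power-state table: psm_set_states now memcpy()s the matching entry into hwmgr->request_ps, psm_adjust_power_state_dynamic copies request_ps into current_ps after the switch, and phm_apply_state_adjust_rules() runs before the equality check so the comparison sees the adjusted state. A minimal sketch of the copy-by-value selection, assuming request_ps is a preallocated buffer of hwmgr->ps_size bytes as the hunk implies (the helper name and error return are illustrative, not the driver's):

/* Walk the packed power-state table and copy, rather than alias, the match. */
static int select_request_state(struct pp_hwmgr *hwmgr, unsigned long state_id)
{
        struct pp_power_state *state = hwmgr->ps;
        unsigned int i;

        for (i = 0; i < hwmgr->num_ps; i++) {
                if (state->id == state_id) {
                        memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
                        return 0;
                }
                /* entries are hwmgr->ps_size bytes apart, not sizeof(*state) */
                state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
        }
        return -EINVAL;
}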
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ - hardwaremanager.o pp_acpi.o cz_hwmgr.o \ - cz_clockpowergating.o \ - tonga_processpptables.o ppatomctrl.o \ - tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ - fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ - fiji_clockpowergating.o fiji_thermal.o \ - polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ - polaris10_clockpowergating.o + hardwaremanager.o pp_acpi.o cz_hwmgr.o \ + cz_clockpowergating.o pppcielanes.o\ + process_pptables_v1_0.o ppatomctrl.o \ + smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ + smu7_clockpowergating.o + AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 8cc0df9b534a..7e4fcbbbe086 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) int result; cz_hwmgr->gfx_ramp_step = 256*25/100; - cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) @@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; - cz_hwmgr->clock_slow_down_freq = 25000; - cz_hwmgr->skip_clock_slow_down = 1; - cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ - cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ - cz_hwmgr->voting_rights_clients = 0x00C00033; - cz_hwmgr->static_screen_threshold = 8; - cz_hwmgr->ddi_power_gating_disabled = 0; - cz_hwmgr->bapm_enabled = 1; - cz_hwmgr->voltage_drop_threshold = 0; - cz_hwmgr->gfx_power_gating_threshold = 500; - cz_hwmgr->vce_slow_sclk_threshold = 20000; - cz_hwmgr->dce_slow_sclk_threshold = 30000; - cz_hwmgr->disable_driver_thermal_policy = 1; - cz_hwmgr->disable_nb_ps3_in_battery = 0; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -221,9 +206,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NonABMSupportInPPLib); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicM3Arbiter); @@ -233,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicPatchPowerState); cz_hwmgr->thermal_auto_throttling_treshold = 0; - cz_hwmgr->tdr_clock = 0; - cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -450,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; cz_hwmgr->boot_power_level.dsDividerIndex = 0; - cz_hwmgr->boot_power_level.ssDividerIndex = 0; - cz_hwmgr->boot_power_level.allowGnbSlow = 1; - cz_hwmgr->boot_power_level.forceNBPstate = 0; - cz_hwmgr->boot_power_level.hysteresis_up = 0; - cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; - cz_hwmgr->boot_power_level.display_wm = 0; - cz_hwmgr->boot_power_level.vce_wm = 0; return 0; @@ -749,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; clock = hwmgr->display_config.min_core_set_clock; -; if (clock == 0) printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n"); @@ -832,7 +804,7 
@@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetWatermarkFrequency, - cz_hwmgr->sclk_dpm.soft_max_clk); + cz_hwmgr->sclk_dpm.soft_max_clk); return 0; } @@ -858,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, PP_DBG_LOG("enabling ALL SMU features.\n"); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_EnableAllSmuFeatures, - dpm_features); + hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); if (ret == 0) cz_hwmgr->is_nb_dpm_enabled = true; } @@ -1246,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (hwmgr != NULL || hwmgr->backend != NULL) { + if (hwmgr != NULL && hwmgr->backend != NULL) { kfree(hwmgr->backend); kfree(hwmgr); } @@ -1402,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) PPSMC_MSG_SetUvdHardMin)); cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } else { cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } + } else { cz_enable_disable_uvd_dpm(hwmgr, false); + } return 0; } @@ -1564,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr) return sizeof(struct cz_power_state); } -static void -cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - - struct phm_clock_voltage_dependency_table *table = - hwmgr->dyn_state.vddc_dependency_on_sclk; - - struct phm_vce_clock_voltage_dependency_table *vce_table = - hwmgr->dyn_state.vce_clock_voltage_dependency_table; - - struct phm_uvd_clock_voltage_dependency_table *uvd_table = - hwmgr->dyn_state.uvd_clock_voltage_dependency_table; - - uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX), - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); - uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); - uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); - - uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent; - uint16_t vddnb, vddgfx; - int result; - - if (sclk_index >= NUM_SCLK_LEVELS) { - seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index); - } else { - sclk = table->entries[sclk_index].clk; - seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100); - } - - tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & - CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; - vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); - tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & - CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; - vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); - seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx); - - seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? 
"dis" : "en"); - if (!cz_hwmgr->uvd_power_gated) { - if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index); - } else { - vclk = uvd_table->entries[uvd_index].vclk; - dclk = uvd_table->entries[uvd_index].dclk; - seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100); - } - } - - seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en"); - if (!cz_hwmgr->vce_power_gated) { - if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "\n invalid vce dpm level %d\n", vce_index); - } else { - ecclk = vce_table->entries[vce_index].ecclk; - seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100); - } - } - - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); - if (0 == result) { - activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); - activity_percent = activity_percent > 100 ? 100 : activity_percent; - } else { - activity_percent = 50; - } - - seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent); -} - static void cz_hw_print_display_cfg( const struct cc6_settings *cc6_settings) { @@ -1690,13 +1592,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); if (separation_time != - hw_data->cc6_settings.cpu_pstate_separation_time - || cc6_disable != - hw_data->cc6_settings.cpu_cc6_disable - || pstate_disable != - hw_data->cc6_settings.cpu_pstate_disable - || pstate_switch_disable != - hw_data->cc6_settings.nb_pstate_switch_disable) { + hw_data->cc6_settings.cpu_pstate_separation_time || + cc6_disable != hw_data->cc6_settings.cpu_cc6_disable || + pstate_disable != hw_data->cc6_settings.cpu_pstate_disable || + pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) { hw_data->cc6_settings.cc6_setting_changed = true; @@ -1799,8 +1698,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p ps = cast_const_PhwCzPowerState(state); level_index = index > ps->level - 1 ? 
ps->level - 1 : index; - - level->coreClock = ps->levels[level_index].engineClock; + level->coreClock = ps->levels[level_index].engineClock; if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { for (i = 1; i < ps->level; i++) { @@ -1887,6 +1785,107 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c return 0; } +static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) +{ + struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); + uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); + uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); + + uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent; + uint16_t vddnb, vddgfx; + int result; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + if (sclk_index < NUM_SCLK_LEVELS) { + sclk = table->entries[sclk_index].clk; + *value = sclk; + return 0; + } + return -EINVAL; + case AMDGPU_PP_SENSOR_VDDNB: + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) & + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; + vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp); + *value = vddnb; + return 0; + case AMDGPU_PP_SENSOR_VDDGFX: + tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) & + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; + vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp); + *value = vddgfx; + return 0; + case AMDGPU_PP_SENSOR_UVD_VCLK: + if (!cz_hwmgr->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + return -EINVAL; + } else { + vclk = uvd_table->entries[uvd_index].vclk; + *value = vclk; + return 0; + } + } + *value = 0; + return 0; + case AMDGPU_PP_SENSOR_UVD_DCLK: + if (!cz_hwmgr->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + return -EINVAL; + } else { + dclk = uvd_table->entries[uvd_index].dclk; + *value = dclk; + return 0; + } + } + *value = 0; + return 0; + case AMDGPU_PP_SENSOR_VCE_ECCLK: + if (!cz_hwmgr->vce_power_gated) { + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + return -EINVAL; + } else { + ecclk = vce_table->entries[vce_index].ecclk; + *value = ecclk; + return 0; + } + } + *value = 0; + return 0; + case AMDGPU_PP_SENSOR_GPU_LOAD: + result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity); + if (0 == result) { + activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); + activity_percent = activity_percent > 100 ? 100 : activity_percent; + } else { + activity_percent = 50; + } + *value = activity_percent; + return 0; + case AMDGPU_PP_SENSOR_UVD_POWER: + *value = cz_hwmgr->uvd_power_gated ? 0 : 1; + return 0; + case AMDGPU_PP_SENSOR_VCE_POWER: + *value = cz_hwmgr->vce_power_gated ? 
0 : 1; + return 0; + default: + return -EINVAL; + } +} + static const struct pp_hwmgr_func cz_hwmgr_funcs = { .backend_init = cz_hwmgr_backend_init, .backend_fini = cz_hwmgr_backend_fini, @@ -1902,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .patch_boot_state = cz_dpm_patch_boot_state, .get_pp_table_entry = cz_dpm_get_pp_table_entry, .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, - .print_current_perforce_level = cz_print_current_perforce_level, .set_cpu_power_state = cz_set_cpu_power_state, .store_cc6_data = cz_store_cc6_data, .force_clock_level = cz_force_clock_level, @@ -1912,6 +1910,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, .get_clock_by_type = cz_get_clock_by_type, .get_max_high_clocks = cz_get_max_high_clocks, + .read_sensor = cz_read_sensor, }; int cz_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c deleted file mode 100644 index 5afe82068b29..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
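Further up, the per-ASIC debugfs dump (print_current_perforce_level) is replaced by a read_sensor callback: pp_dpm_read_sensor() validates the handle and forwards to hwmgr_func->read_sensor, and cz_read_sensor() switches on the sensor index and writes back a single value. A minimal sketch of a caller, assuming a valid powerplay handle and dispatch through the pp_dpm_funcs table added above; per the old dump, the clock is in 10 kHz units (divided by 100 to print MHz) and the load is a percentage:

const struct amd_powerplay_funcs *funcs = &pp_dpm_funcs;
int32_t sclk = 0, load = 0;

/* One value per query instead of one big seq_file dump. */
if (!funcs->read_sensor(handle, AMDGPU_PP_SENSOR_GFX_SCLK, &sclk))
        printk(KERN_INFO "sclk: %d MHz\n", sclk / 100);

if (!funcs->read_sensor(handle, AMDGPU_PP_SENSOR_GPU_LOAD, &load))
        printk(KERN_INFO "GPU load: %d %%\n", load);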
- * - */ - -#include "hwmgr.h" -#include "fiji_clockpowergating.h" -#include "fiji_ppsmc.h" -#include "fiji_hwmgr.h" - -int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - data->acp_power_gated = false; - - return 0; -} - -int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->uvd_power_gated == bgate) - return 0; - - data->uvd_power_gated = bgate; - - if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - fiji_update_uvd_dpm(hwmgr, true); - } else { - fiji_update_uvd_dpm(hwmgr, false); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - } - - return 0; -} - -int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_set_power_state_input states; - const struct pp_power_state *pcurrent; - struct pp_power_state *requested; - - if (data->vce_power_gated == bgate) - return 0; - - data->vce_power_gated = bgate; - - pcurrent = hwmgr->current_ps; - requested = hwmgr->request_ps; - - states.pcurrent_state = &(pcurrent->hardware); - states.pnew_state = &(requested->hardware); - - fiji_update_vce_dpm(hwmgr, &states); - fiji_enable_disable_vce_dpm(hwmgr, !bgate); - - return 0; -} - -int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->samu_power_gated == bgate) - return 0; - - data->samu_power_gated = bgate; - - if (bgate) - fiji_update_samu_dpm(hwmgr, true); - else - fiji_update_samu_dpm(hwmgr, false); - - return 0; -} - -int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->acp_power_gated == bgate) - return 0; - - data->acp_power_gated = bgate; - - if (bgate) - fiji_update_acp_dpm(hwmgr, true); - else - fiji_update_acp_dpm(hwmgr, false); - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h deleted file mode 100644 index 32d43e8fecb2..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef FIJI_DYN_DEFAULTS_H -#define FIJI_DYN_DEFAULTS_H - -/** \file -* Volcanic Islands Dynamic default parameters. -*/ - -enum FIJIdpm_TrendDetection -{ - FIJIAdpm_TrendDetection_AUTO, - FIJIAdpm_TrendDetection_UP, - FIJIAdpm_TrendDetection_DOWN -}; -typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection; - -/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */ - -/* Bit vector representing same fields as hardware register. */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ???? - * HDP_busy - * IH_busy - * UVD_busy - * VCE_busy - * ACP_busy - * SAMU_busy - * SDMA enabled */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ???? - * SH_Gfx_busy - * RB_Gfx_busy - * VCE_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. - * FE_Gfx_busy - * RB_Gfx_busy - * ACP_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. - * FE_Gfx_busy - * SH_Gfx_busy - * UVD_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy - * VCE_busy - * ACP_busy - * SAMU_busy */ - -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */ -#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */ - - -/* thermal protection counter (units). */ -#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */ - -/* static screen threshold unit */ -#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0 - -/* static screen threshold */ -#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8 - -/* gfx idle clock stop threshold */ -#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */ - -/* Fixed reference divider to use when building baby stepping tables. */ -#define PPFIJI_REFERENCEDIVIDER_DFLT 4 - -/* ULV voltage change delay time - * Used to be delay_vreg in N.I. split for S.I. - * Using N.I. delay_vreg value as default - * ReferenceClock = 2700 - * VoltageResponseTime = 1000 - * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687 - */ -#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035 -#define PPFIJI_CGULVCONTROL_DFLT 0x00007450 -#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/ -#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */ - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c deleted file mode 100644 index 120a9e2c3152..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ /dev/null @@ -1,5599 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/fb.h> -#include "linux/delay.h" - -#include "hwmgr.h" -#include "fiji_smumgr.h" -#include "atombios.h" -#include "hardwaremanager.h" -#include "ppatomctrl.h" -#include "atombios.h" -#include "cgs_common.h" -#include "fiji_dyn_defaults.h" -#include "fiji_powertune.h" -#include "smu73.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" -#include "pppcielanes.h" -#include "fiji_hwmgr.h" -#include "tonga_processpptables.h" -#include "tonga_pptable.h" -#include "pp_debug.h" -#include "pp_acpi.h" -#include "amd_pcie_helpers.h" -#include "cgs_linux.h" -#include "ppinterrupt.h" - -#include "fiji_clockpowergating.h" -#include "fiji_thermal.h" - -#define VOLTAGE_SCALE 4 -#define SMC_RAM_END 0x40000 -#define VDDC_VDDCI_DELTA 300 - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - -#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */ -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -/* From smc_reg.h */ -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */ - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -#define VDDC_VDDCI_DELTA 300 - -#define ixSWRST_COMMAND_1 0x1400103 -#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000 - -/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ - DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ - DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ -}; - - -/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs - * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] - */ -static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = -{ {600, 1050, 3, 0}, {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and - * [Floor Freq, Boundary Freq, VID min , VID max] - */ -static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = -{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] - * (coming from PWR_CKS_CNTL.stretch_amount reg spec) - */ -static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = -{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; - -static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic); - -struct fiji_power_state *cast_phw_fiji_power_state( - struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL;); - - return (struct fiji_power_state *)hw_ps; -} - -const struct fiji_power_state *cast_const_phw_fiji_power_state( - const struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL;); - - return (const struct fiji_power_state *)hw_ps; -} - -static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; -} - -static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_ulv_parm *ulv = &data->ulv; - - ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT; - data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7; - - data->static_screen_threshold_unit = - PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT; - data->static_screen_threshold = - PPFIJI_STATICSCREENTHRESHOLD_DFLT; - - /* Unset ABM cap as it moved to DAL. 
- * Add PHM_PlatformCaps_NonABMSupportInPPLib - * for re-direct ABM related request to DAL - */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ABM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicACTiming); - - fiji_initialize_power_tune_defaults(hwmgr); - - data->mclk_stutter_mode_threshold = 60000; - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); -} - -static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - uint16_t virtual_voltage_id, int32_t *sclk) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); - - /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ - for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { - voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; - if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) - break; - } - - PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, - "Can't find requested voltage id in vdd_dep_on_sclk table!", - return -EINVAL; - ); - - *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; - - return 0; -} - -/** -* Get Leakage VDDC based on leakage ID. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint16_t vv_id; - uint16_t vddc = 0; - uint16_t evv_default = 1150; - uint16_t i, j; - uint32_t sclk = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - int result; - - for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) { - vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - if (!fiji_get_sclk_for_voltage_evv(hwmgr, - table_info->vddc_lookup_table, vv_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - for (j = 1; j < sclk_table->count; j++) { - if (sclk_table->entries[j].clk == sclk && - sclk_table->entries[j].cks_enable == 0) { - sclk += 5000; - break; - } - } - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableDriverEVV)) - result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr, - VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true); - else - result = -EINVAL; - - if (result) - result = atomctrl_get_voltage_evv_on_sclk(hwmgr, - VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc); - - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. 
*/ - PP_ASSERT_WITH_CODE((vddc < 2000), - "Invalid VDDC value, greater than 2v!", result = -EINVAL;); - - if (result) - /* 1.15V is the default safe value for Fiji */ - vddc = evv_default; - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddc != 0 && vddc != vv_id) { - data->vddc_leakage.actual_voltage - [data->vddc_leakage.count] = vddc; - data->vddc_leakage.leakage_id - [data->vddc_leakage.count] = vv_id; - data->vddc_leakage.count++; - } - } - } - return 0; -} - -/** - * Change virtual leakage voltage to actual value. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pointer to changing voltage - * @param pointer to leakage table - */ -static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, - uint16_t *voltage, struct fiji_leakage_voltage *leakage_table) -{ - uint32_t index; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (index = 0; index < leakage_table->count; index++) { - /* if this voltage matches a leakage voltage ID */ - /* patch with actual leakage voltage */ - if (leakage_table->leakage_id[index] == *voltage) { - *voltage = leakage_table->actual_voltage[index]; - break; - } - } - - if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); -} - -/** -* Patch voltage lookup table by EVV leakages. -* -* @param hwmgr the address of the powerplay hardware manager. -* @param pointer to voltage lookup table -* @param pointer to leakage table -* @return always 0 -*/ -static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - struct fiji_leakage_voltage *leakage_table) -{ - uint32_t i; - - for (i = 0; i < lookup_table->count; i++) - fiji_patch_with_vdd_leakage(hwmgr, - &lookup_table->entries[i].us_vdd, leakage_table); - - return 0; -} - -static int fiji_patch_clock_voltage_limits_with_vddc_leakage( - struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table, - uint16_t *vddc) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); - hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = - table_info->max_clock_voltage_on_dc.vddc; - return 0; -} - -static int fiji_patch_voltage_dependency_tables_with_lookup_table( - struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = - table_info->vdd_dep_on_mclk; - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - voltageId = mclk_table->entries[entryId].vddInd; - mclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mm_table->count; ++entryId) { - voltageId = mm_table->entries[entryId].vddcInd; - mm_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - return 0; - -} - -static int fiji_calc_voltage_dependency_tables(struct 
pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage. */ - return 0; -} - -static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage from mm table. */ - return 0; -} - -static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; - table_size = lookup_table->count; - - PP_ASSERT_WITH_CODE(0 != lookup_table->count, - "Lookup table is empty", return -EINVAL); - - /* Sorting voltages */ - for (i = 0; i < table_size - 1; i++) { - for (j = i + 1; j > 0; j--) { - if (lookup_table->entries[j].us_vdd < - lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; - } - } - } - - return 0; -} - -static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr) -{ - int result = 0; - int tmp_result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr, - table_info->vddc_lookup_table, &(data->vddc_leakage)); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, - &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_calc_voltage_dependency_tables(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); - if(tmp_result) - result = tmp_result; - - return result; -} - -static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, - "VDD dependency on SCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, - "VDD dependency on SCLK table has to have is missing. \ - This table is mandatory", return -EINVAL); - - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, - "VDD dependency on MCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
\ - This table is mandatory", return -EINVAL); - - data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc; - data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table-> - entries[allowed_sclk_vdd_table->count - 1].vddc; - - table_info->max_clock_voltage_on_ac.sclk = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.mclk = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.vddc = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - table_info->max_clock_voltage_on_ac.vddci = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = - table_info->max_clock_voltage_on_ac.sclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = - table_info->max_clock_voltage_on_ac.mclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = - table_info->max_clock_voltage_on_ac.vddc; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = - table_info->max_clock_voltage_on_ac.vddci; - - return 0; -} - -static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speedCntl = 0; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speedCntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -/** Patch the Boot State to match VBIOS boot clocks and voltage. -* -* @param hwmgr Pointer to the hardware manager. -* @param pPowerState The address of the PowerState instance being created. -* -*/ -static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. 
*/ - data->vbios_boot_state.sclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = - le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = - fiji_get_current_pcie_speed(hwmgr); - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)fiji_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data; - uint32_t i; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - bool stay_in_boot; - int result; - - data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; - - data->dll_default_on = false; - data->sram_end = SMC_RAM_END; - - for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) - data->activity_target[i] = FIJI_AT_DFLT; - - data->vddc_vddci_delta = VDDC_VDDCI_DELTA; - - data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT; - data->mclk_dpm0_activity_target = 0xa; - - data->sclk_dpm_key_disabled = 0; - data->mclk_dpm_key_disabled = 0; - data->pcie_dpm_key_disabled = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - - data->gpio_debug = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - /* need to set voltage control types before EVV patching */ - data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE; - data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; - data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; - - data->force_pcie_gen = PP_PCIEGenInvalid; - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) - data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) - data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO; - - if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - 
data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; - } - - if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - - if (table_info && table_info->cac_dtp_table->usClockStretchAmount) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - - fiji_init_dpm_defaults(hwmgr); - - /* Get leakage voltage based on leakage ID. */ - fiji_get_evv_voltages(hwmgr); - - /* Patch our voltage dependency table with actual leakage voltage - * We need to perform leakage translation before it's used by other functions - */ - fiji_complete_dependency_tables(hwmgr); - - /* Parse pptable data read from VBIOS */ - fiji_set_private_data_based_on_pptable(hwmgr); - - /* ULV Support */ - data->ulv.ulv_supported = true; /* ULV feature is enabled by default */ - - /* Initalize Dynamic State Adjustment Rule Settings */ - result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - - if (!result) { - data->uvd_enabled = false; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - data->vddc_phase_shed_control = false; - } - - stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StayInBootState); - - if (0 == result) { - struct cgs_system_info sys_info = {0}; - - data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - FIJI_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM); - - if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp && - hwmgr->thermal_controller. 
- advanceFanControlParameters.ucFanControlMode) { - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = - table_info->cac_dtp_table->usOperatingTempMinLimit; - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = - table_info->cac_dtp_table->usOperatingTempMaxLimit; - hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = - table_info->cac_dtp_table->usDefaultTargetOperatingTemp; - hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = - table_info->cac_dtp_table->usOperatingTempStep; - hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = - table_info->cac_dtp_table->usTargetOperatingTemp; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport); - } - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - } else { - /* Ignore return value in here, we are cleaning up a mess. */ - fiji_hwmgr_backend_fini(hwmgr); - } - - return 0; -} - -/** - * Read clock related registers. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - data->clock_registers.vCG_SPLL_FUNC_CNTL = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL); - data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL_2); - data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL_3); - data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_FUNC_CNTL_4); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_SPREAD_SPECTRUM); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_SPLL_SPREAD_SPECTRUM_2); - - return 0; -} - -/** - * Find out if memory is GDDR5. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int fiji_get_memory_type(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); - - data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); - - return 0; -} - -/** - * Enables Dynamic Power Management by SMC - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @return always 0 - */ -static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, STATIC_PM_EN, 1); - - return 0; -} - -/** - * Initialize PowerGating States for different engines - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - data->acp_power_gated = false; - data->pg_acp_init = true; - - return 0; -} - -static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - -static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = fiji_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = fiji_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = fiji_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = fiji_init_power_gate_state(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init power gate state!", result = tmp_result); - - tmp_result = tonga_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = fiji_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -/** -* Checks if we want to support voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -*/ -static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr) -{ - const struct fiji_hwmgr *data = - (const struct fiji_hwmgr *)(hwmgr->backend); - - return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control); -} - -/** -* Enable voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -/** -* Remove repeated voltage values and create table with unique values. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @param vol_table the pointer to changing voltage table -* @return 0 in success -*/ - -static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr, - struct pp_atomctrl_voltage_table *vol_table) -{ - uint32_t i, j; - uint16_t vvalue; - bool found = false; - struct pp_atomctrl_voltage_table *table; - - PP_ASSERT_WITH_CODE((NULL != vol_table), - "Voltage Table empty.", return -EINVAL); - table = kzalloc(sizeof(struct pp_atomctrl_voltage_table), - GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - table->mask_low = vol_table->mask_low; - table->phase_delay = vol_table->phase_delay; - - for (i = 0; i < vol_table->count; i++) { - vvalue = vol_table->entries[i].value; - found = false; - - for (j = 0; j < table->count; j++) { - if (vvalue == table->entries[j].value) { - found = true; - break; - } - } - - if (!found) { - table->entries[table->count].value = vvalue; - table->entries[table->count].smio_low = - vol_table->entries[i].smio_low; - table->count++; - } - } - - memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); - kfree(table); - - return 0; -} - -static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint32_t i; - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table); - - PP_ASSERT_WITH_CODE((0 != dep_table->count), - "Voltage Dependency Table empty.", return -EINVAL); - - vol_table->mask_low = 0; - vol_table->phase_delay = 0; - vol_table->count = dep_table->count; - - for (i = 0; i < dep_table->count; i++) { - vol_table->entries[i].value = dep_table->entries[i].mvdd; - vol_table->entries[i].smio_low = 0; - } - - result = fiji_trim_voltage_table(hwmgr, vol_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to trim MVDD table.", return result); - - return 0; -} - -static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint32_t i; - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table); - - PP_ASSERT_WITH_CODE((0 != dep_table->count), - "Voltage Dependency Table empty.", return -EINVAL); - - vol_table->mask_low = 0; - vol_table->phase_delay = 0; - vol_table->count = dep_table->count; - - for (i = 0; i < dep_table->count; i++) { - vol_table->entries[i].value = dep_table->entries[i].vddci; - vol_table->entries[i].smio_low = 0; - } - - result = fiji_trim_voltage_table(hwmgr, vol_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to trim VDDCI table.", return result); - - return 0; -} - -static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - int i = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table); - - PP_ASSERT_WITH_CODE((0 != lookup_table->count), - "Voltage Lookup Table empty.", return -EINVAL); - - vol_table->mask_low = 0; - vol_table->phase_delay = 0; - - vol_table->count = lookup_table->count; - - for (i = 0; i < vol_table->count; i++) { - vol_table->entries[i].value = lookup_table->entries[i].us_vdd; - vol_table->entries[i].smio_low = 0; - } - - return 0; -} - -/* ---- Voltage Tables ---- - * If the voltage table would be bigger than - * what will fit into the state table on - * the SMC keep only the higher entries. 
- */ -static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr, - uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table) -{ - unsigned int i, diff; - - if (vol_table->count <= max_vol_steps) - return; - - diff = vol_table->count - max_vol_steps; - - for (i = 0; i < max_vol_steps; i++) - vol_table->entries[i] = vol_table->entries[i + diff]; - - vol_table->count = max_vol_steps; - - return; -} - -/** -* Create Voltage Tables. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - int result; - - if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, - &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve MVDD table.", - return result); - } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - result = fiji_get_svi2_mvdd_voltage_table(hwmgr, - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 MVDD table from dependency table.", - return result;); - } - - if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, - &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", - return result); - } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - result = fiji_get_svi2_vddci_voltage_table(hwmgr, - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependency table.", - return result); - } - - if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - result = fiji_get_svi2_vdd_voltage_table(hwmgr, - table_info->vddc_lookup_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from lookup table.", - return result); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - fiji_trim_voltage_table_to_fit_state_table(hwmgr, - SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - fiji_trim_voltage_table_to_fit_state_table(hwmgr, - SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD.
Trimming to fit state table.", - fiji_trim_voltage_table_to_fit_state_table(hwmgr, - SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table))); - - return 0; -} - -static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - /* Program additional LP registers - * that are no longer programmed by VBIOS - */ - cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, - cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); - - return 0; -} - -/** -* Programs static screen detection parameters -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_program_static_screen_threshold_parameters( - struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** -* Setup display gap for glitch free memory clock switching. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t displayGap = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL); - - displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL, - DISP_GAP, DISPLAY_GAP_IGNORE); - - displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL, - DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, displayGap); - - return 0; -} - -/** -* Programs activity state transition voting clients -* -* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0 -*/ -static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - -static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr) -{ - /* Reset voting clients before disabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, 0); - - return 0; -} - -/** -* Get the location of various tables inside the FW image. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); - uint32_t tmp; - int result; - bool error = false; - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) - data->dpm_table_start = tmp; - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (!result) { - data->soft_regs_start = tmp; - smu_data->soft_regs_start = tmp; - } - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (!result) - data->mc_reg_table_start = tmp; - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (!result) - data->fan_table_start = tmp; - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (!result) - data->arb_table_start = tmp; - - error |= (0 != result); - - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, Version), - &tmp, data->sram_end); - - if (!result) - hwmgr->microcode_version_info.SMC = tmp; - - error |= (0 != result); - - return error ? -1 : 0; -} - -/* Copy one arb setting to another and then switch the active set. - * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
- */ -static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arb_src, uint32_t arb_dest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arb_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - default: - return -EINVAL; - } - - switch (arb_dest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - default: - return -EINVAL; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); - - return 0; -} - -/** -* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value -* -* @param hwmgr the address of the powerplay hardware manager. -* @return if success then 0; -*/ -static int fiji_reset_to_default(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); -} - -/** -* Initial switch from ARB F0->F1 -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -* This function is to be called from the SetPowerState table. 
-*/ -static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) -{ - return fiji_copy_and_switch_arb_sets(hwmgr, - MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) -{ - uint32_t tmp; - - tmp = (cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixSMC_SCRATCH9) & - 0x0000ff00) >> 8; - - if (tmp == MC_CG_ARB_FREQ_F0) - return 0; - - return fiji_copy_and_switch_arb_sets(hwmgr, - tmp, MC_CG_ARB_FREQ_F0); -} - -static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr, - struct fiji_single_dpm_table *dpm_table, uint32_t count) -{ - int i; - PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER, - "Fatal error, can not set up single DPM table entries " - "to exceed max number!",); - - dpm_table->count = count; - for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) - dpm_table->dpm_levels[i].enabled = false; - - return 0; -} - -static void fiji_setup_pcie_table_entry( - struct fiji_single_dpm_table *dpm_table, - uint32_t index, uint32_t pcie_gen, - uint32_t pcie_lanes) -{ - dpm_table->dpm_levels[index].value = pcie_gen; - dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = true; -} - -static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint32_t i, max_entry; - - PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || - data->use_pcie_power_saving_levels), "No pcie performance levels!", - return -EINVAL); - - if (data->use_pcie_performance_levels && - !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && - data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - fiji_reset_single_dpm_table(hwmgr, - &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK); - - if (pcie_table != NULL) { - /* max_entry is used to make sure we reserve one PCIE level - * for boot level (fix for A+A PSPP issue). - * If PCIE table from PPTable have ULV entry + 8 entries, - * then ignore the last entry.*/ - max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ? 
- SMU73_MAX_LEVELS_LINK : pcie_table->count; - for (i = 1; i < max_entry; i++) { - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, - get_pcie_gen_support(data->pcie_gen_cap, - pcie_table->entries[i].gen_speed), - get_pcie_lane_support(data->pcie_lane_cap, - pcie_table->entries[i].lane_width)); - } - data->dpm_table.pcie_speed_table.count = max_entry - 1; - } else { - /* Hardcode Pcie Table */ - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - data->dpm_table.pcie_speed_table.count = 6; - } - /* Populate last level for boot PCIE level, but do not increment count. */ - fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, - data->dpm_table.pcie_speed_table.count, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - return 0; -} - -/* - * This function is to initalize all DPM state tables - * for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these - * state tables to the allowed range based - * on the power policy or external client requests, - * such as UVD request, etc. - */ -static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, - "SCLK dependency table has to have is missing. " - "This table is mandatory", - return -EINVAL); - - PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, - "MCLK dependency table has to have is missing. 
" - "This table is mandatory", - return -EINVAL); - - /* clear the state table to reset everything to default */ - fiji_reset_single_dpm_table(hwmgr, - &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS); - fiji_reset_single_dpm_table(hwmgr, - &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY); - - /* Initialize Sclk DPM table based on allow Sclk values */ - data->dpm_table.sclk_table.count = 0; - for (i = 0; i < dep_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels - [data->dpm_table.sclk_table.count - 1].value != - dep_sclk_table->entries[i].clk) { - data->dpm_table.sclk_table.dpm_levels - [data->dpm_table.sclk_table.count].value = - dep_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels - [data->dpm_table.sclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.sclk_table.count++; - } - } - - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i=0; i<dep_mclk_table->count; i++) { - if ( i==0 || data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count - 1].value != - dep_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count].value = - dep_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.mclk_table.count++; - } - } - - /* setup PCIE gen speed levels */ - fiji_setup_default_pcie_table(hwmgr); - - /* save a copy of the default DPM table */ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), - sizeof(struct fiji_dpm_table)); - - return 0; -} - -/** - * @brief PhwFiji_GetVoltageOrder - * Returns index of requested voltage record in lookup(table) - * @param lookup_table - lookup list to search in - * @param voltage - voltage to look for - * @return 0 on success - */ -uint8_t fiji_get_voltage_index( - struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage) -{ - uint8_t count = (uint8_t) (lookup_table->count); - uint8_t i; - - PP_ASSERT_WITH_CODE((NULL != lookup_table), - "Lookup Table empty.", return 0); - PP_ASSERT_WITH_CODE((0 != count), - "Lookup Table empty.", return 0); - - for (i = 0; i < lookup_table->count; i++) { - /* find first voltage equal or bigger than requested */ - if (lookup_table->entries[i].us_vdd >= voltage) - return i; - } - /* voltage is bigger than max voltage in the table */ - return i - 1; -} - -/** -* Preparation of vddc and vddgfx CAC tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - uint32_t count; - uint8_t index; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_voltage_lookup_table *lookup_table = - table_info->vddc_lookup_table; - /* tables is already swapped, so in order to use the value from it, - * we need to swap it back. 
- * We are populating vddc CAC data to BapmVddc table - * in split and merged mode - */ - for( count = 0; count<lookup_table->count; count++) { - index = fiji_get_voltage_index(lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 - - (lookup_table->entries[index].us_cac_low * - VOLTAGE_SCALE)) / 25); - table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 - - (lookup_table->entries[index].us_cac_high * - VOLTAGE_SCALE)) / 25); - } - - return 0; -} - -/** -* Preparation of voltage tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ - -int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - int result; - - result = fiji_populate_cac_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate CAC voltage tables to SMC", - return -EINVAL); - - return 0; -} - -static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_Ulv *state) -{ - int result = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - state->CcPwrDynRm = 0; - state->CcPwrDynRm1 = 0; - - state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; - state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset * - VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 ); - - state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; - - if (!result) { - CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); - CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); - } - return result; -} - -static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - return fiji_populate_ulv_level(hwmgr, &table->Ulv); -} - -static int32_t fiji_get_dpm_level_enable_mask_value( - struct fiji_single_dpm_table* dpm_table) -{ - int32_t i; - int32_t mask = 0; - - for (i = dpm_table->count; i > 0; i--) { - mask = mask << 1; - if (dpm_table->dpm_levels[i - 1].enabled) - mask |= 0x1; - else - mask &= 0xFFFFFFFE; - } - return mask; -} - -static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - int i; - - /* Index (dpm_table->pcie_speed_table.count) - * is reserved for PCIE boot level. 
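The BapmVddcVidLoSidd/BapmVddcVidHiSidd expressions in fiji_populate_cac_table above convert a CAC voltage into an SVI2 VID code. A minimal standalone sketch of that conversion, assuming VOLTAGE_SCALE is 4 (i.e. the table voltages are in mV, so the expression reduces to the usual VID = (1550 mV - V) / 6.25 mV encoding); the helper name is illustrative only:

#include <stdint.h>

/* Illustrative: mirrors (6200 - mv * VOLTAGE_SCALE) / 25, assuming VOLTAGE_SCALE == 4. */
static uint8_t cac_mv_to_vid(uint16_t voltage_mv)
{
	return (uint8_t)((6200 - (uint32_t)voltage_mv * 4) / 25);
}

/* Example: 1100 mV -> (6200 - 4400) / 25 = 72, and 1550 mV - 72 * 6.25 mV = 1100 mV. */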
*/ - for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( - dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = 1; - table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); - table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); - table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); - } - - data->smc_state_table.LinkLevelCount = - (uint8_t)dpm_table->pcie_speed_table.count; - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); - - return 0; -} - -/** -* Calculates the SCLK dividers using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ -static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk) -{ - const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_clock_dividers_vi dividers; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t ref_clock; - uint32_t ref_divider; - uint32_t fbdiv; - int result; - - /* get the engine clock dividers for this clock value */ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", - return result); - - /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.
*/ - ref_clock = atomctrl_get_reference_clock(hwmgr); - ref_divider = 1 + dividers.uc_pll_ref_div; - - /* low 14 bits is fraction and high 12 bits is divider */ - fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; - - /* SPLL_FUNC_CNTL setup */ - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, - SPLL_REF_DIV, dividers.uc_pll_ref_div); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, - SPLL_PDIV_A, dividers.uc_pll_post_div); - - /* SPLL_FUNC_CNTL_3 setup*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, - SPLL_FB_DIV, fbdiv); - - /* set to use fractional accumulation*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, - SPLL_DITHEN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { - struct pp_atomctrl_internal_ss_info ssInfo; - - uint32_t vco_freq = clock * dividers.uc_pll_post_div; - if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, - vco_freq, &ssInfo)) { - /* - * ss_info.speed_spectrum_percentage -- in unit of 0.01% - * ss_info.speed_spectrum_rate -- in unit of khz - * - * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 - */ - uint32_t clk_s = ref_clock * 5 / - (ref_divider * ssInfo.speed_spectrum_rate); - /* clkv = 2 * D * fbdiv / NS */ - uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * - fbdiv / (clk_s * 10000); - - cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, - CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); - cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, - CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); - cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, - CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); - } - } - - sclk->SclkFrequency = clock; - sclk->CgSpllFuncCntl3 = spll_func_cntl_3; - sclk->CgSpllFuncCntl4 = spll_func_cntl_4; - sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; - sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; - sclk->SclkDid = (uint8_t)dividers.pll_post_divider; - - return 0; -} - -static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) -{ - uint32_t i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct pp_atomctrl_voltage_table *vddci_table = - &(data->vddci_voltage_table); - - for (i = 0; i < vddci_table->count; i++) { - if (vddci_table->entries[i].value >= vddci) - return vddci_table->entries[i].value; - } - - PP_ASSERT_WITH_CODE(false, - "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i-1].value); -} - -static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_clock_voltage_dependency_table* dep_table, - uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) -{ - uint32_t i; - uint16_t vddci; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - *voltage = *mvdd = 0; - - /* clock - voltage dependency table is empty table */ - if (dep_table->count == 0) - return -EINVAL; - - for (i = 0; i < dep_table->count; i++) { - /* find first sclk bigger than request */ - if (dep_table->entries[i].clk >= clock) { - *voltage |= (dep_table->entries[i].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i].vddci) - *voltage |= (dep_table->entries[i].vddci * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else { - vddci = fiji_find_closest_vddci(hwmgr, - 
(dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i].mvdd * - VOLTAGE_SCALE; - - *voltage |= 1 << PHASES_SHIFT; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - - if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i-1].vddci) { - vddci = fiji_find_closest_vddci(hwmgr, - (dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; - - return 0; -} - -static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock, - uint32_t clock_insr) -{ - uint8_t i; - uint32_t temp; - uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK); - - PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); - for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { - temp = clock >> i; - - if (temp >= min || i == 0) - break; - } - return i; -} -/** -* Populates single SMC SCLK structure using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ - -static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, - uint32_t clock, uint16_t sclk_al_threshold, - struct SMU73_Discrete_GraphicsLevel *level) -{ - int result; - /* PP_Clocks minClocks; */ - uint32_t threshold, mvdd; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - result = fiji_calculate_sclk_params(hwmgr, clock, level); - - /* populate graphics levels */ - result = fiji_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, clock, - &level->MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for " - "VDDC engine clock dependency table", - return result); - - level->SclkFrequency = clock; - level->ActivityLevel = sclk_al_threshold; - level->CcPwrDynRm = 0; - level->CcPwrDynRm1 = 0; - level->EnabledForActivity = 0; - level->EnabledForThrottle = 1; - level->UpHyst = 10; - level->DownHyst = 0; - level->VoltageDownHyst = 0; - level->PowerThrottle = 0; - - threshold = clock * data->fast_watermark_threshold / 100; - - - data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) - level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock, - hwmgr->display_config.min_core_set_clock_in_sr); - - - /* Default to slow, highest DPM level will be - * set to PPSMC_DISPLAY_WATERMARK_LOW later. 
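fiji_get_sleep_divider_id_from_clock above picks the largest deep-sleep divider ID whose divided engine clock still meets the minimum allowed clock. A standalone sketch of the same search; FIJI_MINIMUM_ENGINE_CLOCK and FIJI_MAX_DEEPSLEEP_DIVIDER_ID are replaced by illustrative placeholder values, not the driver's actual constants:

#include <stdint.h>

#define MIN_ENGINE_CLOCK	2500	/* placeholder for FIJI_MINIMUM_ENGINE_CLOCK */
#define MAX_DEEPSLEEP_DIV_ID	5	/* placeholder for FIJI_MAX_DEEPSLEEP_DIVIDER_ID */

/* Return the largest i such that (clock >> i) still satisfies the minimum. */
static uint8_t sleep_divider_id(uint32_t clock, uint32_t clock_insr)
{
	uint32_t min = clock_insr > MIN_ENGINE_CLOCK ? clock_insr : MIN_ENGINE_CLOCK;
	uint8_t i;

	for (i = MAX_DEEPSLEEP_DIV_ID; i > 0; i--) {
		if ((clock >> i) >= min)
			break;
	}
	return i;	/* 0 means no deep-sleep division is possible */
}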
- */ - level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); - CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); - - return 0; -} -/** -* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; - int result = 0; - uint32_t array = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * - SMU73_MAX_LEVELS_GRAPHICS; - struct SMU73_Discrete_GraphicsLevel *levels = - data->smc_state_table.GraphicsLevel; - uint32_t i, max_entry; - uint8_t hightest_pcie_level_enabled = 0, - lowest_pcie_level_enabled = 0, - mid_pcie_level_enabled = 0, - count = 0; - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - result = fiji_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &levels[i]); - if (result) - return result; - - /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ - if (i > 1) - levels[i].DeepSleepDivId = 0; - } - - /* Only enable level 0 for now.*/ - levels[0].EnabledForActivity = 1; - - /* set highest level watermark to high */ - levels[dpm_table->sclk_table.count - 1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - if (pcie_table != NULL) { - PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), - "There must be 1 or more PCIE levels defined in PPTable.", - return -EINVAL); - max_entry = pcie_entry_cnt - 1; - for (i = 0; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = - (uint8_t) ((i < max_entry)? i : max_entry); - } else { - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (hightest_pcie_level_enabled + 1))) != 0 )) - hightest_pcie_level_enabled++; - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << lowest_pcie_level_enabled)) == 0 )) - lowest_pcie_level_enabled++; - - while ((count < hightest_pcie_level_enabled) && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 )) - count++; - - mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1+ count) < - hightest_pcie_level_enabled? 
- (lowest_pcie_level_enabled + 1 + count) : - hightest_pcie_level_enabled; - - /* set pcieDpmLevel to hightest_pcie_level_enabled */ - for(i = 2; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = hightest_pcie_level_enabled; - - /* set pcieDpmLevel to lowest_pcie_level_enabled */ - levels[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled */ - levels[1].pcieDpmLevel = mid_pcie_level_enabled; - } - /* level count will send to smc once at init smc table and never change */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -/** - * MCLK Frequency Ratio - * SEQ_CG_RESP Bit[31:24] - 0x0 - * Bit[27:24] - DDR3 Frequency ratio - * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz - * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz - * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz - * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz - * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz - * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz - * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz - * 400 < 0x7 <= 450MHz, 800 < 0xF - */ -static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) -{ - if (mem_clock <= 10000) return 0x0; - if (mem_clock <= 15000) return 0x1; - if (mem_clock <= 20000) return 0x2; - if (mem_clock <= 25000) return 0x3; - if (mem_clock <= 30000) return 0x4; - if (mem_clock <= 35000) return 0x5; - if (mem_clock <= 40000) return 0x6; - if (mem_clock <= 45000) return 0x7; - if (mem_clock <= 50000) return 0x8; - if (mem_clock <= 55000) return 0x9; - if (mem_clock <= 60000) return 0xa; - if (mem_clock <= 65000) return 0xb; - if (mem_clock <= 70000) return 0xc; - if (mem_clock <= 75000) return 0xd; - if (mem_clock <= 80000) return 0xe; - /* mem_clock > 800MHz */ - return 0xf; -} - -/** -* Populates the SMC MCLK structure using the provided memory clock -* -* @param hwmgr the address of the hardware manager -* @param clock the memory clock to use to populate the structure -* @param mclk the SMC MCLK structure to be populated -*/ -static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) -{ - struct pp_atomctrl_memory_clock_param mem_param; - int result; - - result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to get Memory PLL Dividers.",); - - /* Save the result data to output memory level structure */ - mclk->MclkFrequency = clock; - mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; - mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); - - return result; -} - -static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - - if (table_info->vdd_dep_on_mclk) { - result = fiji_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, clock, - &mem_level->MinVoltage, &mem_level->MinMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory " - "VDDC voltage dependency table", return result); - } - - mem_level->EnabledForThrottle = 1; - mem_level->EnabledForActivity = 0; - mem_level->UpHyst = 0; - mem_level->DownHyst = 100; - mem_level->VoltageDownHyst = 0; - mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - mem_level->StutterEnable = false;
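The comment table above fiji_get_mclk_frequency_ratio maps the memory clock onto the 4-bit SEQ_CG_RESP ratio field in 50 MHz steps; the thresholds (10000 for the 100 MHz boundary and so on) suggest mem_clock is expressed in units of 10 kHz. A small self-contained sketch of the equivalent bucket arithmetic under that assumption, with an illustrative helper name:

#include <stdint.h>

/* Assuming mem_clock is in 10 kHz units (10000 == 100 MHz):
 * one code per 50 MHz step above 100 MHz, capped at 0xf. */
static uint8_t mclk_ratio(uint32_t mem_clock)
{
	uint32_t code = mem_clock <= 10000 ? 0 : (mem_clock - 5001) / 5000;
	return (uint8_t)(code > 0xf ? 0xf : code);
}

/* mclk_ratio(37500) == 0x6 (375 MHz); mclk_ratio(90000) == 0xf (above the 800 MHz bucket). */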
- - mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - /* enable stutter mode if all the follow condition applied - * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, - * &(data->DisplayTiming.numExistingDisplays)); - */ - data->display_timing.num_existing_displays = 1; - - if ((data->mclk_stutter_mode_threshold) && - (clock <= data->mclk_stutter_mode_threshold) && - (!data->is_uvd_enabled) && - (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, - STUTTER_ENABLE) & 0x1)) - mem_level->StutterEnable = true; - - result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); - if (!result) { - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); - } - return result; -} - -/** -* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t array = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, MemoryLevel); - uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * - SMU73_MAX_LEVELS_MEMORY; - struct SMU73_Discrete_MemoryLevel *levels = - data->smc_state_table.MemoryLevel; - uint32_t i; - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", - return -EINVAL); - result = fiji_populate_single_memory_level(hwmgr, - dpm_table->mclk_table.dpm_levels[i].value, - &levels[i]); - if (result) - return result; - } - - /* Only enable level 0 for now. */ - levels[0].EnabledForActivity = 1; - - /* in order to prevent MC activity from stutter mode to push DPM up. - * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not effected by - * up threshold or and MCLK DPM latency. - */ - levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; - CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = - (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - /* set highest level watermark to high */ - levels[dpm_table->mclk_table.count - 1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - /* level count will send to smc once at init smc table and never change */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -/** -* Populates the SMC MVDD structure using the provided memory clock. -* -* @param hwmgr the address of the hardware manager -* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
-* @param voltage the SMC VOLTAGE structure to be populated -*/ -int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr, - uint32_t mclk, SMIO_Pattern *smio_pat) -{ - const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i = 0; - - if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) { - /* find mvdd value which clock is more than request */ - for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { - if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { - smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; - break; - } - } - PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, - "MVDD Voltage is outside the supported range.", - return -EINVAL); - } else - return -EINVAL; - - return 0; -} - -static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, - SMU73_Discrete_DpmTable *table) -{ - int result = 0; - const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct pp_atomctrl_clock_dividers_vi dividers; - SMIO_Pattern vol_level; - uint32_t mvdd; - uint16_t us_mvdd; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; - - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - if (!data->sclk_dpm_key_disabled) { - /* Get MinVoltage and Frequency from DPM0, - * already converted to SMC_UL */ - table->ACPILevel.SclkFrequency = - data->dpm_table.sclk_table.dpm_levels[0].value; - result = fiji_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, - table->ACPILevel.SclkFrequency, - &table->ACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDC voltage value " - "in Clock Dependency Table",); - } else { - table->ACPILevel.SclkFrequency = - data->vbios_boot_state.sclk_bootup_value; - table->ACPILevel.MinVoltage = - data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; - } - - /* get the engine clock dividers for this clock value */ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, - table->ACPILevel.SclkFrequency, &dividers); - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", - return result); - - table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; - table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - table->ACPILevel.DeepSleepDivId = 0; - - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, - SPLL_PWRON, 0); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, - SPLL_RESET, 1); - spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2, - SCLK_MUX_SEL, 4); - - table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; - table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; - table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); -
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); - - if (!data->mclk_dpm_key_disabled) { - /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = - data->dpm_table.mclk_table.dpm_levels[0].value; - result = fiji_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, - table->MemoryACPILevel.MclkFrequency, - &table->MemoryACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDCI voltage value " - "in Clock Dependency Table",); - } else { - table->MemoryACPILevel.MclkFrequency = - data->vbios_boot_state.mclk_bootup_value; - table->MemoryACPILevel.MinVoltage = - data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE; - } - - us_mvdd = 0; - if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) || - (data->mclk_dpm_key_disabled)) - us_mvdd = data->vbios_boot_state.mvdd_bootup_value; - else { - if (!fiji_populate_mvdd_value(hwmgr, - data->dpm_table.mclk_table.dpm_levels[0].value, - &vol_level)) - us_mvdd = vol_level.Voltage; - } - - table->MemoryACPILevel.MinMvdd = - PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE); - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpHyst = 0; - table->MemoryACPILevel.DownHyst = 100; - table->MemoryACPILevel.VoltageDownHyst = 0; - table->MemoryACPILevel.ActivityLevel = - PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); - - table->MemoryACPILevel.StutterEnable = false; - CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); - - return result; -} - -static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, - SMU73_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - table->VceLevelCount = (uint8_t)(mm_table->count); - table->VceBootLevel = 0; - - for(count = 0; count < table->VceLevelCount; count++) { - table->VceLevel[count].Frequency = mm_table->entries[count].eclk; - table->VceLevel[count].MinVoltage = 0; - table->VceLevel[count].MinVoltage |= - (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - table->VceLevel[count].MinVoltage |= - ((mm_table->entries[count].vddc - data->vddc_vddci_delta) * - VOLTAGE_SCALE) << VDDCI_SHIFT; - table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /*retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->VceLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for VCE engine clock", - return result); - - table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); -
CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); - } - return result; -} - -static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, - SMU73_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - table->AcpLevelCount = (uint8_t)(mm_table->count); - table->AcpBootLevel = 0; - - for (count = 0; count < table->AcpLevelCount; count++) { - table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; - table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->AcpLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for engine clock", return result); - - table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage); - } - return result; -} - -static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU73_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].MinVoltage = 0; - table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - -static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, - int32_t eng_clock, int32_t mem_clock, - struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) -{ - uint32_t dram_timing; - uint32_t dram_timing2; - uint32_t burstTime; - ULONG state, trrds, trrdl; - int result; - - result = atomctrl_set_engine_dram_timings_rv770(hwmgr, - eng_clock, mem_clock); - PP_ASSERT_WITH_CODE(result == 0, - "Error calling VBIOS to set DRAM_TIMING.", return result); - - dram_timing =
cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); - - state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0); - trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0); - trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0); - - arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); - arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); - arb_regs->McArbBurstTime = (uint8_t)burstTime; - arb_regs->TRRDS = (uint8_t)trrds; - arb_regs->TRRDL = (uint8_t)trrdl; - - return 0; -} - -static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct SMU73_Discrete_MCArbDramTimingTable arb_regs; - uint32_t i, j; - int result = 0; - - for (i = 0; i < data->dpm_table.sclk_table.count; i++) { - for (j = 0; j < data->dpm_table.mclk_table.count; j++) { - result = fiji_populate_memory_timing_parameters(hwmgr, - data->dpm_table.sclk_table.dpm_levels[i].value, - data->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - if (result) - break; - } - } - - if (!result) - result = fiji_copy_bytes_to_smc( - hwmgr->smumgr, - data->arb_table_start, - (uint8_t *)&arb_regs, - sizeof(SMU73_Discrete_MCArbDramTimingTable), - data->sram_end); - return result; -} - -static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - table->UvdLevelCount = (uint8_t)(mm_table->count); - table->UvdBootLevel = 0; - - for (count = 0; count < table->UvdLevelCount; count++) { - table->UvdLevel[count].MinVoltage = 0; - table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; - table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; - table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].VclkFrequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Vclk clock", return result); - - table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; - - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].DclkFrequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Dclk clock", return result); - - table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); - - } - return result; -} - -static int fiji_find_boot_level(struct fiji_single_dpm_table *table, - uint32_t value, uint32_t *boot_level) -{ - int result = -EINVAL; - uint32_t i; - - for (i = 0; i < table->count; i++) { - if (value ==
table->dpm_levels[i].value) { - *boot_level = i; - result = 0; - } - } - return result; -} - -static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - int result = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - table->GraphicsBootLevel = 0; - table->MemoryBootLevel = 0; - - /* find boot level from dpm table */ - result = fiji_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(table->GraphicsBootLevel)); - - result = fiji_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(table->MemoryBootLevel)); - - table->BootVddc = data->vbios_boot_state.vddc_bootup_value * - VOLTAGE_SCALE; - table->BootVddci = data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE; - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); - CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); - - return 0; -} - -static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint8_t count, level; - - count = (uint8_t)(table_info->vdd_dep_on_sclk->count); - for (level = 0; level < count; level++) { - if(table_info->vdd_dep_on_sclk->entries[level].clk >= - data->vbios_boot_state.sclk_bootup_value) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t)(table_info->vdd_dep_on_mclk->count); - for (level = 0; level < count; level++) { - if(table_info->vdd_dep_on_mclk->entries[level].clk >= - data->vbios_boot_state.mclk_bootup_value) { - data->smc_state_table.MemoryBootLevel = level; - break; - } - } - - return 0; -} - -static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) -{ - uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, - volt_with_cks, value; - uint16_t clock_freq_u16; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, - volt_offset = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - - stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; - - /* Read SMU_Eefuse to read and calculate RO and determine - * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
- */ - efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (146 * 4)); - efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (148 * 4)); - efuse &= 0xFF000000; - efuse = efuse >> 24; - efuse2 &= 0xF; - - if (efuse2 == 1) - ro = (2300 - 1350) * efuse / 255 + 1350; - else - ro = (2500 - 1000) * efuse / 255 + 1000; - - if (ro >= 1660) - type = 0; - else - type = 1; - - /* Populate Stretch amount */ - data->smc_state_table.ClockStretcherAmount = stretch_amount; - - /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ - for (i = 0; i < sclk_table->count; i++) { - data->smc_state_table.Sclk_CKS_masterEn0_7 |= - sclk_table->entries[i].cks_enable << i; - volt_without_cks = (uint32_t)((14041 * - (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / - (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); - volt_with_cks = (uint32_t)((13946 * - (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / - (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); - if (volt_without_cks >= volt_with_cks) - volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + - sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); - data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; - } - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - STRETCH_ENABLE, 0x0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - staticEnable, 0x1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x0); - - /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - PP_ASSERT_WITH_CODE(false, - "Stretch Amount in PPTable not supported\n", - return -EINVAL); - } - - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL); - value &= 0xFFC2FF87; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = - fiji_clock_stretcher_lookup_table[stretch_amount2][0]; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = - fiji_clock_stretcher_lookup_table[stretch_amount2][1]; - clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. - GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1]. - SclkFrequency) / 100); - if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < - clock_freq_u16 && - fiji_clock_stretcher_lookup_table[stretch_amount2][1] > - clock_freq_u16) { - /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ - value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; - /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ - value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; - /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ - value |= (fiji_clock_stretch_amount_conversion - [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] - [stretch_amount]) << 3; - } - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable. - CKS_LOOKUPTableEntry[0].minFreq); - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable. 
- CKS_LOOKUPTableEntry[0].maxFreq); - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = - fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= - (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL, value); - - /* Populate DDT Lookup Table */ - for (i = 0; i < 4; i++) { - /* Assign the minimum and maximum VID stored - * in the last row of Clock Stretcher Voltage Table. - */ - data->smc_state_table.ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].minVID = - (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; - data->smc_state_table.ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].maxVID = - (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; - /* Loop through each SCLK and check the frequency - * to see if it lies within the frequency for clock stretcher. - */ - for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { - cks_setting = 0; - clock_freq = PP_SMC_TO_HOST_UL( - data->smc_state_table.GraphicsLevel[j].SclkFrequency); - /* Check the allowed frequency against the sclk level[j]. - * Sclk's endianness has already been converted, - * and it's in 10Khz unit, - * as opposed to Data table, which is in Mhz unit. - */ - if (clock_freq >= - (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { - cks_setting |= 0x2; - if (clock_freq < - (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) - cks_setting |= 0x1; - } - data->smc_state_table.ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); - } - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table. - ClockStretcherDataTable. - ClockStretcherDataTableEntry[i].setting); - } - - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); - value &= 0xFFFFFFFE; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); - - return 0; -} - -/** -* Populates the SMC VRConfig field in DPM table. 
-* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, - struct SMU73_Discrete_DpmTable *table) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint16_t config; - - config = VR_MERGED_WITH_VDDC; - table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); - - /* Set Vddc Voltage Controller */ - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_1; - table->VRConfig |= config; - } else { - PP_ASSERT_WITH_CODE(false, - "VDDC should be on SVI2 control in merged mode!",); - } - /* Set Vddci Voltage Controller */ - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - config = VR_SVI2_PLANE_2; /* only in merged mode */ - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - config = VR_SMIO_PATTERN_1; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } - /* Set Mvdd Voltage Controller */ - if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - config = VR_SVI2_PLANE_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } - - return 0; -} - -/** -* Initializes the SMC table and uploads it -* -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data (PowerState) -* @return always 0 -*/ -static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table); - const struct fiji_ulv_parm *ulv = &(data->ulv); - uint8_t i; - struct pp_atomctrl_gpio_pin_assignment gpio_pin; - - result = fiji_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result); - - if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control) - fiji_populate_smc_voltage_tables(hwmgr, table); - - table->SystemFlags = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - - if (data->is_memory_gddr5) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - - if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) { - result = fiji_populate_ulv_state(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter); - } - - result = fiji_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result); - - result = fiji_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result); - - result = fiji_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to 
initialize Memory Level!", return result); - - result = fiji_populate_smc_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result); - - result = fiji_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result); - - result = fiji_populate_smc_acp_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACP Level!", return result); - - result = fiji_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - - /* Since only the initial state is completely set up at this point - * (the other states are just copies of the boot state) we only - * need to populate the ARB settings for the initial state. - */ - result = fiji_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result); - - result = fiji_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result); - - result = fiji_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot Level!", return result); - - result = fiji_populate_smc_initailial_state(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot State!", return result); - - result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate BAPM Parameters!", return result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - result = fiji_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate Clock Stretcher Data Table!", - return result); - } - - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - table_info->cac_dtp_table->usTargetOperatingTemp * - FIJI_Q88_FORMAT_CONVERSION_UNIT; - table->TemperatureLimitLow = - (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * - FIJI_Q88_FORMAT_CONVERSION_UNIT; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ - table->PCIeGenInterval = 1; - table->VRConfig = 0; - - result = fiji_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate VRConfig setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { - table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } else { - table->VRHotGpio = FIJI_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin)) { - table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = FIJI_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - /* Thermal Output GPIO 
*/ - if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, - &gpio_pin)) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; - - /* For porlarity read GPIOPAD_A with assigned Gpio pin - * since VBIOS will program this register to set 'inactive state', - * driver can then determine 'active state' from this and - * program SMU with correct polarity - */ - table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & - (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0; - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; - - /* if required, combine VRHot/PCC with thermal out GPIO */ - if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot) && - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CombinePCCWithThermalSignal)) - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; - } else { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - table->ThermOutGpio = 17; - table->ThermOutPolarity = 1; - table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; - } - - for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++) - table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); - - CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); - CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); - CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); - CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); - CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); - - /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = fiji_copy_bytes_to_smc(hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, SystemFlags), - (uint8_t *)&(table->SystemFlags), - sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), - data->sram_end); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to upload dpm data to SMC memory!", return result); - - return 0; -} - -/** -* Initialize the ARB DRAM timing table's index field. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) -{ - const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t tmp; - int result; - - /* This is a read-modify-write on the first byte of the ARB table. - * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure - * is the field 'current'. - * This solution is ugly, but we never write the whole table only - * individual fields in it. - * In reality this field should not be in that structure - * but in a soft register. 
- */ - result = fiji_read_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, &tmp, data->sram_end); - - if (result) - return result; - - tmp &= 0x00FFFFFF; - tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - - return fiji_write_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, tmp, data->sram_end); -} - -static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) -{ - if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableVRHotGPIOInterrupt); - - return 0; -} - -static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - SCLK_PWRMGT_OFF, 0); - return 0; -} - -static int fiji_enable_ulv(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); - - return 0; -} - -static int fiji_disable_ulv(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); - - return 0; -} - -static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) - PP_ASSERT_WITH_CODE(false, - "Attempt to enable Master Deep Sleep switch failed!", - return -1); - } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t val, val0, val2; - uint32_t i, cpl_cntl, cpl_threshold, mc_threshold; - - /* enable SCLK dpm */ - if(!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - - /* enable MCLK dpm */ - if(0 == data->mclk_dpm_key_disabled) { - cpl_threshold = 0; - mc_threshold = 0; - - /* Read per MCD tile (0 - 7) */ - for (i = 0; i < 8; i++) { - PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i); - val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000; - if (0xf0000000 != val) { - /* count number of MCQ that has channel(s) enabled */ - cpl_threshold++; - /* only harvest 3 or full 4 supported */ - mc_threshold = val ? 
3 : 4; - } - } - PP_ASSERT_WITH_CODE(0 != cpl_threshold, - "Number of MCQ is zero!", return -EINVAL;); - - mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) << - LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) | - LCAC_MC0_CNTL__MC0_ENABLE_MASK; - cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) << - LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) | - LCAC_CPL_CNTL__CPL_ENABLE_MASK; - cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT)); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, mc_threshold); - if (8 == cpl_threshold) { - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC2_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC3_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC4_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC5_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC6_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC7_CNTL, mc_threshold); - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, cpl_cntl); - - udelay(5); - - mc_threshold = mc_threshold | - (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT); - cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, mc_threshold); - if (8 == cpl_threshold) { - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC2_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC3_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC4_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC5_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC6_CNTL, mc_threshold); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC7_CNTL, mc_threshold); - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, cpl_cntl); - - /* Program CAC_EN per MCD (0-7) Tile */ - val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD); - val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK | - MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK | - MC_CONFIG_MCD__MC_RD_ENABLE_MASK); - - for (i = 0; i < 8; i++) { - /* Enable MCD i Tile read & write */ - val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) | - (1 << i)); - cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2); - /* Enbale CAC_ON MCD i Tile */ - val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL); - val2 |= MC_SEQ_CNTL__CAC_EN_MASK; - cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2); - } - /* Set MC_CONFIG_MCD back to its default setting val0 */ - cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - } - return 0; -} - -static int fiji_start_dpm(struct pp_hwmgr 
*hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /*enable general power management */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 1); - /* enable sclk deep sleep */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 1); - /* prepare for PCIE DPM */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + offsetof(SMU73_SoftRegisters, - VoltageChangeTimeout), 0x1000); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - SWRST_COMMAND_1, RESETLC, 0x0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); - - if (fiji_enable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); - return -1; - } - - /* enable PCIE dpm */ - if(!data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start Function!", - return -1); - } - - return 0; -} - -static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* disable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable) == 0), - "Failed to disable SCLK DPM!", - return -1); - - /* disable MCLK dpm */ - if (!data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0), - "Failed to force MCLK DPM0!", - return -1); - - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable) == 0), - "Failed to disable MCLK DPM!", - return -1); - } - - return 0; -} - -static int fiji_stop_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* disable general power management */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 0); - /* disable sclk deep sleep */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 0); - - /* disable PCIE dpm */ - if (!data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Disable) == 0), - "Failed to disable pcie DPM during DPM Stop Function!", - return -1); - } - - if (fiji_disable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); - return -1; - } - - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Disable) == 0), - "Failed to disable voltage DPM during DPM Stop Function!", - return -1); - - return 0; -} - -static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, - uint32_t sources) -{ - bool protection; - enum DPM_EVENT_SRC src; - - switch (sources) { - default: - printk(KERN_ERR "Unknown throttling event sources."); - /* fall through */ - case 0: - protection = false; - /* src is unused */ - break; - case (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL; - break; - case (1 << PHM_AutoThrottleSource_External): - protection = true; - src = DPM_EVENT_SRC_EXTERNAL; - break; - case (1 << PHM_AutoThrottleSource_External) | - (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; - 
break; - } - /* Order matters - don't enable thermal protection for the wrong source. */ - if (protection) { - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, - DPM_EVENT_SRC, src); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, - !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)); - } else - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, 1); -} - -static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!(data->active_auto_throttle_sources & (1 << source))) { - data->active_auto_throttle_sources |= 1 << source; - fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->active_auto_throttle_sources & (1 << source)) { - data->active_auto_throttle_sources &= ~(1 << source); - fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = (!fiji_is_dpm_running(hwmgr))? 0 : -1; - PP_ASSERT_WITH_CODE(result == 0, - "DPM is already running right now, no need to enable DPM!", - return 0); - - if (fiji_voltage_control(hwmgr)) { - tmp_result = fiji_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE(tmp_result == 0, - "Failed to enable voltage control!", - result = tmp_result); - } - - if (fiji_voltage_control(hwmgr)) { - tmp_result = fiji_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", - result = tmp_result); - } - - tmp_result = fiji_initialize_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize MC reg table!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); - - tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", - result = tmp_result); - - tmp_result = fiji_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", result = tmp_result); - - tmp_result = fiji_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", result = tmp_result); - - tmp_result = fiji_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", result = tmp_result); - - tmp_result = 
fiji_initial_switch_from_arbf0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", - result = tmp_result); - - tmp_result = fiji_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", result = tmp_result); - - tmp_result = fiji_init_arb_table_index(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize ARB table index!", result = tmp_result); - - tmp_result = fiji_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate PM fuses!", result = tmp_result); - - tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - - tmp_result = tonga_notify_smc_display_change(hwmgr, false); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify no display!", result = tmp_result); - - tmp_result = fiji_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", result = tmp_result); - - tmp_result = fiji_enable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ULV!", result = tmp_result); - - tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable deep sleep master switch!", result = tmp_result); - - tmp_result = fiji_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", result = tmp_result); - - tmp_result = fiji_enable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SMC CAC!", result = tmp_result); - - tmp_result = fiji_enable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable power containment!", result = tmp_result); - - tmp_result = fiji_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to power control set level!", result = tmp_result); - - tmp_result = fiji_enable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable thermal auto throttle!", result = tmp_result); - - return result; -} - -static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = (fiji_is_dpm_running(hwmgr)) ? 
0 : -1; - PP_ASSERT_WITH_CODE(tmp_result == 0, - "DPM is not running right now, no need to disable DPM!", - return 0); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); - - tmp_result = fiji_disable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable power containment!", result = tmp_result); - - tmp_result = fiji_disable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable SMC CAC!", result = tmp_result); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); - - tmp_result = fiji_disable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable thermal auto throttle!", result = tmp_result); - - tmp_result = fiji_stop_dpm(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to stop DPM!", result = tmp_result); - - tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable deep sleep master switch!", result = tmp_result); - - tmp_result = fiji_disable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable ULV!", result = tmp_result); - - tmp_result = fiji_clear_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to clear voting clients!", result = tmp_result); - - tmp_result = fiji_reset_to_default(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to reset to default!", result = tmp_result); - - tmp_result = fiji_force_switch_to_arbf0(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to force to switch arbf0!", result = tmp_result); - - return result; -} - -static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t level, tmp; - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++; - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - (1 << level)); - } - } - return 0; -} - -static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - phm_apply_dal_min_voltage_request(hwmgr); - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - } - return 0; -} - -static int fiji_unforce_dpm_levels(struct pp_hwmgr 
*hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!fiji_is_dpm_running(hwmgr)) - return -EINVAL; - - if (!data->pcie_dpm_key_disabled) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel); - } - - return fiji_upload_dpmlevel_enable_mask(hwmgr); -} - -static uint32_t fiji_get_lowest_enabled_level( - struct pp_hwmgr *hwmgr, uint32_t mask) -{ - uint32_t level = 0; - - while(0 == (mask & (1 << level))) - level++; - - return level; -} - -static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = - (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t level; - - if (!data->sclk_dpm_key_disabled) - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = fiji_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = fiji_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = fiji_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.pcie_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - (1 << level)); - } - } - - return 0; - -} -static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = fiji_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = fiji_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = fiji_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - - return ret; -} - -static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct fiji_power_state); -} - -static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - void *state, struct pp_power_state *power_state, - void *pp_table, uint32_t classification_flag) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_power_state *fiji_power_state = - (struct fiji_power_state *)(&(power_state->hardware)); - struct fiji_performance_level *performance_level; - ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; - ATOM_Tonga_POWERPLAYTABLE *powerplay_table = - (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (ATOM_Tonga_SCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); - ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = - (ATOM_Tonga_MCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - - /* The following fields are not initialized here: id orderedList allStatesList */ - power_state->classification.ui_label = - (le16_to_cpu(state_entry->usClassification) & - ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> - ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; - power_state->classification.flags = classification_flag; - /* NOTE: There is a classification2 flag in BIOS that is not being used right 
now */ - - power_state->classification.temporary_state = false; - power_state->classification.to_be_deleted = false; - - power_state->validation.disallowOnDC = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_DISALLOW_ON_DC)); - - power_state->pcie.lanes = 0; - - power_state->display.disableFrameModulation = false; - power_state->display.limitRefreshrate = false; - power_state->display.enableVariBright = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_ENABLE_VARIBRIGHT)); - - power_state->validation.supportedPowerLevels = 0; - power_state->uvd_clocks.VCLK = 0; - power_state->uvd_clocks.DCLK = 0; - power_state->temperatures.min = 0; - power_state->temperatures.max = 0; - - performance_level = &(fiji_power_state->performance_levels - [fiji_power_state->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (fiji_power_state->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - "Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged from low to high. */ - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexLow].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries - [state_entry->ucEngineClockIndexLow].ulSclk; - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenLow); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - performance_level = &(fiji_power_state->performance_levels - [fiji_power_state->performance_level_count++]); - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexHigh].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries - [state_entry->ucEngineClockIndexHigh].ulSclk; - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenHigh); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - return 0; -} - -static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *state) -{ - int result; - struct fiji_power_state *ps; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - state->hardware.magic = PHM_VIslands_Magic; - - ps = (struct fiji_power_state *)(&state->hardware); - - result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state, - fiji_get_pp_table_entry_callback_func); - - /* This is the earliest time we have all the dependency table and the VBIOS boot state - * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state - * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].vddci != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does 
not match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!state->validation.disallowOnDC) - ps->dc_compatible = true; - - if (state->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; - - ps->uvd_clks.vclk = state->uvd_clocks.VCLK; - ps->uvd_clks.dclk = state->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (state->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_performance.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *request_ps, - const struct pp_power_state *current_ps) -{ - struct fiji_power_state *fiji_ps = - cast_phw_fiji_power_state(&request_ps->hardware); - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == - request_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2, - "VI should always have 2 performance levels",); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - /* Cap clock DPM tables at DC MAX if it is in DC. 
*/ - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < fiji_ps->performance_level_count; i++) { - if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk) - fiji_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk) - fiji_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; - fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; - - fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - - /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = table_info->vdd_dep_on_sclk->count - 1; - count >= 0; count--) { - if (stable_pstate_sclk >= - table_info->vdd_dep_on_sclk->entries[count].clk) { - stable_pstate_sclk = - table_info->vdd_dep_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - fiji_ps->performance_levels[1].engine_clock = - hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - fiji_ps->performance_levels[1].memory_clock = - hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = fiji_ps->performance_levels[0].engine_clock; - mclk = fiji_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? - max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? 
- max_limits->mclk : minimum_clocks.memoryClock; - - fiji_ps->performance_levels[0].engine_clock = sclk; - fiji_ps->performance_levels[0].memory_clock = mclk; - - fiji_ps->performance_levels[1].engine_clock = - (fiji_ps->performance_levels[1].engine_clock >= - fiji_ps->performance_levels[0].engine_clock) ? - fiji_ps->performance_levels[1].engine_clock : - fiji_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < fiji_ps->performance_levels[1].memory_clock) - mclk = fiji_ps->performance_levels[1].memory_clock; - - fiji_ps->performance_levels[0].memory_clock = mclk; - fiji_ps->performance_levels[1].memory_clock = mclk; - } else { - if (fiji_ps->performance_levels[1].memory_clock < - fiji_ps->performance_levels[0].memory_clock) - fiji_ps->performance_levels[1].memory_clock = - fiji_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - for (i = 0; i < fiji_ps->performance_level_count; i++) { - fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - fiji_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - - return 0; -} - -static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].engine_clock; - struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].memory_clock; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - if(data->display_timing.min_clock_in_sr != - hwmgr->display_config.min_core_set_clock_in_sr) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - - if (i >= mclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr, - const struct fiji_power_state *fiji_ps) -{ - uint32_t i; - uint32_t sclk, max_sclk = 0; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_dpm_table *dpm_table = &data->dpm_table; - - for (i = 0; i < fiji_ps->performance_level_count; i++) { - sclk = fiji_ps->performance_levels[i].engine_clock; - if (max_sclk < sclk) - max_sclk = sclk; - } - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) - return (uint16_t) ((i >= 
dpm_table->pcie_speed_table.count) ? - dpm_table->pcie_speed_table.dpm_levels - [dpm_table->pcie_speed_table.count - 1].value : - dpm_table->pcie_speed_table.dpm_levels[i].value); - } - - return 0; -} - -static int fiji_request_link_speed_change_before_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_nps = - cast_const_phw_fiji_power_state(states->pnew_state); - const struct fiji_power_state *fiji_cps = - cast_const_phw_fiji_power_state(states->pcurrent_state); - - uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch(target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int fiji_populate_and_upload_sclk_mclk_dpm_levels( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t sclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].engine_clock; - uint32_t mclk = fiji_ps->performance_levels - [fiji_ps->performance_level_count - 1].memory_clock; - struct fiji_dpm_table *dpm_table = &data->dpm_table; - - struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & 
DPMTABLE_OD_UPDATE_SCLK) { - dpm_table->sclk_table.dpm_levels - [dpm_table->sclk_table.count - 1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count - 1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->sclk_table.count < 2 ? - 0 : dpm_table->sclk_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (sclk > golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value) { - clock_percent = - ((sclk - golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value) * 100) / - golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value + - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent)/100; - - } else if (golden_dpm_table->sclk_table.dpm_levels - [dpm_table->sclk_table.count-1].value > sclk) { - clock_percent = - ((golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count - 1].value - sclk) * - 100) / - golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value - - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - dpm_table->mclk_table.dpm_levels - [dpm_table->mclk_table.count - 1].value = mclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->mclk_table.count < 2 ? 
- 0 : dpm_table->mclk_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (mclk > golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value) { - clock_percent = ((mclk - - golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value) * 100) / - golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value + - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - - } else if (golden_dpm_table->mclk_table.dpm_levels - [dpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ((golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) / - golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value - - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = fiji_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = fiji_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct fiji_single_dpm_table * dpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) || - (dpm_table->dpm_levels[i].value > high_limit)) - dpm_table->dpm_levels[i].enabled = false; - else - dpm_table->dpm_levels[i].enabled = true; - } - return 0; -} - -static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, - const struct fiji_power_state *fiji_ps) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == fiji_ps->performance_level_count) ? 
0 : 1; - - fiji_trim_single_dpm_states(hwmgr, - &(data->dpm_table.sclk_table), - fiji_ps->performance_levels[0].engine_clock, - fiji_ps->performance_levels[high_limit_count].engine_clock); - - fiji_trim_single_dpm_states(hwmgr, - &(data->dpm_table.mclk_table), - fiji_ps->performance_levels[0].memory_clock, - fiji_ps->performance_levels[high_limit_count].memory_clock); - - return 0; -} - -static int fiji_generate_dpm_level_enable_mask( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - - result = fiji_trim_dpm_states(hwmgr, fiji_ps); - if (result) - return result; - - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); - data->last_mclk_dpm_enable_mask = - data->dpm_level_enable_mask.mclk_dpm_enable_mask; - - if (data->uvd_enabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) - data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - } - - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); - - return 0; -} - -int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable : - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable); -} - -int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? - PPSMC_MSG_VCEDPM_Enable : - PPSMC_MSG_VCEDPM_Disable); -} - -int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? - PPSMC_MSG_SAMUDPM_Enable : - PPSMC_MSG_SAMUDPM_Disable); -} - -int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? 
- PPSMC_MSG_ACPDPM_Enable : - PPSMC_MSG_ACPDPM_Disable); -} - -int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.UvdBootLevel = 0; - if (table_info->mm_dep_table->count > 0) - data->smc_state_table.UvdBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0x00FFFFFF; - mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDPM) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); - } - - return fiji_enable_disable_uvd_dpm(hwmgr, !bgate); -} - -int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_nps = - cast_const_phw_fiji_power_state(states->pnew_state); - const struct fiji_power_state *fiji_cps = - cast_const_phw_fiji_power_state(states->pcurrent_state); - - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (fiji_nps->vce_clks.evclk >0 && - (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) { - data->smc_state_table.VceBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, VceBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFF00FFFF; - mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << data->smc_state_table.VceBootLevel); - - fiji_enable_disable_vce_dpm(hwmgr, true); - } else if (fiji_nps->vce_clks.evclk == 0 && - fiji_cps != NULL && - fiji_cps->vce_clks.evclk > 0) - fiji_enable_disable_vce_dpm(hwmgr, false); - } - - return 0; -} - -int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.SamuBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = 
data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.SamuBootLevel)); - } - - return fiji_enable_disable_samu_dpm(hwmgr, !bgate); -} - -int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.AcpBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, AcpBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFF00FF; - mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_ACPDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.AcpBootLevel)); - } - - return fiji_enable_disable_acp_dpm(hwmgr, !bgate); -} - -static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != - data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = - hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = - data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = fiji_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end); - } - - return result; -} - -static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return fiji_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - 
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -/* Look up the voltaged based on DAL's requested level. - * and then send the requested VDDC voltage to SMC - */ -static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr) -{ - return; -} - -int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - int result; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Apply minimum voltage based on DAL's request level */ - fiji_apply_dal_minimum_voltage_request(hwmgr); - - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, - * we should skip this message. - */ - if (!fiji_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] " - "Trying to set Enable Mask when DPM is disabled \n"); - - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Sclk Dpm enable Mask failed", return -1); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, - * we should skip this message. 
- */ - if (!fiji_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ]" - " Trying to set Enable Mask when DPM is disabled \n"); - - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Mclk Dpm enable Mask failed", return -1); - } - } - - return 0; -} - -static int fiji_notify_link_speed_change_after_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_power_state *fiji_ps = - cast_const_phw_fiji_power_state(states->pnew_state); - uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps); - uint8_t request; - - if (data->pspp_notify_required) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if(request == PCIE_PERF_REQ_GEN1 && - fiji_get_current_pcie_speed(hwmgr) > 0) - return 0; - - if (acpi_pcie_perf_request(hwmgr->device, request, false)) { - if (PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - return 0; -} - -static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr, - const void *input) -{ - int tmp_result, result = 0; - - tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to find DPM states clocks in DPM table!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - fiji_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to request link speed change before state change!", - result = tmp_result); - } - - tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate and upload SCLK MCLK DPM levels!", - result = tmp_result); - - tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to generate DPM level enabled mask!", - result = tmp_result); - - tmp_result = fiji_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update VCE DPM!", - result = tmp_result); - - tmp_result = fiji_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update SCLK threshold!", - result = tmp_result); - - tmp_result = fiji_program_mem_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program memory timing parameters!", - result = tmp_result); - - tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to unfreeze SCLK MCLK DPM!", - result = tmp_result); - - tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to upload DPM level enabled mask!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - 
PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - fiji_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify link speed change after state change!", - result = tmp_result); - } - - return result; -} - -static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - if (low) - return fiji_ps->performance_levels[0].engine_clock; - else - return fiji_ps->performance_levels - [fiji_ps->performance_level_count-1].engine_clock; -} - -static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - if (low) - return fiji_ps->performance_levels[0].memory_clock; - else - return fiji_ps->performance_levels - [fiji_ps->performance_level_count-1].memory_clock; -} - -static void fiji_print_current_perforce_level( - struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent = 0; - uint32_t offset; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", - mclk / 100, sclk / 100); - - offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en"); -} - -static int fiji_program_display_gap(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, - DISP_GAP, (num_active_displays > 0)? 
- DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if (refresh_rate == 0) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + - offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + - offsetof(SMU73_SoftRegisters, VBlankTimeout), - (frame_time_in_us - pre_vbi_time_in_us)); - - if (num_active_displays == 1) - tonga_notify_smc_display_change(hwmgr, true); - - return 0; -} - -int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - return fiji_program_display_gap(hwmgr); -} - -static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, - uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); -} - -static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, - uint16_t us_max_fan_rpm) -{ - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); -} - -int fiji_dpm_set_interrupt_state(void *private_data, - unsigned src_id, unsigned type, - int enabled) -{ - uint32_t cg_thermal_int; - struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; - - if (hwmgr == NULL) - return -EINVAL; - - switch (type) { - case AMD_THERMAL_IRQ_LOW_TO_HIGH: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - - case AMD_THERMAL_IRQ_HIGH_TO_LOW: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - default: - break; - } - return 0; -} - -int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - int result; - const struct pp_interrupt_registration_info *info = - (const struct pp_interrupt_registration_info *) - thermal_interrupt_info; - - if (info == NULL) - return 
-EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, - fiji_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, - fiji_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - return 0; -} - -static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - fiji_fan_ctrl_stop_smc_fan_control(hwmgr); - fiji_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - - case PP_PCIE: - { - uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? 
"*" : ""); - break; - case PP_PCIE: - pcie_speed = fiji_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? "*" : ""); - break; - default: - break; - } - return size; -} - -static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1, - const struct fiji_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1); - const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2); - int i; - - if (equal == NULL || psa == NULL || psb == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); - *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - *equal &= (psa->acp_clk == psb->acp_clk); - - return 0; -} - -bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0,0,NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr) - is_update_required = true; - } - - return is_update_required; -} - -static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct fiji_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr 
*)(hwmgr->backend); - struct fiji_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct fiji_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct fiji_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct fiji_power_state *fiji_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - fiji_ps = cast_phw_fiji_power_state(&ps->hardware); - - fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} - -static const struct pp_hwmgr_func fiji_hwmgr_funcs = { - .backend_init = &fiji_hwmgr_backend_init, - .backend_fini = &fiji_hwmgr_backend_fini, - .asic_setup = &fiji_setup_asic_task, - .dynamic_state_management_enable = &fiji_enable_dpm_tasks, - .dynamic_state_management_disable = &fiji_disable_dpm_tasks, - .force_dpm_level = &fiji_dpm_force_dpm_level, - .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries, - .get_power_state_size = &fiji_get_power_state_size, - .get_pp_table_entry = &fiji_get_pp_table_entry, - .patch_boot_state = &fiji_patch_boot_state, - .apply_state_adjust_rules = &fiji_apply_state_adjust_rules, - .power_state_set = &fiji_set_power_state_tasks, - .get_sclk = &fiji_dpm_get_sclk, - .get_mclk = &fiji_dpm_get_mclk, - .print_current_perforce_level = &fiji_print_current_perforce_level, - .powergate_uvd = &fiji_phm_powergate_uvd, - .powergate_vce = &fiji_phm_powergate_vce, - .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating, - .notify_smc_display_config_after_ps_adjustment = - &tonga_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = &fiji_display_configuration_changed_task, - .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output, - .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output, - .get_temperature = fiji_thermal_get_temperature, - .stop_thermal_controller = fiji_thermal_stop_thermal_controller, - .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = 
fiji_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, - .set_fan_control_mode = fiji_set_fan_control_mode, - .get_fan_control_mode = fiji_get_fan_control_mode, - .check_states_equal = fiji_check_states_equal, - .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration, - .force_clock_level = fiji_force_clock_level, - .print_clock_levels = fiji_print_clock_levels, - .get_sclk_od = fiji_get_sclk_od, - .set_sclk_od = fiji_set_sclk_od, - .get_mclk_od = fiji_get_mclk_od, - .set_mclk_od = fiji_set_mclk_od, -}; - -int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - hwmgr->hwmgr_func = &fiji_hwmgr_funcs; - hwmgr->pptable_func = &tonga_pptable_funcs; - pp_fiji_thermal_initialize(hwmgr); - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h deleted file mode 100644 index bf67c2a92c68..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef _FIJI_HWMGR_H_ -#define _FIJI_HWMGR_H_ - -#include "hwmgr.h" -#include "smu73.h" -#include "smu73_discrete.h" -#include "ppatomctrl.h" -#include "fiji_ppsmc.h" -#include "pp_endian.h" - -#define FIJI_MAX_HARDWARE_POWERLEVELS 2 -#define FIJI_AT_DFLT 30 - -#define FIJI_VOLTAGE_CONTROL_NONE 0x0 -#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2 -#define FIJI_VOLTAGE_CONTROL_MERGED 0x3 - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -struct fiji_performance_level { - uint32_t memory_clock; - uint32_t engine_clock; - uint16_t pcie_gen; - uint16_t pcie_lane; -}; - -struct fiji_uvd_clocks { - uint32_t vclk; - uint32_t dclk; -}; - -struct fiji_vce_clocks { - uint32_t evclk; - uint32_t ecclk; -}; - -struct fiji_power_state { - uint32_t magic; - struct fiji_uvd_clocks uvd_clks; - struct fiji_vce_clocks vce_clks; - uint32_t sam_clk; - uint32_t acp_clk; - uint16_t performance_level_count; - bool dc_compatible; - uint32_t sclk_threshold; - struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS]; -}; - -struct fiji_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; - -#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define FIJI_MINIMUM_ENGINE_CLOCK 2500 - -struct fiji_single_dpm_table { - uint32_t count; - struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct fiji_dpm_table { - struct fiji_single_dpm_table sclk_table; - struct fiji_single_dpm_table mclk_table; - struct fiji_single_dpm_table pcie_speed_table; - struct fiji_single_dpm_table vddc_table; - struct fiji_single_dpm_table vddci_table; - struct fiji_single_dpm_table mvdd_table; -}; - -struct fiji_clock_registers { - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t vDLL_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_SS1; - uint32_t vMPLL_SS2; -}; - -struct fiji_voltage_smio_registers { - uint32_t vS0_VID_LOWER_SMIO_CNTL; -}; - -#define FIJI_MAX_LEAKAGE_COUNT 8 -struct fiji_leakage_voltage { - uint16_t count; - uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT]; -}; - -struct fiji_vbios_boot_state { - uint16_t mvdd_bootup_value; - uint16_t vddc_bootup_value; - uint16_t vddci_bootup_value; - uint32_t sclk_bootup_value; - uint32_t mclk_bootup_value; - uint16_t pcie_gen_bootup_value; - uint16_t pcie_lane_bootup_value; -}; - -struct fiji_bacos { - uint32_t best_match; - uint32_t baco_flags; - struct fiji_performance_level performance_level; -}; - -/* Ultra Low Voltage parameter structure */ -struct fiji_ulv_parm { - bool ulv_supported; - uint32_t cg_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct fiji_performance_level ulv_power_level; -}; - -struct fiji_display_timing { - uint32_t min_clock_in_sr; - uint32_t num_existing_displays; -}; - -struct fiji_dpmlevel_enable_mask { - uint32_t uvd_dpm_enable_mask; - uint32_t vce_dpm_enable_mask; - uint32_t acp_dpm_enable_mask; - uint32_t samu_dpm_enable_mask; - uint32_t sclk_dpm_enable_mask; - uint32_t mclk_dpm_enable_mask; - uint32_t pcie_dpm_enable_mask; -}; - -struct 
fiji_pcie_perf_range { - uint16_t max; - uint16_t min; -}; - -struct fiji_hwmgr { - struct fiji_dpm_table dpm_table; - struct fiji_dpm_table golden_dpm_table; - - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; - uint32_t static_screen_threshold_unit; - uint32_t static_screen_threshold; - uint32_t voltage_control; - uint32_t vddc_vddci_delta; - - uint32_t active_auto_throttle_sources; - - struct fiji_clock_registers clock_registers; - struct fiji_voltage_smio_registers voltage_smio_registers; - - bool is_memory_gddr5; - uint16_t acpi_vddc; - bool pspp_notify_required; - uint16_t force_pcie_gen; - uint16_t acpi_pcie_gen; - uint32_t pcie_gen_cap; - uint32_t pcie_lane_cap; - uint32_t pcie_spc_cap; - struct fiji_leakage_voltage vddc_leakage; - struct fiji_leakage_voltage Vddci_leakage; - - uint32_t mvdd_control; - uint32_t vddc_mask_low; - uint32_t mvdd_mask_low; - uint16_t max_vddc_in_pptable; - uint16_t min_vddc_in_pptable; - uint16_t max_vddci_in_pptable; - uint16_t min_vddci_in_pptable; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edcwr_enable_threshold; - bool is_uvd_enabled; - struct fiji_vbios_boot_state vbios_boot_state; - - bool battery_state; - bool is_tlu_enabled; - - /* ---- SMC SRAM Address of firmware header tables ---- */ - uint32_t sram_end; - uint32_t dpm_table_start; - uint32_t soft_regs_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; - struct SMU73_Discrete_DpmTable smc_state_table; - struct SMU73_Discrete_Ulv ulv_setting; - - /* ---- Stuff originally coming from Evergreen ---- */ - uint32_t vddci_control; - struct pp_atomctrl_voltage_table vddc_voltage_table; - struct pp_atomctrl_voltage_table vddci_voltage_table; - struct pp_atomctrl_voltage_table mvdd_voltage_table; - - uint32_t mgcg_cgtt_local2; - uint32_t mgcg_cgtt_local3; - uint32_t gpio_debug; - uint32_t mc_micro_code_feature; - uint32_t highest_mclk; - uint16_t acpi_vddci; - uint8_t mvdd_high_index; - uint8_t mvdd_low_index; - bool dll_default_on; - bool performance_request_registered; - - /* ---- Low Power Features ---- */ - struct fiji_bacos bacos; - struct fiji_ulv_parm ulv; - - /* ---- CAC Stuff ---- */ - uint32_t cac_table_start; - bool cac_configuration_required; - bool driver_calculate_cac_leakage; - bool cac_enabled; - - /* ---- DPM2 Parameters ---- */ - uint32_t power_containment_features; - bool enable_dte_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool disable_uvd_power_tune_feature; - const struct fiji_pt_defaults *power_tune_defaults; - struct SMU73_Discrete_PmFuses power_tune_table; - uint32_t dte_tj_offset; - uint32_t fast_watermark_threshold; - - /* ---- Phase Shedding ---- */ - bool vddc_phase_shed_control; - - /* ---- DI/DT ---- */ - struct fiji_display_timing display_timing; - - /* ---- Thermal Temperature Setting ---- */ - struct fiji_dpmlevel_enable_mask dpm_level_enable_mask; - uint32_t need_update_smu7_dpm_table; - uint32_t sclk_dpm_key_disabled; - uint32_t mclk_dpm_key_disabled; - uint32_t pcie_dpm_key_disabled; - uint32_t min_engine_clocks; - struct fiji_pcie_perf_range pcie_gen_performance; - struct fiji_pcie_perf_range pcie_lane_performance; - struct fiji_pcie_perf_range pcie_gen_power_saving; 
- struct fiji_pcie_perf_range pcie_lane_power_saving; - bool use_pcie_performance_levels; - bool use_pcie_power_saving_levels; - uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS]; - uint32_t mclk_activity_target; - uint32_t mclk_dpm0_activity_target; - uint32_t low_sclk_interrupt_threshold; - uint32_t last_mclk_dpm_enable_mask; - bool uvd_enabled; - - /* ---- Power Gating States ---- */ - bool uvd_power_gated; - bool vce_power_gated; - bool samu_power_gated; - bool acp_power_gated; - bool pg_acp_init; - bool frtc_enabled; - bool frtc_status_changed; -}; - -/* To convert to Q8.8 format for firmware */ -#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256 - -enum Fiji_I2CLineID { - Fiji_I2CLineID_DDC1 = 0x90, - Fiji_I2CLineID_DDC2 = 0x91, - Fiji_I2CLineID_DDC3 = 0x92, - Fiji_I2CLineID_DDC4 = 0x93, - Fiji_I2CLineID_DDC5 = 0x94, - Fiji_I2CLineID_DDC6 = 0x95, - Fiji_I2CLineID_SCLSDA = 0x96, - Fiji_I2CLineID_DDCVGA = 0x97 -}; - -#define Fiji_I2C_DDC1DATA 0 -#define Fiji_I2C_DDC1CLK 1 -#define Fiji_I2C_DDC2DATA 2 -#define Fiji_I2C_DDC2CLK 3 -#define Fiji_I2C_DDC3DATA 4 -#define Fiji_I2C_DDC3CLK 5 -#define Fiji_I2C_SDA 40 -#define Fiji_I2C_SCL 41 -#define Fiji_I2C_DDC4DATA 65 -#define Fiji_I2C_DDC4CLK 66 -#define Fiji_I2C_DDC5DATA 0x48 -#define Fiji_I2C_DDC5CLK 0x49 -#define Fiji_I2C_DDC6DATA 0x4a -#define Fiji_I2C_DDC6CLK 0x4b -#define Fiji_I2C_DDCVGADATA 0x4c -#define Fiji_I2C_DDCVGACLK 0x4d - -#define FIJI_UNUSED_GPIO_PIN 0x7F - -extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); -extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); -extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); -extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display); -int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); -int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); - -#endif /* _FIJI_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c deleted file mode 100644 index 44658451a8d2..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ /dev/null @@ -1,613 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "hwmgr.h" -#include "smumgr.h" -#include "fiji_hwmgr.h" -#include "fiji_powertune.h" -#include "fiji_smumgr.h" -#include "smu73_discrete.h" -#include "pp_debug.h" - -#define VOLTAGE_SCALE 4 -#define POWERTUNE_DEFAULT_SET_MAX 1 - -const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { - /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ - {1, 0xF, 0xFD, - /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ - 0x19, 5, 45} -}; - -void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t tmp = 0; - - if(table_info && - table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && - table_info->cac_dtp_table->usPowerTuneDataSetID) - fiji_hwmgr->power_tune_defaults = - &fiji_power_tune_data_set_array - [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; - else - fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0]; - - /* Assume disabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - fiji_hwmgr->dte_tj_offset = tmp; - - if (!tmp) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - - fiji_hwmgr->fast_watermark_threshold = 100; - - if (hwmgr->powercontainment_enabled) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - tmp = 1; - fiji_hwmgr->enable_dte_feature = tmp ? false : true; - fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; - fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; - } - } -} - -/* PPGen has the gain setting generated in x * 100 unit - * This function is to convert the unit to x * 4096(0x1000) unit. 
- * This is the unit expected by SMC firmware - */ -static uint16_t scale_fan_gain_settings(uint16_t raw_setting) -{ - uint32_t tmp; - tmp = raw_setting * 4096 / 100; - return (uint16_t)tmp; -} - -static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda) -{ - switch (line) { - case Fiji_I2CLineID_DDC1 : - *scl = Fiji_I2C_DDC1CLK; - *sda = Fiji_I2C_DDC1DATA; - break; - case Fiji_I2CLineID_DDC2 : - *scl = Fiji_I2C_DDC2CLK; - *sda = Fiji_I2C_DDC2DATA; - break; - case Fiji_I2CLineID_DDC3 : - *scl = Fiji_I2C_DDC3CLK; - *sda = Fiji_I2C_DDC3DATA; - break; - case Fiji_I2CLineID_DDC4 : - *scl = Fiji_I2C_DDC4CLK; - *sda = Fiji_I2C_DDC4DATA; - break; - case Fiji_I2CLineID_DDC5 : - *scl = Fiji_I2C_DDC5CLK; - *sda = Fiji_I2C_DDC5DATA; - break; - case Fiji_I2CLineID_DDC6 : - *scl = Fiji_I2C_DDC6CLK; - *sda = Fiji_I2C_DDC6DATA; - break; - case Fiji_I2CLineID_SCLSDA : - *scl = Fiji_I2C_SCL; - *sda = Fiji_I2C_SDA; - break; - case Fiji_I2CLineID_DDCVGA : - *scl = Fiji_I2C_DDCVGACLK; - *sda = Fiji_I2C_DDCVGADATA; - break; - default: - *scl = 0; - *sda = 0; - break; - } -} - -int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; - struct pp_advance_fan_control_parameters *fan_table= - &hwmgr->thermal_controller.advanceFanControlParameters; - uint8_t uc_scl, uc_sda; - - /* TDP number of fraction bits are changed from 8 to 7 for Fiji - * as requested by SMC team - */ - dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( - (uint16_t)(cac_dtp_table->usTDP * 128)); - dpm_table->TargetTdp = PP_HOST_TO_SMC_US( - (uint16_t)(cac_dtp_table->usTDP * 128)); - - PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, - "Target Operating Temp is out of Range!",); - - dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); - dpm_table->GpuTjHyst = 8; - - dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; - - /* The following are for new Fiji Multi-input fan/thermal control */ - dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( - cac_dtp_table->usTargetOperatingTemp * 256); - dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitHotspot * 256); - dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitLiquid1 * 256); - dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitLiquid2 * 256); - dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitVrVddc * 256); - dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitVrMvdd * 256); - dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitPlx * 256); - - dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainEdge)); - dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainHotspot)); - dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainLiquid)); - dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainVrVddc)); - dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( - 
scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); - dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainPlx)); - dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainHbm)); - - dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; - dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; - dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; - dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; - - get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); - dpm_table->Liquid_I2C_LineSCL = uc_scl; - dpm_table->Liquid_I2C_LineSDA = uc_sda; - - get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); - dpm_table->Vr_I2C_LineSCL = uc_scl; - dpm_table->Vr_I2C_LineSDA = uc_sda; - - get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); - dpm_table->Plx_I2C_LineSCL = uc_scl; - dpm_table->Plx_I2C_LineSDA = uc_sda; - - return 0; -} - -static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - - data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; - data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; - data->power_tune_table.SviLoadLineTrimVddC = 3; - data->power_tune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} - -static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) -{ - uint16_t tdc_limit; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - - /* TDC number of fraction bits are changed from 8 to 7 - * for Fiji as requested by SMC team - */ - tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); - data->power_tune_table.TDC_VDDC_PkgLimit = - CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); - data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - defaults->TDC_VDDC_ThrottleReleaseLimitPerc; - data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; - - return 0; -} - -static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - const struct fiji_pt_defaults *defaults = data->power_tune_defaults; - uint32_t temp; - - if (fiji_read_smc_sram_dword(hwmgr->smumgr, - fuse_table_offset + - offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), - (uint32_t *)&temp, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", - return -EINVAL); - else { - data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; - data->power_tune_table.LPMLTemperatureMin = - (uint8_t)((temp >> 16) & 0xff); - data->power_tune_table.LPMLTemperatureMax = - (uint8_t)((temp >> 8) & 0xff); - data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); - } - return 0; -} - -static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) -{ - int i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.LPMLTemperatureScaler[i] = 0; - - return 0; -} - -static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if( (hwmgr->thermal_controller.advanceFanControlParameters. 
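/*
 * A minimal, self-contained sketch of the fixed-point conversions used in the
 * removed Fiji code when filling SMU73_Discrete_DpmTable: TDP/TDC values carry
 * 7 fractional bits (x128), temperature limits carry 8 fractional bits (x256),
 * and fan gains are rescaled from the pptable's x100 basis to the firmware's
 * x4096 basis.  The helper names below are illustrative only, not driver symbols.
 */
#include <stdint.h>

static inline uint16_t tdp_watts_to_smc(uint16_t watts)	/* 7 fractional bits */
{
	return (uint16_t)(watts * 128);
}

static inline uint16_t temp_c_to_smc(uint16_t deg_c)		/* 8 fractional bits */
{
	return (uint16_t)(deg_c * 256);
}

static inline uint16_t fan_gain_to_smc(uint16_t gain_x100)	/* x100 -> x4096 */
{
	return (uint16_t)((uint32_t)gain_x100 * 4096 / 100);
}

/*
 * Example: a 75 W TDP becomes 9600, a 90 C limit becomes 23040, and a gain of
 * 1.00 (stored as 100) becomes 4096 -- matching scale_fan_gain_settings().
 */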
- usFanOutputSensitivity & (1 << 15)) || - 0 == hwmgr->thermal_controller.advanceFanControlParameters. - usFanOutputSensitivity ) - hwmgr->thermal_controller.advanceFanControlParameters. - usFanOutputSensitivity = hwmgr->thermal_controller. - advanceFanControlParameters.usDefaultFanOutputSensitivity; - - data->power_tune_table.FuzzyFan_PwmSetDelta = - PP_HOST_TO_SMC_US(hwmgr->thermal_controller. - advanceFanControlParameters.usFanOutputSensitivity); - return 0; -} - -static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) -{ - int i; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.GnbLPML[i] = 0; - - return 0; -} - -static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - /* int i, min, max; - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd; - uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd; - - min = max = pHiVID[0]; - for (i = 0; i < 8; i++) { - if (0 != pHiVID[i]) { - if (min > pHiVID[i]) - min = pHiVID[i]; - if (max < pHiVID[i]) - max = pHiVID[i]; - } - - if (0 != pLoVID[i]) { - if (min > pLoVID[i]) - min = pLoVID[i]; - if (max < pLoVID[i]) - max = pLoVID[i]; - } - } - - PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed); - data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max; - data->power_tune_table.GnbLPMLMinVid = (uint8_t)min; -*/ - return 0; -} - -static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - - HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); - LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); - - data->power_tune_table.BapmVddCBaseLeakageHiSidd = - CONVERT_FROM_HOST_TO_SMC_US(HiSidd); - data->power_tune_table.BapmVddCBaseLeakageLoSidd = - CONVERT_FROM_HOST_TO_SMC_US(LoSidd); - - return 0; -} - -int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - uint32_t pm_fuse_table_offset; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (fiji_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU73_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to get pm_fuse_table_offset Failed!", - return -EINVAL); - - /* DW6 */ - if (fiji_populate_svi_load_line(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate SviLoadLine Failed!", - return -EINVAL); - /* DW7 */ - if (fiji_populate_tdc_limit(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TDCLimit Failed!", return -EINVAL); - /* DW8 */ - if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TdcWaterfallCtl, " - "LPMLTemperature Min and Max Failed!", - return -EINVAL); - - /* DW9-DW12 */ - if (0 != fiji_populate_temperature_scaler(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate LPMLTemperatureScaler Failed!", - return -EINVAL); - - /* DW13-DW14 
*/ - if(fiji_populate_fuzzy_fan(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate Fuzzy Fan Control parameters Failed!", - return -EINVAL); - - /* DW15-DW18 */ - if (fiji_populate_gnb_lpml(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Failed!", - return -EINVAL); - - /* DW19 */ - if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - - /* DW20 */ - if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate BapmVddCBaseLeakage Hi and Lo " - "Sidd Failed!", return -EINVAL); - - if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, - (uint8_t *)&data->power_tune_table, - sizeof(struct SMU73_Discrete_PmFuses), data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to download PmFuseTable Failed!", - return -EINVAL); - } - return 0; -} - -int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC)) { - int smc_result; - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableCac)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable CAC in SMC.", result = -1); - - data->cac_enabled = (0 == smc_result) ? true : false; - } - return result; -} - -int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC) && data->cac_enabled) { - int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableCac)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable CAC in SMC.", result = -1); - - data->cac_enabled = false; - } - return result; -} - -int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if(data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PkgPwrSetLimit, n); - return 0; -} - -static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) -{ - return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); -} - -int fiji_enable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int smc_result; - int result = 0; - - data->power_containment_features = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (data->enable_dte_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_EnableDTE)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable DTE in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; - } - - if (data->enable_tdc_limit_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable TDCLimit in SMC.", result = -1;); - if (0 == smc_result) - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_TDCLimit; - } - - if 
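/*
 * The PM-fuse path above follows a common SMU7 shape: fill a host-side copy of
 * SMU73_Discrete_PmFuses one dword group at a time (DW6..DW20), bail out on
 * the first populate failure, then upload the whole struct to SMC SRAM in a
 * single copy.  The sketch below mirrors only that shape; struct pm_fuses and
 * the two stubs are illustrative stand-ins, not driver symbols.
 */
#include <stdint.h>
#include <string.h>

struct pm_fuses { uint8_t svi_load_line_en; uint8_t tdc_waterfall_ctl; uint8_t pad[30]; };

static int populate_group(struct pm_fuses *f) { f->svi_load_line_en = 1; return 0; }
static int upload_to_smc(uint32_t offset, const void *buf, uint32_t len)
{ (void)offset; (void)buf; (void)len; return 0; }

static int populate_pm_fuses(uint32_t pm_fuse_table_offset)
{
	struct pm_fuses fuses;

	memset(&fuses, 0, sizeof(fuses));

	if (populate_group(&fuses))		/* one call per DW group in the driver */
		return -1;

	/* one bulk copy of the finished table, as fiji_copy_bytes_to_smc() does */
	return upload_to_smc(pm_fuse_table_offset, &fuses, sizeof(fuses));
}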
(data->enable_pkg_pwr_tracking_feature) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); - PP_ASSERT_WITH_CODE((0 == smc_result), - "Failed to enable PkgPwrTracking in SMC.", result = -1;); - if (0 == smc_result) { - struct phm_cac_tdp_table *cac_table = - table_info->cac_dtp_table; - uint32_t default_limit = - (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); - - data->power_containment_features |= - POWERCONTAINMENT_FEATURE_PkgPwrLimit; - - if (fiji_set_power_limit(hwmgr, default_limit)) - printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); - } - } - } - return result; -} - -int fiji_disable_power_containment(struct pp_hwmgr *hwmgr) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment) && - data->power_containment_features) { - int smc_result; - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_TDCLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_TDCLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable TDCLimit in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_DTE) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_DisableDTE)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable DTE in SMC.", - result = smc_result); - } - - if (data->power_containment_features & - POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - smc_result = smum_send_msg_to_smc(hwmgr->smumgr, - (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); - PP_ASSERT_WITH_CODE((smc_result == 0), - "Failed to disable PkgPwrTracking in SMC.", - result = smc_result); - } - data->power_containment_features = 0; - } - - return result; -} - -int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - int adjust_percent, target_tdp; - int result = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - /* adjustment percentage has already been validated */ - adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? - hwmgr->platform_descriptor.TDPAdjustment : - (-1 * hwmgr->platform_descriptor.TDPAdjustment); - /* SMC requested that target_tdp to be 7 bit fraction in DPM table - * but message to be 8 bit fraction for messages - */ - target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; - result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); - } - - return result; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h deleted file mode 100644 index fec772421733..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
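/*
 * fiji_power_control_set_level() above scales the board TDP by the
 * already-validated adjustment percentage and converts it to the
 * 8-fractional-bit value the OverDriveSetTargetTdp message expects.  A
 * standalone sketch of just that arithmetic (the function name here is
 * illustrative):
 */
#include <stdint.h>

static uint32_t overdrive_target_tdp(uint16_t tdp_watts, int adjust_percent)
{
	/* (100 + adjust) percent of TDP, expressed in 1/256 W units */
	return (uint32_t)(((100 + adjust_percent) * (int)(tdp_watts * 256)) / 100);
}

/*
 * Example: 150 W with a +10 percent policy gives (110 * 38400) / 100 = 42240,
 * i.e. 165 W in x256 fixed point; -10 percent gives 34560, i.e. 135 W.
 */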
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef FIJI_POWERTUNE_H -#define FIJI_POWERTUNE_H - -enum fiji_pt_config_reg_type { - FIJI_CONFIGREG_MMR = 0, - FIJI_CONFIGREG_SMC_IND, - FIJI_CONFIGREG_DIDT_IND, - FIJI_CONFIGREG_CACHE, - FIJI_CONFIGREG_MAX -}; - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 - -#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 -#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 -#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 -#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 -#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d -#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 -#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d -#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 -#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d - -struct fiji_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum fiji_pt_config_reg_type type; -}; - -struct fiji_pt_defaults -{ - uint8_t SviLoadLineEn; - uint8_t SviLoadLineVddC; - uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; - uint8_t TDC_MAWt; - uint8_t TdcWaterfallCtl; - uint8_t DTEAmbientTempBase; -}; - -void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); -int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); -int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); -int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); -int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr); -int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); -int fiji_disable_power_containment(struct pp_hwmgr *hwmgr); -int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); -int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); - -#endif /* FIJI_POWERTUNE_H */ - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h deleted file mode 100644 index 8621493b8574..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
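/*
 * The POWERCONTAINMENT_FEATURE_* bits above are used as running bookkeeping:
 * fiji_enable_power_containment() ORs a bit in only after the matching SMC
 * enable message is accepted, and the disable path later tests exactly those
 * bits, so only features that really came up get torn down.  A minimal sketch
 * of that pattern; send_msg() and the message enum are stand-ins, not driver
 * symbols.
 */
#include <stdint.h>

#define FEATURE_DTE		0x00000001u
#define FEATURE_TDC_LIMIT	0x00000002u
#define FEATURE_PKG_PWR_LIMIT	0x00000004u

enum smc_msg { MSG_ENABLE_DTE, MSG_ENABLE_TDC_LIMIT, MSG_ENABLE_PKG_PWR_LIMIT };

static int send_msg(enum smc_msg msg) { (void)msg; return 0; }	/* 0 == accepted */

static uint32_t enable_power_containment(void)
{
	uint32_t features = 0;

	if (send_msg(MSG_ENABLE_DTE) == 0)
		features |= FEATURE_DTE;
	if (send_msg(MSG_ENABLE_TDC_LIMIT) == 0)
		features |= FEATURE_TDC_LIMIT;
	if (send_msg(MSG_ENABLE_PKG_PWR_LIMIT) == 0)
		features |= FEATURE_PKG_PWR_LIMIT;

	return features;	/* the disable path walks these same bits */
}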
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef FIJI_THERMAL_H -#define FIJI_THERMAL_H - -#include "hwmgr.h" - -#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1 -#define FIJI_THERMAL_LOW_ALERT_MASK 0x2 - -#define FIJI_THERMAL_MINIMUM_TEMP_READING -256 -#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255 - -#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0 -#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); - -extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); -extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 789f98ad2615..14f8c1f4da3d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -24,8 +24,6 @@ #include "hwmgr.h" #include "hardwaremanager.h" #include "power_state.h" -#include "pp_acpi.h" -#include "amd_acpi.h" #include "pp_debug.h" #define PHM_FUNC_CHECK(hw) \ @@ -34,38 +32,6 @@ return -EINVAL; \ } while (0) -void 
phm_init_dynamic_caps(struct pp_hwmgr *hwmgr) -{ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays); - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM); - - if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && - acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); -} - bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr) { return hwmgr->block_hw_access; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 27e07624ac28..1167205057b3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -32,13 +32,22 @@ #include "pp_debug.h" #include "ppatomctrl.h" #include "ppsmc.h" - -#define VOLTAGE_SCALE 4 +#include "pp_acpi.h" +#include "amd_acpi.h" extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); -extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); + +static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); +static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); +static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr); +static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr); + +uint8_t convert_to_vid(uint16_t vddc) +{ + return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); +} int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) { @@ -56,10 +65,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->device = pp_init->device; 
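/*
 * convert_to_vid(), added to hwmgr.c above, encodes a VDDC value as an SVI2
 * VID step.  Assuming vddc is in millivolts, the expression
 * (6200 - vddc * VOLTAGE_SCALE) / 25 with VOLTAGE_SCALE == 4 is simply the
 * inverse of the SVI2 relation V = 1.55 V - VID * 6.25 mV.  A standalone
 * round-trip check (helper names are illustrative):
 */
#include <assert.h>
#include <stdint.h>

static uint8_t vid_from_mv(uint16_t vddc_mv)
{
	return (uint8_t)((6200 - vddc_mv * 4) / 25);
}

static uint16_t mv_from_vid(uint8_t vid)
{
	return (uint16_t)(1550 - vid * 25 / 4);		/* 6.25 mV per step */
}

int main(void)
{
	assert(vid_from_mv(1150) == 64);	/* 1.55 V - 64 * 6.25 mV = 1.15 V */
	assert(mv_from_vid(64) == 1150);
	return 0;
}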
hwmgr->chip_family = pp_init->chip_family; hwmgr->chip_id = pp_init->chip_id; - hwmgr->hw_revision = pp_init->rev_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; - hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; + hwmgr->pp_table_version = PP_TABLE_V1; + + hwmgr_init_default_caps(hwmgr); + hwmgr_set_user_specify_caps(hwmgr); switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CZ: @@ -67,26 +78,38 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { + case CHIP_TOPAZ: + topaz_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | + PP_VBI_TIME_SUPPORT_MASK | + PP_ENABLE_GFX_CG_THRU_SMU); + hwmgr->pp_table_version = PP_TABLE_V0; + break; case CHIP_TONGA: - tonga_hwmgr_init(hwmgr); + tonga_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | + PP_VBI_TIME_SUPPORT_MASK); break; case CHIP_FIJI: - fiji_hwmgr_init(hwmgr); + fiji_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK | + PP_VBI_TIME_SUPPORT_MASK | + PP_ENABLE_GFX_CG_THRU_SMU); break; case CHIP_POLARIS11: case CHIP_POLARIS10: - polaris10_hwmgr_init(hwmgr); + polaris_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; default: return -EINVAL; } + smu7_hwmgr_init(hwmgr); break; default: return -EINVAL; } - phm_init_dynamic_caps(hwmgr); - return 0; } @@ -105,6 +128,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) kfree(hwmgr->set_temperature_range.function_list); kfree(hwmgr->ps); + kfree(hwmgr->current_ps); + kfree(hwmgr->request_ps); kfree(hwmgr); return 0; } @@ -129,10 +154,17 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr) sizeof(struct pp_power_state); hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); - if (hwmgr->ps == NULL) return -ENOMEM; + hwmgr->request_ps = kzalloc(size, GFP_KERNEL); + if (hwmgr->request_ps == NULL) + return -ENOMEM; + + hwmgr->current_ps = kzalloc(size, GFP_KERNEL); + if (hwmgr->current_ps == NULL) + return -ENOMEM; + state = hwmgr->ps; for (i = 0; i < table_entries; i++) { @@ -140,7 +172,8 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr) if (state->classification.flags & PP_StateClassificationFlag_Boot) { hwmgr->boot_ps = state; - hwmgr->current_ps = hwmgr->request_ps = state; + memcpy(hwmgr->current_ps, state, size); + memcpy(hwmgr->request_ps, state, size); } state->id = i + 1; /* assigned unique num for every power state id */ @@ -150,6 +183,7 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr) state = (struct pp_power_state *)((unsigned long)state + size); } + return 0; } @@ -182,30 +216,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, return 0; } -int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return -EINVAL; - } - - for (i = 0; i < hwmgr->usec_timeout; i++) { - cur_value = cgs_read_register(hwmgr->device, index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == hwmgr->usec_timeout) - return -1; - return 0; -} - /** * Returns once the part of the register indicated by the mask has @@ -227,21 +237,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); 
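/*
 * The hw_init_power_state_table() hunk above stops aliasing hwmgr->current_ps
 * and hwmgr->request_ps to the boot entry of hwmgr->ps and instead gives each
 * its own allocation seeded with a copy of the boot state (with matching
 * kfree() calls added to hwmgr_fini()).  Later code can then patch the
 * requested state in place without corrupting the boot entry kept in the
 * table.  A sketch of the pattern; the power_state layout here is illustrative.
 */
#include <stdlib.h>
#include <string.h>

struct power_state { unsigned int id; unsigned int sclk; unsigned int mclk; };

static int init_state_copies(const struct power_state *boot,
			     struct power_state **req, struct power_state **cur)
{
	*req = malloc(sizeof(**req));
	*cur = malloc(sizeof(**cur));
	if (!*req || !*cur) {
		free(*req);
		free(*cur);
		return -1;
	}
	memcpy(*req, boot, sizeof(*boot));	/* independent, writable copies */
	memcpy(*cur, boot, sizeof(*boot));
	return 0;
}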
} -void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return; - } - cgs_write_register(hwmgr->device, indirect_port, index); - phm_wait_for_register_unequal(hwmgr, indirect_port + 1, - value, mask); -} bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) { @@ -403,12 +399,9 @@ int phm_reset_single_dpm_table(void *table, struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; - PP_ASSERT_WITH_CODE(count <= max, - "Fatal error, can not set up single DPM table entries to exceed max number!", - ); + dpm_table->count = count > max ? max : count; - dpm_table->count = count; - for (i = 0; i < max; i++) + for (i = 0; i < dpm_table->count; i++) dpm_table->dpm_level[i].enabled = false; return 0; @@ -462,6 +455,27 @@ uint8_t phm_get_voltage_index( return i - 1; } +uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, + uint32_t voltage) +{ + uint8_t count = (uint8_t) (voltage_table->count); + uint8_t i = 0; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), + "Voltage Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage bigger than requested */ + if (voltage_table->entries[i].value >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci) { uint32_t i; @@ -549,7 +563,8 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr table_clk_vlt->entries[2].v = 810; table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; table_clk_vlt->entries[3].v = 900; - pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; + if (pptable_info != NULL) + pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; } @@ -615,3 +630,186 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) printk(KERN_ERR "DAL requested level can not" " found a available voltage in VDDC DPM Table \n"); } + +void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 
PHM_PlatformCaps_ThermalAutoThrottling); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM); + + if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && + acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicUVDState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM); + + return; +} + +int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr) +{ + if (amdgpu_sclk_deep_sleep_en) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + if (amdgpu_powercontainment) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + + hwmgr->feature_mask = amdgpu_pp_feature_mask; + + return 0; +} + +int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, + uint32_t sclk, uint16_t id, uint16_t *voltage) +{ + uint32_t vol; + int ret = 0; + + if (hwmgr->chip_id < CHIP_POLARIS10) { + atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); + if (*voltage >= 2000 || *voltage == 0) + *voltage = 1150; + } else { + ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol); + *voltage = (uint16_t)vol/100; + } + return ret; +} + +int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + /* power tune caps Assume disabled */ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + if (hwmgr->chip_id == CHIP_POLARIS11) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SPLLShutdownSupport); + return 0; +} + +int 
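/*
 * phm_get_voltage_evv_on_sclk(), added above, keeps two paths: pre-Polaris
 * parts use the plain EVV lookup and fall back to a 1150 default when the
 * result is zero or at least 2000 (implausible for VDDC), while Polaris uses
 * the "ai" lookup, whose result comes back scaled up by 100 and is divided
 * back down.  A minimal sketch of the fallback clamp; the lookup stub is
 * illustrative, not a driver symbol.
 */
#include <stdint.h>

static uint16_t evv_lookup_stub(uint32_t sclk) { (void)sclk; return 0; }

static uint16_t evv_voltage_or_default(uint32_t sclk)
{
	uint16_t v = evv_lookup_stub(sclk);

	if (v == 0 || v >= 2000)	/* outside the plausible VDDC range */
		v = 1150;		/* safe default, as in the hunk above */
	return v;
}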
fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + return 0; +} + +int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + return 0; +} + +int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr) +{ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h deleted file mode 100644 index f78ffd935cee..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef POLARIS10_DYN_DEFAULTS_H -#define POLARIS10_DYN_DEFAULTS_H - - -enum Polaris10dpm_TrendDetection { - Polaris10Adpm_TrendDetection_AUTO, - Polaris10Adpm_TrendDetection_UP, - Polaris10Adpm_TrendDetection_DOWN -}; -typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection; - -/* We need to fill in the default values */ - - -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 -#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 - - -#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200 -#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0 -#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8 -#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 -#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4 - -#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035 -#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450 -#define PPPOLARIS10_TARGETACTIVITY_DFLT 50 -#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10 - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c deleted file mode 100644 index 769636a0c5b5..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ /dev/null @@ -1,5290 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/fb.h> -#include <asm/div64.h> -#include "linux/delay.h" -#include "pp_acpi.h" -#include "hwmgr.h" -#include "polaris10_hwmgr.h" -#include "polaris10_powertune.h" -#include "polaris10_dyn_defaults.h" -#include "polaris10_smumgr.h" -#include "pp_debug.h" -#include "ppatomctrl.h" -#include "atombios.h" -#include "tonga_pptable.h" -#include "pppcielanes.h" -#include "amd_pcie_helpers.h" -#include "hardwaremanager.h" -#include "tonga_processpptables.h" -#include "cgs_common.h" -#include "smu74.h" -#include "smu_ucode_xfer_vi.h" -#include "smu74_discrete.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" -#include "oss/oss_3_0_d.h" -#include "gca/gfx_8_0_d.h" -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" - -#include "polaris10_thermal.h" -#include "polaris10_clockpowergating.h" - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define MC_CG_SEQ_DRAMCONF_S0 0x05 -#define MC_CG_SEQ_DRAMCONF_S1 0x06 -#define MC_CG_SEQ_YCLK_SUSPEND 0x04 -#define MC_CG_SEQ_YCLK_RESUME 0x0a - - -#define SMC_RAM_END 0x40000 - -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -#define VDDC_VDDCI_DELTA 200 - -#define MEM_FREQ_LOW_LATENCY 25000 -#define MEM_FREQ_HIGH_LATENCY 80000 - -#define MEM_LATENCY_HIGH 45 -#define MEM_LATENCY_LOW 35 -#define MEM_LATENCY_ERR 0xFFFF - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - - -#define PCIE_BUS_CLK 10000 -#define TCLK (PCIE_BUS_CLK / 10) - - -static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] = -{ {600, 1050, 3, 0}, {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */ -static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] = -{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */ -static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] = -{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; - -/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, - DPM_EVENT_SRC_EXTERNAL = 1, - DPM_EVENT_SRC_DIGITAL = 2, - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 -}; - -static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic); - -struct polaris10_power_state *cast_phw_polaris10_power_state( - struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (struct polaris10_power_state *)hw_ps; -} - -const struct polaris10_power_state *cast_const_phw_polaris10_power_state( - const struct pp_hw_power_state *hw_ps) -{ - PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (const struct polaris10_power_state *)hw_ps; -} - -static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) - ? true : false; -} - -/** - * Find the MC microcode version and store it in the HwMgr struct - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr) -{ - cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - - hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); - - return 0; -} - -uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speedCntl = 0; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speedCntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -/** -* Enable voltage control -* -* @param pHwMgr the address of the powerplay hardware manager. -* @return always PP_Result_OK -*/ -int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) -{ - PP_ASSERT_WITH_CODE( - (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0), - "Failed to enable voltage DPM during DPM Start Function!", - return 1; - ); - - return 0; -} - -/** -* Checks if we want to support voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -*/ -static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr) -{ - const struct polaris10_hwmgr *data = - (const struct polaris10_hwmgr *)(hwmgr->backend); - - return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control); -} - -/** -* Enable voltage control -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -/** -* Create Voltage Tables. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - int result; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, - &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve MVDD table.", - return result); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table), - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 MVDD table from dependancy table.", - return result;); - } - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, - &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", - return result); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table), - table_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", - return result); - } - - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table), - table_info->vddc_lookup_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from lookup table.", - return result); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC, - &(data->vddc_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI, - &(data->vddci_voltage_table))); - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD. Trimming to fit state table.", - phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD, - &(data->mvdd_voltage_table))); - - return 0; -} - -/** -* Programs static screed detection parameters -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_program_static_screen_threshold_parameters( - struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** -* Setup display gap for glitch free memory clock switching. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t display_gap = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL); - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, - DISP_GAP, DISPLAY_GAP_IGNORE); - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, - DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - return 0; -} - -/** -* Programs activity state transition voting clients -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - -static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr) -{ - /* Reset voting clients before disabling DPM */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, 0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, 0); - - return 0; -} - -/** -* Get the location of various tables inside the FW image. -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); - uint32_t tmp; - int result; - bool error = false; - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) - data->dpm_table_start = tmp; - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (!result) { - data->soft_regs_start = tmp; - smu_data->soft_regs_start = tmp; - } - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (!result) - data->mc_reg_table_start = tmp; - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (!result) - data->fan_table_start = tmp; - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (!result) - data->arb_table_start = tmp; - - error |= (0 != result); - - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, Version), - &tmp, data->sram_end); - - if (!result) - hwmgr->microcode_version_info.SMC = tmp; - - error |= (0 != result); - - return error ? -1 : 0; -} - -/* Copy one arb setting to another and then switch the active set. - * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
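/*
 * polaris10_process_firmware_header() above locates each runtime table
 * (DpmTable, SoftRegisters, FanTable, mcArbDramTimingTable, ...) by reading
 * its offset field out of SMU74_Firmware_Header in SMC SRAM, OR-ing every
 * read failure into one error flag so a single bad read fails the whole step.
 * The shape of that pattern with stand-in types (not driver symbols):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fw_header { uint32_t dpm_table; uint32_t soft_regs; uint32_t fan_table; };

static int read_sram_dword(uint32_t addr, uint32_t *out) { *out = addr + 0x100; return 0; }

static int process_firmware_header(uint32_t header_base, uint32_t *dpm,
				   uint32_t *soft, uint32_t *fan)
{
	bool error = false;

	error |= read_sram_dword(header_base + offsetof(struct fw_header, dpm_table), dpm) != 0;
	error |= read_sram_dword(header_base + offsetof(struct fw_header, soft_regs), soft) != 0;
	error |= read_sram_dword(header_base + offsetof(struct fw_header, fan_table), fan) != 0;

	return error ? -1 : 0;
}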
- */ -static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arb_src, uint32_t arb_dest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arb_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - default: - return -EINVAL; - } - - switch (arb_dest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - default: - return -EINVAL; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); - - return 0; -} - -static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); -} - -/** -* Initial switch from ARB F0->F1 -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -* This function is to be called from the SetPowerState table. 
-*/ -static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) -{ - return polaris10_copy_and_switch_arb_sets(hwmgr, - MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) -{ - uint32_t tmp; - - tmp = (cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixSMC_SCRATCH9) & - 0x0000ff00) >> 8; - - if (tmp == MC_CG_ARB_FREQ_F0) - return 0; - - return polaris10_copy_and_switch_arb_sets(hwmgr, - tmp, MC_CG_ARB_FREQ_F0); -} - -static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint32_t i, max_entry; - - PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || - data->use_pcie_power_saving_levels), "No pcie performance levels!", - return -EINVAL); - - if (data->use_pcie_performance_levels && - !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && - data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, - SMU74_MAX_LEVELS_LINK, - MAX_REGULAR_DPM_NUMBER); - - if (pcie_table != NULL) { - /* max_entry is used to make sure we reserve one PCIE level - * for boot level (fix for A+A PSPP issue). - * If PCIE table from PPTable have ULV entry + 8 entries, - * then ignore the last entry.*/ - max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ? 
- SMU74_MAX_LEVELS_LINK : pcie_table->count; - for (i = 1; i < max_entry; i++) { - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, - get_pcie_gen_support(data->pcie_gen_cap, - pcie_table->entries[i].gen_speed), - get_pcie_lane_support(data->pcie_lane_cap, - pcie_table->entries[i].lane_width)); - } - data->dpm_table.pcie_speed_table.count = max_entry - 1; - - /* Setup BIF_SCLK levels */ - for (i = 0; i < max_entry; i++) - data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; - } else { - /* Hardcode Pcie Table */ - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - data->dpm_table.pcie_speed_table.count = 6; - } - /* Populate last level for boot PCIE level, but do not increment count. */ - phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, - data->dpm_table.pcie_speed_table.count, - get_pcie_gen_support(data->pcie_gen_cap, - PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, - PP_Max_PCIELane)); - - return 0; -} - -/* - * This function is to initalize all DPM state tables - * for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these - * state tables to the allowed range based - * on the power policy or external client requests, - * such as UVD request, etc. - */ -int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, - "SCLK dependency table has to have is missing." - "This table is mandatory", - return -EINVAL); - - PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", - return -EINVAL); - PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, - "MCLK dependency table has to have is missing." 
- "This table is mandatory", - return -EINVAL); - - /* clear the state table to reset everything to default */ - phm_reset_single_dpm_table( - &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER); - phm_reset_single_dpm_table( - &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER); - - - /* Initialize Sclk DPM table based on allow Sclk values */ - data->dpm_table.sclk_table.count = 0; - for (i = 0; i < dep_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value != - dep_sclk_table->entries[i].clk) { - - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = - dep_sclk_table->entries[i].clk; - - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.sclk_table.count++; - } - } - - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i = 0; i < dep_mclk_table->count; i++) { - if (i == 0 || data->dpm_table.mclk_table.dpm_levels - [data->dpm_table.mclk_table.count - 1].value != - dep_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = - dep_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = - (i == 0) ? true : false; - data->dpm_table.mclk_table.count++; - } - } - - /* setup PCIE gen speed levels */ - polaris10_setup_default_pcie_table(hwmgr); - - /* save a copy of the default DPM table */ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), - sizeof(struct polaris10_dpm_table)); - - return 0; -} - -uint8_t convert_to_vid(uint16_t vddc) -{ - return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); -} - -/** - * Mvdd table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. - * @return 0 - */ -static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t count, level; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - count = data->mvdd_voltage_table.count; - if (count > SMU_MAX_SMIO_LEVELS) - count = SMU_MAX_SMIO_LEVELS; - for (level = 0; level < count; level++) { - table->SmioTable2.Pattern[level].Voltage = - PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); - /* Index into DpmTable.Smio. 
Drive bits from Smio entry to get this voltage level.*/ - table->SmioTable2.Pattern[level].Smio = - (uint8_t) level; - table->Smio[level] |= - data->mvdd_voltage_table.entries[level].smio_low; - } - table->SmioMask2 = data->mvdd_voltage_table.mask_low; - - table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); - } - - return 0; -} - -static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - uint32_t count, level; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - count = data->vddci_voltage_table.count; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - if (count > SMU_MAX_SMIO_LEVELS) - count = SMU_MAX_SMIO_LEVELS; - for (level = 0; level < count; ++level) { - table->SmioTable1.Pattern[level].Voltage = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); - table->SmioTable1.Pattern[level].Smio = (uint8_t) level; - - table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; - } - } - - table->SmioMask1 = data->vddci_voltage_table.mask_low; - - return 0; -} - -/** -* Preparation of vddc and vddgfx CAC tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - uint32_t count; - uint8_t index; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_voltage_lookup_table *lookup_table = - table_info->vddc_lookup_table; - /* tables is already swapped, so in order to use the value from it, - * we need to swap it back. - * We are populating vddc CAC data to BapmVddc table - * in split and merged mode - */ - for (count = 0; count < lookup_table->count; count++) { - index = phm_get_voltage_index(lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low); - table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid); - table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high); - } - - return 0; -} - -/** -* Preparation of voltage tables for SMC. -* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ - -int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - polaris10_populate_smc_vddci_table(hwmgr, table); - polaris10_populate_smc_mvdd_table(hwmgr, table); - polaris10_populate_cac_table(hwmgr, table); - - return 0; -} - -static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_Ulv *state) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - state->CcPwrDynRm = 0; - state->CcPwrDynRm1 = 0; - - state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; - state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * - VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - - state->VddcPhase = (data->vddc_phase_shed_control) ? 
0 : 1; - - CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); - CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); - - return 0; -} - -static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - return polaris10_populate_ulv_level(hwmgr, &table->Ulv); -} - -static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - int i; - - /* Index (dpm_table->pcie_speed_table.count) - * is reserved for PCIE boot level. */ - for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( - dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = 1; - table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); - table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); - table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); - } - - data->smc_state_table.LinkLevelCount = - (uint8_t)dpm_table->pcie_speed_table.count; - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); - - return 0; -} - -static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr) -{ - uint32_t reference_clock, tmp; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); - - if (tmp) - return TCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - reference_clock = mode_info.ref_clock; - - tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); - - if (0 != tmp) - return reference_clock / 4; - - return reference_clock; -} - -/** -* Calculates the SCLK dividers using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ -static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, - uint32_t clock, SMU_SclkSetting *sclk_setting) -{ - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - struct pp_atomctrl_clock_dividers_ai dividers; - - uint32_t ref_clock; - uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq; - uint8_t i; - int result; - uint64_t temp; - - sclk_setting->SclkFrequency = clock; - /* get the engine clock dividers for this clock value */ - result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, ÷rs); - if (result == 0) { - sclk_setting->Fcw_int = dividers.usSclk_fcw_int; - sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac; - sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int; - sclk_setting->PllRange = dividers.ucSclkPllRange; - sclk_setting->Sclk_slew_rate = 0x400; - sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac; - sclk_setting->Pcc_down_slew_rate = 0xffff; - sclk_setting->SSc_En = dividers.ucSscEnable; - sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int; - sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac; - sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac; - 
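/*
 * Illustrative sketch, not part of the patch: polaris10_calculate_sclk_params()
 * first asks the VBIOS for AI PLL dividers (the block above); the fallback
 * path that follows derives the feedback word (FCW) itself as the ratio
 * (clock << postdiv) / refclk in 16.16 fixed point, with the integer part in
 * Fcw_int and the fraction in Fcw_frac.  Standalone illustration; the helper
 * name and the assumption that both clocks are in the same 10 kHz units are
 * mine, not taken from the patch.
 */
#include <stdint.h>

static void sketch_fcw(uint32_t clock_10khz, uint32_t refclk_10khz,
                       uint32_t postdiv_shift,
                       uint16_t *fcw_int, uint16_t *fcw_frac)
{
        uint64_t scaled = (uint64_t)clock_10khz << postdiv_shift;

        *fcw_int  = (uint16_t)(scaled / refclk_10khz);
        *fcw_frac = (uint16_t)(((scaled << 16) / refclk_10khz) & 0xffff);
}

/* e.g. 600 MHz engine clock (60000), 100 MHz refclk (10000), postdiv shift 2:
 * scaled = 240000, Fcw_int = 24, Fcw_frac = 0 (the ratio is exact here). */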
return result; - } - - ref_clock = polaris10_get_xclk(hwmgr); - - for (i = 0; i < NUM_SCLK_RANGE; i++) { - if (clock > data->range_table[i].trans_lower_frequency - && clock <= data->range_table[i].trans_upper_frequency) { - sclk_setting->PllRange = i; - break; - } - } - - sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); - temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; - temp <<= 0x10; - do_div(temp, ref_clock); - sclk_setting->Fcw_frac = temp & 0xffff; - - pcc_target_percent = 10; /* Hardcode 10% for now. */ - pcc_target_freq = clock - (clock * pcc_target_percent / 100); - sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); - - ss_target_percent = 2; /* Hardcode 2% for now. */ - sclk_setting->SSc_En = 0; - if (ss_target_percent) { - sclk_setting->SSc_En = 1; - ss_target_freq = clock - (clock * ss_target_percent / 100); - sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); - temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; - temp <<= 0x10; - do_div(temp, ref_clock); - sclk_setting->Fcw1_frac = temp & 0xffff; - } - - return 0; -} - -static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, - uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) -{ - uint32_t i; - uint16_t vddci; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - *voltage = *mvdd = 0; - - /* clock - voltage dependency table is empty table */ - if (dep_table->count == 0) - return -EINVAL; - - for (i = 0; i < dep_table->count; i++) { - /* find first sclk bigger than request */ - if (dep_table->entries[i].clk >= clock) { - *voltage |= (dep_table->entries[i].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i].vddci) - *voltage |= (dep_table->entries[i].vddci * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else { - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i].mvdd * - VOLTAGE_SCALE; - - *voltage |= 1 << PHASES_SHIFT; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control) - *voltage |= (data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE) << VDDCI_SHIFT; - else if (dep_table->entries[i-1].vddci) { - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i].vddc - - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - } - - if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) - *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; - else if (dep_table->entries[i].mvdd) - *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; - - return 0; -} - -static const 
sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = -{ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, - {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160}, - {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112}, - {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160}, - {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112}, - {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160}, - {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108}, - {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} }; - -static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr) -{ - uint32_t i, ref_clk; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; - - ref_clk = polaris10_get_xclk(hwmgr); - - if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) { - for (i = 0; i < NUM_SCLK_RANGE; i++) { - table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting; - table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv; - table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc; - - table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper; - table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower; - - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); - } - return; - } - - for (i = 0; i < NUM_SCLK_RANGE; i++) { - - data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv; - data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv; - - table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; - table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; - table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; - - table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; - table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; - - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); - CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); - } -} - -/** -* Populates single SMC SCLK structure using the provided engine clock -* -* @param hwmgr the address of the hardware manager -* @param clock the engine clock to use to populate the structure -* @param sclk the SMC SCLK structure to be populated -*/ - -static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, - uint32_t clock, uint16_t sclk_al_threshold, - struct SMU74_Discrete_GraphicsLevel *level) -{ - int result, i, temp; - /* PP_Clocks minClocks; */ - uint32_t mvdd; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - SMU_SclkSetting curr_sclk_setting = { 0 }; - - result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); - - /* populate graphics levels */ - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, clock, - &level->MinVoltage, &mvdd); - - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for " - "VDDC engine clock dependency table", - return result); 
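/*
 * Illustrative sketch, not part of the patch: polaris10_get_dependency_volt_by_clk()
 * above returns MinVoltage as one packed word.  Assuming the usual SMU7x layout
 * (VDDC in bits 0..14, VDDCI in bits 15..29, phase-shed count in bits 30..31,
 * voltages stored in 0.25 mV units, i.e. millivolts times VOLTAGE_SCALE == 4),
 * the packing reduces to the helper below; the SKETCH_* names and the exact bit
 * positions are assumptions, not taken from the patch.
 */
#include <stdint.h>

#define SKETCH_VOLTAGE_SCALE 4          /* mV -> 0.25 mV units (assumed) */
#define SKETCH_VDDC_SHIFT    0          /* assumed bit positions */
#define SKETCH_VDDCI_SHIFT   15
#define SKETCH_PHASES_SHIFT  30

static uint32_t sketch_pack_min_voltage(uint16_t vddc_mv, uint16_t vddci_mv,
                                        uint8_t phases)
{
        return ((uint32_t)(vddc_mv  * SKETCH_VOLTAGE_SCALE) << SKETCH_VDDC_SHIFT) |
               ((uint32_t)(vddci_mv * SKETCH_VOLTAGE_SCALE) << SKETCH_VDDCI_SHIFT) |
               ((uint32_t)phases << SKETCH_PHASES_SHIFT);
}

/* e.g. 1150 mV VDDC, 1000 mV VDDCI, 1 phase -> 0x47d011f8 under these assumptions. */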
- level->ActivityLevel = sclk_al_threshold; - - level->CcPwrDynRm = 0; - level->CcPwrDynRm1 = 0; - level->EnabledForActivity = 0; - level->EnabledForThrottle = 1; - level->UpHyst = 10; - level->DownHyst = 0; - level->VoltageDownHyst = 0; - level->PowerThrottle = 0; - - /* - * TODO: get minimum clocks from dal configaration - * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks); - */ - /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */ - - /* get level->DeepSleepDivId - if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) - level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR); - */ - PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0); - for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { - temp = clock >> i; - - if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0) - break; - } - - level->DeepSleepDivId = i; - - /* Default to slow, highest DPM level will be - * set to PPSMC_DISPLAY_WATERMARK_LOW later. - */ - if (data->update_up_hyst) - level->UpHyst = (uint8_t)data->up_hyst; - if (data->update_down_hyst) - level->DownHyst = (uint8_t)data->down_hyst; - - level->SclkSetting = curr_sclk_setting; - - CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); - CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac); - CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate); - return 0; -} - -/** -* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; - uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; - int result = 0; - uint32_t array = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * - SMU74_MAX_LEVELS_GRAPHICS; - struct SMU74_Discrete_GraphicsLevel *levels = - data->smc_state_table.GraphicsLevel; - uint32_t i, max_entry; - uint8_t hightest_pcie_level_enabled = 0, - lowest_pcie_level_enabled = 0, - mid_pcie_level_enabled = 0, - count = 0; - - polaris10_get_sclk_range_table(hwmgr); - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - - result = polaris10_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &(data->smc_state_table.GraphicsLevel[i])); - if (result) - return result; - - /* Making sure 
only DPM level 0-1 have Deep Sleep Div ID populated. */ - if (i > 1) - levels[i].DeepSleepDivId = 0; - } - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SPLLShutdownSupport)) - data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0; - - data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - - if (pcie_table != NULL) { - PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), - "There must be 1 or more PCIE levels defined in PPTable.", - return -EINVAL); - max_entry = pcie_entry_cnt - 1; - for (i = 0; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = - (uint8_t) ((i < max_entry) ? i : max_entry); - } else { - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (hightest_pcie_level_enabled + 1))) != 0)) - hightest_pcie_level_enabled++; - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << lowest_pcie_level_enabled)) == 0)) - lowest_pcie_level_enabled++; - - while ((count < hightest_pcie_level_enabled) && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) - count++; - - mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < - hightest_pcie_level_enabled ? - (lowest_pcie_level_enabled + 1 + count) : - hightest_pcie_level_enabled; - - /* set pcieDpmLevel to hightest_pcie_level_enabled */ - for (i = 2; i < dpm_table->sclk_table.count; i++) - levels[i].pcieDpmLevel = hightest_pcie_level_enabled; - - /* set pcieDpmLevel to lowest_pcie_level_enabled */ - levels[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled */ - levels[1].pcieDpmLevel = mid_pcie_level_enabled; - } - /* level count will send to smc once at init smc table and never change */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, - uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (table_info->vdd_dep_on_mclk) { - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, clock, - &mem_level->MinVoltage, &mem_level->MinMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory " - "VDDC voltage dependency table", return result); - } - - mem_level->MclkFrequency = clock; - mem_level->EnabledForThrottle = 1; - mem_level->EnabledForActivity = 0; - mem_level->UpHyst = 0; - mem_level->DownHyst = 100; - mem_level->VoltageDownHyst = 0; - mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - mem_level->StutterEnable = false; - mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - data->display_timing.num_existing_displays = info.display_count; - - if ((data->mclk_stutter_mode_threshold) && - (clock <= data->mclk_stutter_mode_threshold) && - (PHM_READ_FIELD(hwmgr->device, 
DPG_PIPE_STUTTER_CONTROL, - STUTTER_ENABLE) & 0x1)) - mem_level->StutterEnable = true; - - if (!result) { - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); - } - return result; -} - -/** -* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states -* -* @param hwmgr the address of the hardware manager -*/ -static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t array = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) * - SMU74_MAX_LEVELS_MEMORY; - struct SMU74_Discrete_MemoryLevel *levels = - data->smc_state_table.MemoryLevel; - uint32_t i; - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", - return -EINVAL); - result = polaris10_populate_single_memory_level(hwmgr, - dpm_table->mclk_table.dpm_levels[i].value, - &levels[i]); - if (i == dpm_table->mclk_table.count - 1) { - levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; - levels[i].EnabledForActivity = 1; - } - if (result) - return result; - } - - /* In order to prevent MC activity from stutter mode to push DPM up, - * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not affected by - * up threshold or and MCLK DPM latency. - */ - levels[0].ActivityLevel = 0x1f; - CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = - (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - - /* level count will send to smc once at init smc table and never change */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, - (uint32_t)array_size, data->sram_end); - - return result; -} - -/** -* Populates the SMC MVDD structure using the provided memory clock. -* -* @param hwmgr the address of the hardware manager -* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
-* @param voltage the SMC VOLTAGE structure to be populated -*/ -int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr, - uint32_t mclk, SMIO_Pattern *smio_pat) -{ - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i = 0; - - if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) { - /* find mvdd value which clock is more than request */ - for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { - if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { - smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; - break; - } - } - PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, - "MVDD Voltage is outside the supported range.", - return -EINVAL); - } else - return -EINVAL; - - return 0; -} - -static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = 0; - uint32_t sclk_frequency; - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - SMIO_Pattern vol_level; - uint32_t mvdd; - uint16_t us_mvdd; - - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - - /* Get MinVoltage and Frequency from DPM0, - * already converted to SMC_UL */ - sclk_frequency = data->vbios_boot_state.sclk_bootup_value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, - sclk_frequency, - &table->ACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDC voltage value " - "in Clock Dependency Table", - ); - - - result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); - PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); - - table->ACPILevel.DeepSleepDivId = 0; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); - - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); - CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); - - - /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, - table->MemoryACPILevel.MclkFrequency, - &table->MemoryACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDCI voltage value " - "in Clock Dependency Table", - ); - - us_mvdd = 0; - if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) 
|| - (data->mclk_dpm_key_disabled)) - us_mvdd = data->vbios_boot_state.mvdd_bootup_value; - else { - if (!polaris10_populate_mvdd_value(hwmgr, - data->dpm_table.mclk_table.dpm_levels[0].value, - &vol_level)) - us_mvdd = vol_level.Voltage; - } - - if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level)) - table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); - else - table->MemoryACPILevel.MinMvdd = 0; - - table->MemoryACPILevel.StutterEnable = false; - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpHyst = 0; - table->MemoryACPILevel.DownHyst = 100; - table->MemoryACPILevel.VoltageDownHyst = 0; - table->MemoryACPILevel.ActivityLevel = - PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); - - CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); - - return result; -} - -static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->VceLevelCount = (uint8_t)(mm_table->count); - table->VceBootLevel = 0; - - for (count = 0; count < table->VceLevelCount; count++) { - table->VceLevel[count].Frequency = mm_table->entries[count].eclk; - table->VceLevel[count].MinVoltage = 0; - table->VceLevel[count].MinVoltage |= - (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - - table->VceLevel[count].MinVoltage |= - (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /*retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->VceLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for VCE engine clock", - return result); - - table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); - } - return result; -} - -static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].MinVoltage = 0; - 
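/*
 * Illustrative sketch, not part of the patch: the VCE code above and the
 * SAMU/UVD code that follows all choose VDDCI for a multimedia level the
 * same way: snap to the closest supported VDDCI when it is GPIO controlled,
 * track VDDC minus a fixed delta on SVI2, otherwise keep the VBIOS boot
 * value.  Simplified standalone version; the sketch_* names, the 200 mV
 * delta and the ">= target" lookup rule are assumptions standing in for
 * VDDC_VDDCI_DELTA and phm_find_closest_vddci().
 */
#include <stdint.h>

enum sketch_vddci_control {
        SKETCH_CTRL_NONE,
        SKETCH_CTRL_BY_GPIO,
        SKETCH_CTRL_BY_SVID2,
};

#define SKETCH_VDDC_VDDCI_DELTA 200     /* mV, assumed */

/* stand-in for phm_find_closest_vddci(): first table entry >= target */
static uint16_t sketch_closest_vddci(const uint16_t *table, int count,
                                     uint16_t target_mv)
{
        int i;

        for (i = 0; i < count; i++)
                if (table[i] >= target_mv)
                        return table[i];
        return table[count - 1];
}

static uint16_t sketch_pick_vddci(enum sketch_vddci_control ctrl,
                                  const uint16_t *vddci_table, int count,
                                  uint16_t vddc_mv, uint16_t boot_vddci_mv)
{
        switch (ctrl) {
        case SKETCH_CTRL_BY_GPIO:   /* GPIO/SMIO regulator: use a table entry */
                return sketch_closest_vddci(vddci_table, count,
                                            vddc_mv - SKETCH_VDDC_VDDCI_DELTA);
        case SKETCH_CTRL_BY_SVID2:  /* SVI2: VDDCI follows VDDC minus the delta */
                return vddc_mv - SKETCH_VDDC_VDDCI_DELTA;
        default:                    /* no VDDCI control: VBIOS boot-up value */
                return boot_vddci_mv;
        }
}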
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - -static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, - int32_t eng_clock, int32_t mem_clock, - SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs) -{ - uint32_t dram_timing; - uint32_t dram_timing2; - uint32_t burst_time; - int result; - - result = atomctrl_set_engine_dram_timings_rv770(hwmgr, - eng_clock, mem_clock); - PP_ASSERT_WITH_CODE(result == 0, - "Error calling VBIOS to set DRAM_TIMING.", return result); - - dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - - - arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); - arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); - arb_regs->McArbBurstTime = (uint8_t)burst_time; - - return 0; -} - -static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct SMU74_Discrete_MCArbDramTimingTable arb_regs; - uint32_t i, j; - int result = 0; - - for (i = 0; i < data->dpm_table.sclk_table.count; i++) { - for (j = 0; j < data->dpm_table.mclk_table.count; j++) { - result = polaris10_populate_memory_timing_parameters(hwmgr, - data->dpm_table.sclk_table.dpm_levels[i].value, - data->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - if (result == 0) - result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j); - if (result != 0) - return result; - } - } - - result = polaris10_copy_bytes_to_smc( - hwmgr->smumgr, - data->arb_table_start, - (uint8_t *)&arb_regs, - sizeof(SMU74_Discrete_MCArbDramTimingTable), - data->sram_end); - return result; -} - -static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->UvdLevelCount = (uint8_t)(mm_table->count); - table->UvdBootLevel = 0; - - 
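/*
 * Illustrative sketch, not part of the patch: the PP_HOST_TO_SMC_* and
 * CONVERT_FROM_HOST_TO_SMC_* helpers used throughout these table builders
 * exist because the SMC firmware reads the uploaded structures as
 * big-endian.  On a little-endian host they amount to a byte swap applied
 * before the table is copied to SMC SRAM, roughly:
 */
#include <stdint.h>

static inline uint32_t sketch_host_to_smc_ul(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

static inline uint16_t sketch_host_to_smc_us(uint16_t x)
{
        return (uint16_t)((x << 8) | (x >> 8));
}

/* CONVERT_FROM_HOST_TO_SMC_UL(x) is then just x = host_to_smc(x) done in
 * place; the sketch_* names are mine, the driver uses cpu_to_be32()-style
 * conversions for this. */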
for (count = 0; count < table->UvdLevelCount; count++) { - table->UvdLevel[count].MinVoltage = 0; - table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; - table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; - table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - - if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].VclkFrequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Vclk clock", return result); - - table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; - - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].DclkFrequency, ÷rs); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Dclk clock", return result); - - table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); - } - - return result; -} - -static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - int result = 0; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - table->GraphicsBootLevel = 0; - table->MemoryBootLevel = 0; - - /* find boot level from dpm table */ - result = phm_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(table->GraphicsBootLevel)); - - result = phm_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(table->MemoryBootLevel)); - - table->BootVddc = data->vbios_boot_state.vddc_bootup_value * - VOLTAGE_SCALE; - table->BootVddci = data->vbios_boot_state.vddci_bootup_value * - VOLTAGE_SCALE; - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * - VOLTAGE_SCALE; - - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); - CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); - CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); - - return 0; -} - - -static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint8_t count, level; - - count = (uint8_t)(table_info->vdd_dep_on_sclk->count); - - for (level = 0; level < count; level++) { - if (table_info->vdd_dep_on_sclk->entries[level].clk >= - data->vbios_boot_state.sclk_bootup_value) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t)(table_info->vdd_dep_on_mclk->count); - for (level = 0; level < count; level++) { - if (table_info->vdd_dep_on_mclk->entries[level].clk >= - data->vbios_boot_state.mclk_bootup_value) { - data->smc_state_table.MemoryBootLevel = level; 
- break; - } - } - - return 0; -} - -static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) -{ - uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t i, stretch_amount, volt_offset = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - - stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; - - /* Read SMU_Eefuse to read and calculate RO and determine - * if the part is SS or FF. if RO >= 1660MHz, part is FF. - */ - efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (67 * 4)); - efuse &= 0xFF000000; - efuse = efuse >> 24; - - if (hwmgr->chip_id == CHIP_POLARIS10) { - min = 1000; - max = 2300; - } else { - min = 1100; - max = 2100; - } - - ro = efuse * (max -min)/255 + min; - - /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ - for (i = 0; i < sclk_table->count; i++) { - data->smc_state_table.Sclk_CKS_masterEn0_7 |= - sclk_table->entries[i].cks_enable << i; - if (hwmgr->chip_id == CHIP_POLARIS10) { - volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \ - (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); - volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ - (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); - } else { - volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \ - (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); - volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ - (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); - } - - if (volt_without_cks >= volt_with_cks) - volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + - sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); - - data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; - } - - data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; - /* Populate CKS Lookup Table */ - if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 && - stretch_amount != 4 && stretch_amount != 5) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - PP_ASSERT_WITH_CODE(false, - "Stretch Amount in PPTable not supported\n", - return -EINVAL); - } - - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); - value &= 0xFFFFFFFE; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); - - return 0; -} - -/** -* Populates the SMC VRConfig field in DPM table. 
-* -* @param hwmgr the address of the hardware manager -* @param table the SMC DPM table structure to be populated -* @return always 0 -*/ -static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, - struct SMU74_Discrete_DpmTable *table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint16_t config; - - config = VR_MERGED_WITH_VDDC; - table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); - - /* Set Vddc Voltage Controller */ - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_1; - table->VRConfig |= config; - } else { - PP_ASSERT_WITH_CODE(false, - "VDDC should be on SVI2 control in merged mode!", - ); - } - /* Set Vddci Voltage Controller */ - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { - config = VR_SVI2_PLANE_2; /* only in merged mode */ - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { - config = VR_SMIO_PATTERN_1; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); - } - /* Set Mvdd Voltage Controller */ - if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { - config = VR_SVI2_PLANE_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + - offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); - } else { - config = VR_STATIC_VOLTAGE; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } - - return 0; -} - - -int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - int result = 0; - struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; - AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; - AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; - uint32_t tmp, i; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - - - if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) - return result; - - result = atomctrl_get_avfs_information(hwmgr, &avfs_params); - - if (0 == result) { - table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); - table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); - table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); - table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); - table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); - table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); - table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); - table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); - table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); - table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; - table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; - table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); - 
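/*
 * Illustrative sketch, not part of the patch: polaris10_populate_vr_config()
 * above packs one regulator-configuration code per rail (VDDC, VDDCI, MVDD,
 * VDDGFX) into the single VRConfig word.  Reading the VRCONF_*_SHIFT usage
 * as one byte-wide field per rail at shifts 0/8/16/24 (an assumption, as are
 * the sketch_* names), the word is built like this:
 */
#include <stdint.h>

#define SK_VRCONF_VDDC_SHIFT   0        /* assumed field positions */
#define SK_VRCONF_VDDCI_SHIFT  8
#define SK_VRCONF_MVDD_SHIFT   16
#define SK_VRCONF_VDDGFX_SHIFT 24

static uint32_t sketch_vr_config(uint8_t vddc_cfg, uint8_t vddci_cfg,
                                 uint8_t mvdd_cfg, uint8_t vddgfx_cfg)
{
        return ((uint32_t)vddc_cfg   << SK_VRCONF_VDDC_SHIFT)  |
               ((uint32_t)vddci_cfg  << SK_VRCONF_VDDCI_SHIFT) |
               ((uint32_t)mvdd_cfg   << SK_VRCONF_MVDD_SHIFT)  |
               ((uint32_t)vddgfx_cfg << SK_VRCONF_VDDGFX_SHIFT);
}

/* In the merged case handled above this would be filled with VDDGFX merged
 * into VDDC (VR_MERGED_WITH_VDDC), VDDC on SVI2 plane 1, VDDCI on SVI2
 * plane 2 or an SMIO pattern, and MVDD either SVI2 plane 2 or static. */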
table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); - table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); - table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; - table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; - table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); - AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); - AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); - AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); - AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); - AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); - AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); - AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); - - for (i = 0; i < NUM_VFT_COLUMNS; i++) { - AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); - AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); - } - - result = polaris10_read_smc_sram_dword(smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), - &tmp, data->sram_end); - - polaris10_copy_bytes_to_smc(smumgr, - tmp, - (uint8_t *)&AVFS_meanNsigma, - sizeof(AVFS_meanNsigma_t), - data->sram_end); - - result = polaris10_read_smc_sram_dword(smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), - &tmp, data->sram_end); - polaris10_copy_bytes_to_smc(smumgr, - tmp, - (uint8_t *)&AVFS_SclkOffset, - sizeof(AVFS_Sclk_Offset_t), - data->sram_end); - - data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | - (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | - (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | - (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); - data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; - } - return result; -} - - -/** -* Initializes the SMC table and uploads it -* -* @param hwmgr the address of the powerplay hardware manager. 
-* @return always 0 -*/ -static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table); - const struct polaris10_ulv_parm *ulv = &(data->ulv); - uint8_t i; - struct pp_atomctrl_gpio_pin_assignment gpio_pin; - pp_atomctrl_clock_dividers_vi dividers; - - result = polaris10_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result); - - if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control) - polaris10_populate_smc_voltage_tables(hwmgr, table); - - table->SystemFlags = 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - - if (data->is_memory_gddr5) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - - if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) { - result = polaris10_populate_ulv_state(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT); - } - - result = polaris10_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result); - - result = polaris10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result); - - result = polaris10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Memory Level!", return result); - - result = polaris10_populate_smc_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result); - - result = polaris10_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result); - - result = polaris10_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - - /* Since only the initial state is completely set up at this point - * (the other states are just copies of the boot state) we only - * need to populate the ARB settings for the initial state. 
- */ - result = polaris10_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result); - - result = polaris10_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result); - - result = polaris10_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot Level!", return result); - - result = polaris10_populate_smc_initailial_state(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot State!", return result); - - result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate BAPM Parameters!", return result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - result = polaris10_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate Clock Stretcher Data Table!", - return result); - } - - result = polaris10_populate_avfs_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); - - table->CurrSclkPllRange = 0xff; - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - table_info->cac_dtp_table->usTargetOperatingTemp * - POLARIS10_Q88_FORMAT_CONVERSION_UNIT; - table->TemperatureLimitLow = - (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * - POLARIS10_Q88_FORMAT_CONVERSION_UNIT; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; - table->PCIeGenInterval = 1; - table->VRConfig = 0; - - result = polaris10_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate VRConfig setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { - table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; - } else { - table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin)) { - table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - /* Thermal Output GPIO */ - if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, - &gpio_pin)) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; - - /* For porlarity read GPIOPAD_A with assigned Gpio pin - * since VBIOS will program this register to set 'inactive state', - * driver can then determine 'active state' from this and - * program SMU with correct polarity - */ - table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) - & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 
1:0; - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; - - /* if required, combine VRHot/PCC with thermal out GPIO */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot) - && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal)) - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; - } else { - table->ThermOutGpio = 17; - table->ThermOutPolarity = 1; - table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; - } - - /* Populate BIF_SCLK levels into SMC DPM table */ - for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) { - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], ÷rs); - PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result); - - if (i == 0) - table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); - else - table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); - } - - for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++) - table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); - - CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); - CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); - CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); - CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); - CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); - CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); - - /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, SystemFlags), - (uint8_t *)&(table->SystemFlags), - sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController), - data->sram_end); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to upload dpm data to SMC memory!", return result); - - return 0; -} - -/** -* Initialize the ARB DRAM timing table's index field. -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always 0 -*/ -static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr) -{ - const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t tmp; - int result; - - /* This is a read-modify-write on the first byte of the ARB table. - * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure - * is the field 'current'. - * This solution is ugly, but we never write the whole table only - * individual fields in it. - * In reality this field should not be in that structure - * but in a soft register. 
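The mask and shift in the code below imply that, as read back by polaris10_read_smc_sram_dword(), the first byte of the structure occupies bits 31:24 of the dword: keeping the low 24 bits and OR-ing MC_CG_ARB_FREQ_F1 into the top byte rewrites only the 'current' field. For example, a read-back value of 0x00AABBCC would be written back as (MC_CG_ARB_FREQ_F1 << 24) | 0x00AABBCC.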
- */ - result = polaris10_read_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, &tmp, data->sram_end); - - if (result) - return result; - - tmp &= 0x00FFFFFF; - tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - - return polaris10_write_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, tmp, data->sram_end); -} - -static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableVRHotGPIOInterrupt); - - return 0; -} - -static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - SCLK_PWRMGT_OFF, 0); - return 0; -} - -static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); - - return 0; -} - -static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_ulv_parm *ulv = &(data->ulv); - - if (ulv->ulv_supported) - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); - - return 0; -} - -static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) - PP_ASSERT_WITH_CODE(false, - "Attempt to enable Master Deep Sleep switch failed!", - return -1); - } else { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) { - if (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MASTER_DeepSleep_OFF)) { - PP_ASSERT_WITH_CODE(false, - "Attempt to disable Master Deep Sleep switch failed!", - return -1); - } - } - - return 0; -} - -static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t soft_register_value = 0; - uint32_t handshake_disables_offset = data->soft_regs_start - + offsetof(SMU74_SoftRegisters, HandshakeDisables); - - /* enable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - - /* enable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { -/* Disable UVD - SMU handshake for MCLK. 
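The code below sets SMU7_UVD_MCLK_HANDSHAKE_DISABLE in the SMU74_SoftRegisters HandshakeDisables field before sending PPSMC_MSG_MCLKDPM_Enable, i.e. the UVD/SMU handshake for memory clock switches is turned off before MCLK DPM is started.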
*/ - soft_register_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, handshake_disables_offset); - soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - handshake_disables_offset, soft_register_value); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); - udelay(10); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); - } - - return 0; -} - -static int polaris10_start_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /*enable general power management */ - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 1); - - /* enable sclk deep sleep */ - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 1); - - /* prepare for PCIE DPM */ - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - data->soft_regs_start + offsetof(SMU74_SoftRegisters, - VoltageChangeTimeout), 0x1000); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, - SWRST_COMMAND_1, RESETLC, 0x0); -/* - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); -*/ - - if (polaris10_enable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); - return -1; - } - - /* enable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start Function!", - return -1); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableACDCGPIOInterrupt)), - "Failed to enable AC DC GPIO Interrupt!", - ); - } - - return 0; -} - -static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* disable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable) == 0), - "Failed to disable SCLK DPM!", - return -1); - - /* disable MCLK dpm */ - if (!data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable) == 0), - "Failed to disable MCLK DPM!", - return -1); - } - - return 0; -} - -static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* disable general power management */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - GLOBAL_PWRMGT_EN, 0); - /* disable sclk deep sleep */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 
SCLK_PWRMGT_CNTL, - DYNAMIC_PM_EN, 0); - - /* disable PCIE dpm */ - if (!data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Disable) == 0), - "Failed to disable pcie DPM during DPM Stop Function!", - return -1); - } - - if (polaris10_disable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); - return -1; - } - - return 0; -} - -static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) -{ - bool protection; - enum DPM_EVENT_SRC src; - - switch (sources) { - default: - printk(KERN_ERR "Unknown throttling event sources."); - /* fall through */ - case 0: - protection = false; - /* src is unused */ - break; - case (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL; - break; - case (1 << PHM_AutoThrottleSource_External): - protection = true; - src = DPM_EVENT_SRC_EXTERNAL; - break; - case (1 << PHM_AutoThrottleSource_External) | - (1 << PHM_AutoThrottleSource_Thermal): - protection = true; - src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; - break; - } - /* Order matters - don't enable thermal protection for the wrong source. */ - if (protection) { - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, - DPM_EVENT_SRC, src); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, - !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)); - } else - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, - THERMAL_PROTECTION_DIS, 1); -} - -static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!(data->active_auto_throttle_sources & (1 << source))) { - data->active_auto_throttle_sources |= 1 << source; - polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, - PHM_AutoThrottleSource source) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->active_auto_throttle_sources & (1 << source)) { - data->active_auto_throttle_sources &= ~(1 << source); - polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); - } - return 0; -} - -static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) -{ - return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); -} - -int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - data->pcie_performance_request = true; - - return 0; -} - -int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 
0 : -1; - PP_ASSERT_WITH_CODE(result == 0, - "DPM is already running right now, no need to enable DPM!", - return 0); - - if (polaris10_voltage_control(hwmgr)) { - tmp_result = polaris10_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE(tmp_result == 0, - "Failed to enable voltage control!", - result = tmp_result); - - tmp_result = polaris10_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", - result = tmp_result); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); - - tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", - result = tmp_result); - - tmp_result = polaris10_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", result = tmp_result); - - tmp_result = polaris10_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", result = tmp_result); - - tmp_result = polaris10_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", result = tmp_result); - - tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", - result = tmp_result); - - tmp_result = polaris10_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", result = tmp_result); - - tmp_result = polaris10_init_arb_table_index(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize ARB table index!", result = tmp_result); - - tmp_result = polaris10_populate_pm_fuses(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate PM fuses!", result = tmp_result); - - tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable VR hot GPIO interrupt!", result = tmp_result); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); - - tmp_result = polaris10_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", result = tmp_result); - - tmp_result = polaris10_enable_smc_voltage_controller(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable voltage control!", result = tmp_result); - - tmp_result = polaris10_enable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ULV!", result = tmp_result); - - tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable deep sleep master switch!", result = tmp_result); - - tmp_result = polaris10_enable_didt_config(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to enable deep sleep master switch!", result = tmp_result); - - tmp_result = polaris10_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", result = tmp_result); - - tmp_result = polaris10_enable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SMC CAC!", result = 
tmp_result); - - tmp_result = polaris10_enable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable power containment!", result = tmp_result); - - tmp_result = polaris10_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to power control set level!", result = tmp_result); - - tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable thermal auto throttle!", result = tmp_result); - - tmp_result = polaris10_pcie_performance_request(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "pcie performance request failed!", result = tmp_result); - - return result; -} - -int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1; - PP_ASSERT_WITH_CODE(tmp_result == 0, - "DPM is not running right now, no need to disable DPM!", - return 0); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalController)) - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); - - tmp_result = polaris10_disable_power_containment(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable power containment!", result = tmp_result); - - tmp_result = polaris10_disable_smc_cac(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable SMC CAC!", result = tmp_result); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); - - tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable thermal auto throttle!", result = tmp_result); - - tmp_result = polaris10_stop_dpm(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to stop DPM!", result = tmp_result); - - tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable deep sleep master switch!", result = tmp_result); - - tmp_result = polaris10_disable_ulv(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to disable ULV!", result = tmp_result); - - tmp_result = polaris10_clear_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to clear voting clients!", result = tmp_result); - - tmp_result = polaris10_reset_to_default(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to reset to default!", result = tmp_result); - - tmp_result = polaris10_force_switch_to_arbf0(hwmgr); - PP_ASSERT_WITH_CODE((tmp_result == 0), - "Failed to force to switch arbf0!", result = tmp_result); - - return result; -} - -int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) -{ - - return 0; -} - -int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - - if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE) - 
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPowerManagement); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMC); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); - - /* power tune caps Assume disabled */ - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SQRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - if (hwmgr->powercontainment_enabled) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - else - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CAC); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM); - - if (hwmgr->chip_id == CHIP_POLARIS11) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SPLLShutdownSupport); - return 0; -} - -static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - polaris10_initialize_power_tune_defaults(hwmgr); - - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; -} - -/** -* Get Leakage VDDC based on leakage ID. -* -* @param hwmgr the address of the powerplay hardware manager. 
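polaris10_get_evv_voltages() below handles voltages in units of 0.01 mV: the sanity check "vddc < 200000" caps the EVV result at 2 V, and the value stored in vddc_leakage.actual_voltage is divided by 100, i.e. kept in millivolts. Each recorded entry pairs a virtual leakage ID (ATOM_VIRTUAL_VOLTAGE_ID0 + i) with the measured voltage so the dependency tables can be patched afterwards.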
-* @return always 0 -*/ -static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint16_t vv_id; - uint32_t vddc = 0; - uint16_t i, j; - uint32_t sclk = 0; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - int result; - - for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) { - vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - if (!phm_get_sclk_for_voltage_evv(hwmgr, - table_info->vddc_lookup_table, vv_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - for (j = 1; j < sclk_table->count; j++) { - if (sclk_table->entries[j].clk == sclk && - sclk_table->entries[j].cks_enable == 0) { - sclk += 5000; - break; - } - } - } - - if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, - VOLTAGE_TYPE_VDDC, - sclk, vv_id, &vddc) != 0) { - printk(KERN_WARNING "failed to retrieving EVV voltage!\n"); - continue; - } - - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. - * real voltage level in unit of 0.01mv */ - PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0), - "Invalid VDDC value", result = -EINVAL;); - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddc != 0 && vddc != vv_id) { - data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100); - data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; - data->vddc_leakage.count++; - } - } - } - - return 0; -} - -/** - * Change virtual leakage voltage to actual value. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pointer to changing voltage - * @param pointer to leakage table - */ -static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, - uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table) -{ - uint32_t index; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (index = 0; index < leakage_table->count; index++) { - /* if this voltage matches a leakage voltage ID */ - /* patch with actual leakage voltage */ - if (leakage_table->leakage_id[index] == *voltage) { - *voltage = leakage_table->actual_voltage[index]; - break; - } - } - - if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); -} - -/** -* Patch voltage lookup table by EVV leakages. -* -* @param hwmgr the address of the powerplay hardware manager. 
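polaris10_patch_with_vdd_leakage() above replaces a virtual leakage ID (the 0xff01..0xff08 range mentioned in its comment) with the actual voltage recorded during EVV retrieval; the helpers documented below simply apply that substitution to every entry of the lookup and clock-voltage dependency tables. A minimal usage sketch, assuming a leakage table that holds the single hypothetical pair (0xff01 -> 1150 mV):

	uint16_t vddc = 0xff01;	/* virtual leakage ID as found in the BIOS tables */
	polaris10_patch_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
	/* vddc now holds 1150, the voltage recorded by polaris10_get_evv_voltages() */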
-* @param pointer to voltage lookup table -* @param pointer to leakage table -* @return always 0 -*/ -static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - struct polaris10_leakage_voltage *leakage_table) -{ - uint32_t i; - - for (i = 0; i < lookup_table->count; i++) - polaris10_patch_with_vdd_leakage(hwmgr, - &lookup_table->entries[i].us_vdd, leakage_table); - - return 0; -} - -static int polaris10_patch_clock_voltage_limits_with_vddc_leakage( - struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table, - uint16_t *vddc) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); - hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = - table_info->max_clock_voltage_on_dc.vddc; - return 0; -} - -static int polaris10_patch_voltage_dependency_tables_with_lookup_table( - struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = - table_info->vdd_dep_on_mclk; - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - voltageId = mclk_table->entries[entryId].vddInd; - mclk_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mm_table->count; ++entryId) { - voltageId = mm_table->entries[entryId].vddcInd; - mm_table->entries[entryId].vddc = - table_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - return 0; - -} - -static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage. */ - return 0; -} - -static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) -{ - /* Need to determine if we need calculated voltage from mm table. 
*/ - return 0; -} - -static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr, - struct phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; - table_size = lookup_table->count; - - PP_ASSERT_WITH_CODE(0 != lookup_table->count, - "Lookup table is empty", return -EINVAL); - - /* Sorting voltages */ - for (i = 0; i < table_size - 1; i++) { - for (j = i + 1; j > 0; j--) { - if (lookup_table->entries[j].us_vdd < - lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; - } - } - } - - return 0; -} - -static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr) -{ - int result = 0; - int tmp_result; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr, - table_info->vddc_lookup_table, &(data->vddc_leakage)); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, - &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr); - if (tmp_result) - result = tmp_result; - - tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); - if (tmp_result) - result = tmp_result; - - return result; -} - -static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = - table_info->vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = - table_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, - "VDD dependency on SCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, - "VDD dependency on SCLK table has to have is missing. \ - This table is mandatory", return -EINVAL); - - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, - "VDD dependency on MCLK table is missing. \ - This table is mandatory", return -EINVAL); - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
\ - This table is mandatory", return -EINVAL); - - table_info->max_clock_voltage_on_ac.sclk = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.mclk = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; - table_info->max_clock_voltage_on_ac.vddc = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - table_info->max_clock_voltage_on_ac.vddci = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =table_info->max_clock_voltage_on_ac.vddci; - - return 0; -} - -int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) -{ - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - struct phm_ppt_v1_voltage_lookup_table *lookup_table = - table_info->vddc_lookup_table; - uint32_t i; - - if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) { - if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) - return 0; - - for (i = 0; i < lookup_table->count; i++) { - if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { - dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; - return 0; - } - } - } - return 0; -} - - -int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data; - struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - uint32_t temp_reg; - int result; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; - - data->dll_default_on = false; - data->sram_end = SMC_RAM_END; - data->mclk_dpm0_activity_target = 0xa; - data->disable_dpm_mask = 0xFF; - data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT; - data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT; - data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT; - data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT; - - data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7; - - data->vddc_vddci_delta = 
VDDC_VDDCI_DELTA; - - data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT; - - /* need to set voltage control types before EVV patching */ - data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE; - data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; - data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; - - data->enable_tdc_limit_feature = true; - data->enable_pkg_pwr_tracking_feature = true; - data->force_pcie_gen = PP_PCIEGenInvalid; - data->mclk_stutter_mode_threshold = 40000; - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) - data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) - data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) - data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; - } - - if (table_info->cac_dtp_table->usClockStretchAmount != 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - - polaris10_set_features_platform_caps(hwmgr); - - polaris10_patch_voltage_workaround(hwmgr); - polaris10_init_dpm_defaults(hwmgr); - - /* Get leakage voltage based on leakage ID. */ - result = polaris10_get_evv_voltages(hwmgr); - - if (result) { - printk("Get EVV Voltage Failed. Abort Driver loading!\n"); - return -1; - } - - polaris10_complete_dependency_tables(hwmgr); - polaris10_set_private_data_based_on_pptable(hwmgr); - - /* Initalize Dynamic State Adjustment Rule Settings */ - result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - - if (0 == result) { - struct cgs_system_info sys_info = {0}; - - data->is_tlu_enabled = false; - - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - POLARIS10_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { - temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); - switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { - case 0: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); - break; - case 1: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); - break; - case 2: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); - break; - case 3: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); - break; - case 4: - temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); - break; - default: - PP_ASSERT_WITH_CODE(0, - "Failed to setup PCC HW register! 
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", - ); - break; - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); - } - - if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && - hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { - hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = - (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = - (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = - (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; - - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; - - table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? - (table_info->cac_dtp_table->usDefaultTargetOperatingTemp -50) : 0; - - table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; - table_info->cac_dtp_table->usOperatingTempStep = 1; - table_info->cac_dtp_table->usOperatingTempHyst = 1; - - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; - - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; - - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = - table_info->cac_dtp_table->usOperatingTempMinLimit; - - hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = - table_info->cac_dtp_table->usOperatingTempMaxLimit; - - hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = - table_info->cac_dtp_table->usDefaultTargetOperatingTemp; - - hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = - table_info->cac_dtp_table->usOperatingTempStep; - - hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = - table_info->cac_dtp_table->usTargetOperatingTemp; - } - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - - hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ -/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ - hwmgr->platform_descriptor.clockStep.engineClock = 500; - hwmgr->platform_descriptor.clockStep.memoryClock = 500; - } else { - /* Ignore return value in here, we are cleaning up a mess. 
*/ - polaris10_hwmgr_backend_fini(hwmgr); - } - - return 0; -} - -static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t level, tmp; - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++; - - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, level); - } - } - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++; - - if (level) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - return 0; -} - -static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - phm_apply_dal_min_voltage_request(hwmgr); - - if (!data->sclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - } - - return 0; -} - -static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!polaris10_is_dpm_running(hwmgr)) - return -EINVAL; - - if (!data->pcie_dpm_key_disabled) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel); - } - - return polaris10_upload_dpm_level_enable_mask(hwmgr); -} - -static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = - (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t level; - - if (!data->sclk_dpm_key_disabled) - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = phm_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - (1 << level)); - - } - - if (!data->mclk_dpm_key_disabled) { - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - level = phm_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - (1 << level)); - } - } - - if (!data->pcie_dpm_key_disabled) { - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { - level = phm_get_lowest_enabled_level(hwmgr, - data->dpm_level_enable_mask.pcie_dpm_enable_mask); - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - (level)); - } - } - - return 0; - -} -static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - 
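polaris10_force_dpm_highest() above derives the index of the highest enabled DPM level by shifting the enable mask right until it is empty; the SCLK/MCLK paths then send a single-bit mask (1 << level) while the PCIe path sends the level index itself. A minimal sketch of that bit scan, with illustrative values only:

	uint32_t mask = 0x16;	/* levels 1, 2 and 4 enabled */
	uint32_t level = 0;

	while (mask >>= 1)
		level++;	/* ends with level == 4, the highest set bit */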
case AMD_DPM_FORCED_LEVEL_HIGH: - ret = polaris10_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = polaris10_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = polaris10_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - - return ret; -} - -static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct polaris10_power_state); -} - - -static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *request_ps, - const struct pp_power_state *current_ps) -{ - - struct polaris10_power_state *polaris10_ps = - cast_phw_polaris10_power_state(&request_ps->hardware); - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == - request_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2, - "VI should always have 2 performance levels", - ); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - /* Cap clock DPM tables at DC MAX if it is in DC. */ - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < polaris10_ps->performance_level_count; i++) { - if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk) - polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk) - polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; - polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - - /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = table_info->vdd_dep_on_sclk->count - 1; - count >= 0; count--) { - if (stable_pstate_sclk >= - table_info->vdd_dep_on_sclk->entries[count].clk) { - stable_pstate_sclk = - table_info->vdd_dep_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - 
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - polaris10_ps->performance_levels[1].engine_clock = - hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= - hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = - hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - polaris10_ps->performance_levels[1].memory_clock = - hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = polaris10_ps->performance_levels[0].engine_clock; - mclk = polaris10_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? - max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? - max_limits->mclk : minimum_clocks.memoryClock; - - polaris10_ps->performance_levels[0].engine_clock = sclk; - polaris10_ps->performance_levels[0].memory_clock = mclk; - - polaris10_ps->performance_levels[1].engine_clock = - (polaris10_ps->performance_levels[1].engine_clock >= - polaris10_ps->performance_levels[0].engine_clock) ? 
- polaris10_ps->performance_levels[1].engine_clock : - polaris10_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < polaris10_ps->performance_levels[1].memory_clock) - mclk = polaris10_ps->performance_levels[1].memory_clock; - - polaris10_ps->performance_levels[0].memory_clock = mclk; - polaris10_ps->performance_levels[1].memory_clock = mclk; - } else { - if (polaris10_ps->performance_levels[1].memory_clock < - polaris10_ps->performance_levels[0].memory_clock) - polaris10_ps->performance_levels[1].memory_clock = - polaris10_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) { - for (i = 0; i < polaris10_ps->performance_level_count; i++) { - polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - polaris10_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - return 0; -} - - -static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - if (low) - return polaris10_ps->performance_levels[0].memory_clock; - else - return polaris10_ps->performance_levels - [polaris10_ps->performance_level_count-1].memory_clock; -} - -static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - if (low) - return polaris10_ps->performance_levels[0].engine_clock; - else - return polaris10_ps->performance_levels - [polaris10_ps->performance_level_count-1].engine_clock; -} - -static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. 
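The boot clocks and voltages patched in below are read straight from the ATOM_FIRMWARE_INFO_V2_2 table, so each field goes through le32_to_cpu()/le16_to_cpu() because ATOM BIOS tables are stored little-endian; the boot PCIe gen and lane count are instead queried from the hardware via the phm helpers.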
*/ - data->vbios_boot_state.sclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = - le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = - le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = - le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = - phm_get_current_pcie_speed(hwmgr); - - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)phm_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - void *state, struct pp_power_state *power_state, - void *pp_table, uint32_t classification_flag) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_power_state *polaris10_power_state = - (struct polaris10_power_state *)(&(power_state->hardware)); - struct polaris10_performance_level *performance_level; - ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; - ATOM_Tonga_POWERPLAYTABLE *powerplay_table = - (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - PPTable_Generic_SubTable_Header *sclk_dep_table = - (PPTable_Generic_SubTable_Header *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); - - ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = - (ATOM_Tonga_MCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - - /* The following fields are not initialized here: id orderedList allStatesList */ - power_state->classification.ui_label = - (le16_to_cpu(state_entry->usClassification) & - ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> - ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; - power_state->classification.flags = classification_flag; - /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ - - power_state->classification.temporary_state = false; - power_state->classification.to_be_deleted = false; - - power_state->validation.disallowOnDC = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_DISALLOW_ON_DC)); - - power_state->pcie.lanes = 0; - - power_state->display.disableFrameModulation = false; - power_state->display.limitRefreshrate = false; - power_state->display.enableVariBright = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & - ATOM_Tonga_ENABLE_VARIBRIGHT)); - - power_state->validation.supportedPowerLevels = 0; - power_state->uvd_clocks.VCLK = 0; - power_state->uvd_clocks.DCLK = 0; - power_state->temperatures.min = 0; - power_state->temperatures.max = 0; - - performance_level = &(polaris10_power_state->performance_levels - [polaris10_power_state->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (polaris10_power_state->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - 
"Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged from low to high. */ - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexLow].ulMclk; - if (sclk_dep_table->ucRevId == 0) - performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexLow].ulSclk; - else if (sclk_dep_table->ucRevId == 1) - performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexLow].ulSclk; - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenLow); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - performance_level = &(polaris10_power_state->performance_levels - [polaris10_power_state->performance_level_count++]); - performance_level->memory_clock = mclk_dep_table->entries - [state_entry->ucMemoryClockIndexHigh].ulMclk; - - if (sclk_dep_table->ucRevId == 0) - performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexHigh].ulSclk; - else if (sclk_dep_table->ucRevId == 1) - performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries - [state_entry->ucEngineClockIndexHigh].ulSclk; - - performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, - state_entry->ucPCIEGenHigh); - performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - return 0; -} - -static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *state) -{ - int result; - struct polaris10_power_state *ps; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - state->hardware.magic = PHM_VIslands_Magic; - - ps = (struct polaris10_power_state *)(&state->hardware); - - result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state, - polaris10_get_pp_table_entry_callback_func); - - /* This is the earliest time we have all the dependency table and the VBIOS boot state - * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state - * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].vddci != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does not match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!state->validation.disallowOnDC) - ps->dc_compatible = true; - - if (state->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; - - ps->uvd_clks.vclk = state->uvd_clocks.VCLK; - ps->uvd_clks.dclk = state->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (state->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - for (i 
= 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - ps->performance_levels[i].pcie_lane; - if (data->pcie_lane_performance.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static void -polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent; - uint32_t offset; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", - mclk / 100, sclk / 100); - - offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
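
The GPU-load readout above treats AverageGraphicsActivity as an 8.8 fixed-point value: adding 0x80 rounds to the nearest integer before the shift by 8, and the result is clamped at 100. A standalone sketch of the same conversion:

#include <stdint.h>

/* Convert an 8.8 fixed-point activity counter to a 0-100 percentage,
 * mirroring the (raw + 0x80) >> 8 rounding used above. */
static uint32_t activity_to_percent(uint32_t raw)
{
	uint32_t percent = (raw + 0x80) >> 8;   /* round to nearest integer percent */

	return percent > 100 ? 100 : percent;
}
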
"dis" : "en"); -} - -static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].engine_clock; - struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].memory_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* TODO: Check SCLK in DAL's minimum clocks - * in case DeepSleep divider update is required. - */ - if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && - (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK || - data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK)) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - - if (i >= mclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr, - const struct polaris10_power_state *polaris10_ps) -{ - uint32_t i; - uint32_t sclk, max_sclk = 0; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - - for (i = 0; i < polaris10_ps->performance_level_count; i++) { - sclk = polaris10_ps->performance_levels[i].engine_clock; - if (max_sclk < sclk) - max_sclk = sclk; - } - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) - return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? 
- dpm_table->pcie_speed_table.dpm_levels - [dpm_table->pcie_speed_table.count - 1].value : - dpm_table->pcie_speed_table.dpm_levels[i].value); - } - - return 0; -} - -static int polaris10_request_link_speed_change_before_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_nps = - cast_const_phw_polaris10_power_state(states->pnew_state); - const struct polaris10_power_state *polaris10_cps = - cast_const_phw_polaris10_power_state(states->pcurrent_state); - - uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - - if (target_link_speed > current_link_speed) { - switch (target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int polaris10_populate_and_upload_sclk_mclk_dpm_levels( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t sclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].engine_clock; - uint32_t mclk = polaris10_ps->performance_levels - [polaris10_ps->performance_level_count - 1].memory_clock; - struct polaris10_dpm_table *dpm_table = &data->dpm_table; - - struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t 
i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { - dpm_table->sclk_table.dpm_levels - [dpm_table->sclk_table.count - 1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (golden_dpm_table->sclk_table.dpm_levels - [golden_dpm_table->sclk_table.count - 1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2; - - for (i = dpm_count; i > 1; i--) { - if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) { - clock_percent = - ((sclk - - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value - ) * 100) - / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value + - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent)/100; - - } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) { - clock_percent = - ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value - - sclk) * 100) - / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; - - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value - - (golden_dpm_table->sclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->sclk_table.dpm_levels[i].value = - golden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - dpm_table->mclk_table.dpm_levels - [dpm_table->mclk_table.count - 1].value = mclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (golden_dpm_table->mclk_table.dpm_levels - [golden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = dpm_table->mclk_table.count < 2 ? 
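
When the top SCLK level is overdriven, the intermediate levels above are rescaled by the same percentage the top level moved away from its value in the golden (default) table, so the clock curve keeps its shape. A standalone sketch of that arithmetic, assuming integer clock values in the same units as the tables:

#include <stdint.h>

/* Scale one intermediate DPM level by the percentage the top level moved
 * relative to its golden (default) value, as in the loop above. */
static uint32_t scale_dpm_level(uint32_t golden_level, uint32_t golden_top,
				uint32_t new_top)
{
	uint32_t percent;

	if (golden_top == 0)
		return golden_level;            /* the divide-by-zero the code asserts against */

	if (new_top > golden_top) {
		percent = (new_top - golden_top) * 100 / golden_top;
		return golden_level + golden_level * percent / 100;
	} else if (new_top < golden_top) {
		percent = (golden_top - new_top) * 100 / golden_top;
		return golden_level - golden_level * percent / 100;
	}

	return golden_level;                    /* top level unchanged: keep the default */
}
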
0 : dpm_table->mclk_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) { - clock_percent = ((mclk - - golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100) - / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value + - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - - } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ( - (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk) - * 100) - / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; - - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value - - (golden_dpm_table->mclk_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->mclk_table.dpm_levels[i].value = - golden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = polaris10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = polaris10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct polaris10_single_dpm_table *dpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) - || (dpm_table->dpm_levels[i].value > high_limit)) - dpm_table->dpm_levels[i].enabled = false; - else - dpm_table->dpm_levels[i].enabled = true; - } - - return 0; -} - -static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, - const struct polaris10_power_state *polaris10_ps) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == polaris10_ps->performance_level_count) ? 
0 : 1; - - polaris10_trim_single_dpm_states(hwmgr, - &(data->dpm_table.sclk_table), - polaris10_ps->performance_levels[0].engine_clock, - polaris10_ps->performance_levels[high_limit_count].engine_clock); - - polaris10_trim_single_dpm_states(hwmgr, - &(data->dpm_table.mclk_table), - polaris10_ps->performance_levels[0].memory_clock, - polaris10_ps->performance_levels[high_limit_count].memory_clock); - - return 0; -} - -static int polaris10_generate_dpm_level_enable_mask( - struct pp_hwmgr *hwmgr, const void *input) -{ - int result; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - - result = polaris10_trim_dpm_states(hwmgr, polaris10_ps); - if (result) - return result; - - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); - data->dpm_level_enable_mask.mclk_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); - - return 0; -} - -int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - PPSMC_MSG_UVDDPM_Enable : - PPSMC_MSG_UVDDPM_Disable); -} - -int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? - PPSMC_MSG_VCEDPM_Enable : - PPSMC_MSG_VCEDPM_Disable); -} - -int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable? 
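
After trimming, each DPM table is reduced to a bitmask with one bit per still-enabled level; those masks are what get sent to the SMC. A sketch of how such a mask can be built (hypothetical types, not the phm_get_dpm_level_enable_mask_value() helper itself):

#include <stdint.h>
#include <stdbool.h>

struct dpm_level { uint32_t value; bool enabled; };   /* hypothetical, mirrors the trim step above */

static uint32_t dpm_enable_mask(const struct dpm_level *levels, unsigned int count)
{
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		if (levels[i].enabled)
			mask |= 1u << i;        /* bit i enables DPM level i */

	return mask;
}
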
- PPSMC_MSG_SAMUDPM_Enable : - PPSMC_MSG_SAMUDPM_Disable); -} - -int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.UvdBootLevel = 0; - if (table_info->mm_dep_table->count > 0) - data->smc_state_table.UvdBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0x00FFFFFF; - mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDPM) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); - } - - return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate); -} - -int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - data->smc_state_table.VceBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); - else - data->smc_state_table.VceBootLevel = 0; - - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, VceBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFF00FFFF; - mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)1 << data->smc_state_table.VceBootLevel); - } - - polaris10_enable_disable_vce_dpm(hwmgr, !bgate); - - return 0; -} - -int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - if (!bgate) { - data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if 
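
The UVD/VCE/SAMU boot levels above are single bytes packed into one 32-bit word in SMC SRAM: the field offset is aligned down to a 4-byte boundary (the "/= 4; *= 4" pair), the word is read, the relevant byte is masked out, and the new value is OR-ed back in before the word is written back. A standalone sketch of that byte-within-word read-modify-write, with the register access stubbed out:

#include <stdint.h>

/* Replace one byte inside a 32-bit SMC word, as done above for UvdBootLevel
 * (byte 3, mask 0x00FFFFFF), VceBootLevel (byte 2) and SamuBootLevel (byte 0). */
static uint32_t set_boot_level_byte(uint32_t word, unsigned int byte_index,
				    uint8_t boot_level)
{
	uint32_t shift = byte_index * 8;

	word &= ~(0xffu << shift);              /* clear the target byte */
	word |= (uint32_t)boot_level << shift;  /* insert the new boot level */
	return word;
}

/* The containing word is found by aligning the field offset down first:
 *     offset = (offset / 4) * 4;
 */
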
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.SamuBootLevel)); - } - - return polaris10_enable_disable_samu_dpm(hwmgr, !bgate); -} - -static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != - data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = - hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = - data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = polaris10_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end); - } - - return result; -} - -static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return polaris10_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -static int polaris10_notify_link_speed_change_after_state_change( - struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_ps = - cast_const_phw_polaris10_power_state(states->pnew_state); - uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps); - uint8_t request; - - if (data->pspp_notify_required) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if (request == PCIE_PERF_REQ_GEN1 && - phm_get_current_pcie_speed(hwmgr) > 0) - return 0; - - if (acpi_pcie_perf_request(hwmgr->device, request, false)) { - if 
(PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - return 0; -} - -static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); - return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; -} - - - -static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) -{ - int tmp_result, result = 0; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to find DPM states clocks in DPM table!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - polaris10_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to request link speed change before state change!", - result = tmp_result); - } - - tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate and upload SCLK MCLK DPM levels!", - result = tmp_result); - - tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to generate DPM level enabled mask!", - result = tmp_result); - - tmp_result = polaris10_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update SCLK threshold!", - result = tmp_result); - - tmp_result = polaris10_program_mem_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program memory timing parameters!", - result = tmp_result); - - tmp_result = polaris10_notify_smc_display(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify smc display settings!", - result = tmp_result); - - tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to unfreeze SCLK MCLK DPM!", - result = tmp_result); - - tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to upload DPM level enabled mask!", - result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = - polaris10_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify link speed change after state change!", - result = tmp_result); - } - data->apply_optimized_settings = false; - return result; -} - -static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); -} - - -int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) -{ - PPSMC_Msg msg = has_display ? 
(PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; -} - -int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) -{ - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ - polaris10_notify_smc_display_change(hwmgr, false); - - - return 0; -} - -/** -* Programs the display gap -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always OK -*/ -int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if (0 == refresh_rate) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - data->frame_time_x2 = frame_time_in_us * 2 / 100; - - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - - - return 0; -} - - -int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - return polaris10_program_display_gap(hwmgr); -} - -/** -* Set maximum target operating fan output RPM -* -* @param hwmgr: the address of the powerplay hardware manager. -* @param usMaxFanRpm: max operating fan RPM value. -* @return The response that came from the SMC. -*/ -static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) -{ - hwmgr->thermal_controller. 
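
program_display_gap converts the refresh rate into microsecond budgets: a full frame time, the time before the vertical blank (frame time minus a 200 us margin and the vblank duration), and then scales those into the units the SMC expects. A standalone sketch of the arithmetic, assuming the same units as the code above:

#include <stdint.h>

struct display_gap_timing {
	uint32_t frame_time_us;     /* one full frame, in microseconds */
	uint32_t pre_vbi_time_us;   /* time from frame start to the vertical blank */
	uint32_t frame_time_x2;     /* 2x frame time in 100 us units, as stored in frame_time_x2 above */
	uint32_t display_gap2;      /* pre-VBI time expressed in (ref_clock / 100) ticks */
};

static struct display_gap_timing compute_display_gap(uint32_t refresh_rate,
						     uint32_t vblank_time_us,
						     uint32_t ref_clock)
{
	struct display_gap_timing t;

	if (refresh_rate == 0)
		refresh_rate = 60;                      /* same fallback as above */

	t.frame_time_us = 1000000 / refresh_rate;
	/* assumes vblank_time_us + 200 fits inside the frame, as the driver code does */
	t.pre_vbi_time_us = t.frame_time_us - 200 - vblank_time_us;
	t.frame_time_x2 = t.frame_time_us * 2 / 100;
	t.display_gap2 = t.pre_vbi_time_us * (ref_clock / 100);

	return t;
}
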
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); -} - -int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - return 0; -} - -bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; -/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL - if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - cgs_get_min_clock_settings(hwmgr->device, &min_clocks); - if (min_clocks.engineClockInSR != data->display_timing.minClockInSR && - (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK || - data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK)) - is_update_required = true; -*/ - return is_update_required; -} - -static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1, - const struct polaris10_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1); - const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2); - int i; - - if (pstate1 == NULL || pstate2 == NULL || equal == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); - *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - - return 0; -} - -int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - uint32_t vbios_version; - - /* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/ - - phm_get_mc_microcode_version(hwmgr); - vbios_version = hwmgr->microcode_version_info.MC & 0xf; - /* Full version of MC ucode has already been loaded. 
*/ - if (vbios_version == 0) { - data->need_long_memory_training = false; - return 0; - } - - data->need_long_memory_training = false; - -/* - * PPMCME_FirmwareDescriptorEntry *pfd = NULL; - pfd = &tonga_mcmeFirmware; - if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN)) - polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold, - pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray, - pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize); -*/ - return 0; -} - -/** - * Read clock related registers. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL) - & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK; - - data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2) - & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; - - data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4) - & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK; - - return 0; -} - -/** - * Find out if memory is GDDR5. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); - - data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); - - return 0; -} - -/** - * Enables Dynamic Power Management by SMC - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - GENERAL_PWRMGT, STATIC_PM_EN, 1); - - return 0; -} - -/** - * Initialize PowerGating States for different engines - * - * @param hwmgr the address of the powerplay hardware manager. 
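
Memory type detection above is a plain mask-and-shift on MC_SEQ_MISC0: the memory-type field is extracted and compared against the GDDR5 encoding. A standalone sketch of that field extraction; the mask, shift and expected value are placeholders, not the real register definitions:

#include <stdint.h>
#include <stdbool.h>

/* Placeholder field layout standing in for MC_SEQ_MISC0_GDDR5_MASK/SHIFT/VALUE. */
#define MEMTYPE_MASK   0xf0000000u
#define MEMTYPE_SHIFT  28
#define MEMTYPE_GDDR5  0x5u

static bool is_gddr5(uint32_t mc_seq_misc0)
{
	/* isolate the memory-type field and compare it to the GDDR5 encoding */
	return ((mc_seq_misc0 & MEMTYPE_MASK) >> MEMTYPE_SHIFT) == MEMTYPE_GDDR5;
}
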
- * @return always 0 - */ -static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - - return 0; -} - -static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - -int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - polaris10_upload_mc_firmware(hwmgr); - - tmp_result = polaris10_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = polaris10_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = polaris10_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = polaris10_init_power_gate_state(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init power gate state!", result = tmp_result); - - tmp_result = phm_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = polaris10_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - case PP_PCIE: - { - uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr) -{ - uint32_t speedCntl = 0; - - /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ - speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speedCntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, 
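
For PCIe the force-level message takes a level index rather than a mask, so the code above converts the requested mask into the index of its highest set bit with "while (tmp >>= 1) level++;". A standalone sketch of that conversion:

#include <stdint.h>

/* Index of the highest set bit in a level mask; an empty mask maps to
 * level 0, matching the loop above. */
static uint32_t highest_level_from_mask(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)
		level++;

	return level;
}
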
PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_PCIE: - pcie_speed = polaris10_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? "*" : ""); - break; - default: - break; - } - return size; -} - -static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - polaris10_fan_ctrl_stop_smc_fan_control(hwmgr); - polaris10_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct polaris10_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr 
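
The SCLK overdrive getter and setter express the top DPM level as a percentage above the golden default: get returns (top - golden_top) * 100 / golden_top, and set clamps the request to 20 percent and scales the golden top back up. A standalone round-trip sketch of both directions:

#include <stdint.h>

static int get_od_percent(uint32_t top, uint32_t golden_top)
{
	/* assumes top >= golden_top, i.e. only positive overdrive, as in the getter above */
	return (int)((top - golden_top) * 100 / golden_top);
}

static uint32_t apply_od_percent(uint32_t golden_top, uint32_t percent)
{
	if (percent > 20)
		percent = 20;                   /* same 20 percent cap as the setter above */

	return golden_top * percent / 100 + golden_top;
}
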
*)(hwmgr->backend); - struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct polaris10_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct polaris10_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct polaris10_power_state *polaris10_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); - - polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} -static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { - .backend_init = &polaris10_hwmgr_backend_init, - .backend_fini = &polaris10_hwmgr_backend_fini, - .asic_setup = &polaris10_setup_asic_task, - .dynamic_state_management_enable = &polaris10_enable_dpm_tasks, - .apply_state_adjust_rules = polaris10_apply_state_adjust_rules, - .force_dpm_level = &polaris10_force_dpm_level, - .power_state_set = polaris10_set_power_state_tasks, - .get_power_state_size = polaris10_get_power_state_size, - .get_mclk = polaris10_dpm_get_mclk, - .get_sclk = polaris10_dpm_get_sclk, - .patch_boot_state = polaris10_dpm_patch_boot_state, - .get_pp_table_entry = polaris10_get_pp_table_entry, - .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries, - .print_current_perforce_level = polaris10_print_current_perforce_level, - .powerdown_uvd = polaris10_phm_powerdown_uvd, - .powergate_uvd = polaris10_phm_powergate_uvd, - .powergate_vce = polaris10_phm_powergate_vce, - .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating, - .update_clock_gatings = polaris10_phm_update_clock_gatings, - .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = polaris10_display_configuration_changed_task, - .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output, - .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output, - .get_temperature = polaris10_thermal_get_temperature, - .stop_thermal_controller = polaris10_thermal_stop_thermal_controller, - .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt, - .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration, - .check_states_equal = polaris10_check_states_equal, - .set_fan_control_mode = 
polaris10_set_fan_control_mode, - .get_fan_control_mode = polaris10_get_fan_control_mode, - .force_clock_level = polaris10_force_clock_level, - .print_clock_levels = polaris10_print_clock_levels, - .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating, - .get_sclk_od = polaris10_get_sclk_od, - .set_sclk_od = polaris10_set_sclk_od, - .get_mclk_od = polaris10_get_mclk_od, - .set_mclk_od = polaris10_set_mclk_od, -}; - -int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - hwmgr->hwmgr_func = &polaris10_hwmgr_funcs; - hwmgr->pptable_func = &tonga_pptable_funcs; - pp_polaris10_thermal_initialize(hwmgr); - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c deleted file mode 100644 index b206632d4650..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c +++ /dev/null @@ -1,716 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include <asm/div64.h> -#include "polaris10_thermal.h" -#include "polaris10_hwmgr.h" -#include "polaris10_smumgr.h" -#include "polaris10_ppsmc.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" - -int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, - struct phm_fan_speed_info *fan_speed_info) -{ - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - fan_speed_info->supports_percent_read = true; - fan_speed_info->supports_percent_write = true; - fan_speed_info->min_percent = 0; - fan_speed_info->max_percent = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM) && - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - fan_speed_info->supports_rpm_read = true; - fan_speed_info->supports_rpm_write = true; - fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; - fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; - } else { - fan_speed_info->min_rpm = 0; - fan_speed_info->max_rpm = 0; - } - - return 0; -} - -int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, - uint32_t *speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_STATUS, FDO_PWM_DUTY); - - if (duty100 == 0) - return -EINVAL; - - - tmp64 = (uint64_t)duty * 100; - do_div(tmp64, duty100); - *speed = (uint32_t)tmp64; - - if (*speed > 100) - *speed = 100; - - return 0; -} - -int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - uint32_t tach_period; - uint32_t crystal_clock_freq; - - if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0)) - return 0; - - tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD); - - if (tach_period == 0) - return -EINVAL; - - crystal_clock_freq = tonga_get_xclk(hwmgr); - - *speed = 60 * crystal_clock_freq * 10000 / tach_period; - - return 0; -} - -/** -* Set Fan Speed Control to static mode, so that the user can decide what speed to use. -* @param hwmgr the address of the powerplay hardware manager. -* mode the fan control mode, 0 default, 1 by percent, 5, by RPM -* @exception Should always succeed. -*/ -int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - - if (hwmgr->fan_ctrl_is_in_default_mode) { - hwmgr->fan_ctrl_default_mode = - PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); - hwmgr->tmin = - PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN); - hwmgr->fan_ctrl_is_in_default_mode = false; - } - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE, mode); - - return 0; -} - -/** -* Reset Fan Speed Control to default mode. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Should always succeed. 
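
Fan speed in percent is the current duty divided by the full-scale duty (FMAX_DUTY100), done with a 64-bit intermediate to avoid overflow, and the RPM readback divides a crystal-clock-derived constant by the measured tach period. A standalone sketch of both conversions, with the crystal clock taken as whatever tonga_get_xclk() returns above:

#include <stdint.h>

/* duty -> percent, mirroring the do_div() based math above */
static uint32_t fan_duty_to_percent(uint32_t duty, uint32_t duty100)
{
	uint64_t tmp;

	if (duty100 == 0)
		return 0;                       /* the driver treats this as -EINVAL */

	tmp = (uint64_t)duty * 100;
	tmp /= duty100;                         /* do_div(tmp, duty100) in the kernel code */

	return tmp > 100 ? 100 : (uint32_t)tmp;
}

/* tach period -> RPM; 64-bit intermediate so the 60 * xclk * 10000 product cannot overflow */
static uint32_t tach_period_to_rpm(uint32_t xclk, uint32_t tach_period)
{
	return tach_period ? (uint32_t)(60ull * xclk * 10000 / tach_period) : 0;
}
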
-*/ -int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->fan_ctrl_is_in_default_mode) { - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TMIN, hwmgr->tmin); - hwmgr->fan_ctrl_is_in_default_mode = true; - } - - return 0; -} - -int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - int result; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODFuzzyFanControlSupport)) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_FanSpeedInTableIsRPM)) - hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanRPM); - else - hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr, - hwmgr->thermal_controller. - advanceFanControlParameters.usMaxFanPWM); - - } else { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl); - } - - if (!result && hwmgr->thermal_controller. - advanceFanControlParameters.ucTargetTemperature) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanTemperatureTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ucTargetTemperature); - - return result; -} - - -int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); -} - -/** -* Set Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (0% - 100%) to be set. -* @exception Fails is the 100% setting appears to be 0. -*/ -int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, - uint32_t speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (speed > 100) - speed = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - polaris10_fan_ctrl_stop_smc_fan_control(hwmgr); - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - - return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reset Fan Speed to default. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Always succeeds. -*/ -int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) -{ - int result; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - if (!result) - result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = polaris10_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. 
-* @param speed is the percentage value (min - max) to be set. -* @exception Fails is the speed not lie between min and max. -*/ -int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - uint32_t tach_period; - uint32_t crystal_clock_freq; - - if (hwmgr->thermal_controller.fanInfo.bNoFan || - (hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution == 0) || - (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || - (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - polaris10_fan_ctrl_stop_smc_fan_control(hwmgr); - - crystal_clock_freq = tonga_get_xclk(hwmgr); - - tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD, tach_period); - - return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reads the remote temperature from the SIslands thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr) -{ - int temp; - - temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_STATUS, CTF_TEMP); - - /* Bit 9 means the reading is lower than the lowest usable value. */ - if (temp & 0x200) - temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING; - else - temp = temp & 0x1ff; - - temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return temp; -} - -/** -* Set the requested temperature range for high and low alert signals -* -* @param hwmgr The address of the hardware manager. -* @param range Temperature range to be programmed for high and low alert signals -* @exception PP_Result_BadInput if the input data is not valid. -*/ -static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - uint32_t low_temp, uint32_t high_temp) -{ - uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - if (low < low_temp) - low = low_temp; - if (high > high_temp) - high = high_temp; - - if (low > high) - return -EINVAL; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, DIG_THERM_INTH, - (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, DIG_THERM_INTL, - (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_CTRL, DIG_THERM_DPM, - (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - - return 0; -} - -/** -* Programs thermal controller one-time setting registers -* -* @param hwmgr The address of the hardware manager. -*/ -static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_CTRL, EDGE_PER_REV, - hwmgr->thermal_controller.fanInfo. - ucTachometerPulsesPerRevolution - 1); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); - - return 0; -} - -/** -* Enable thermal alerts on the RV770 thermal controller. -* -* @param hwmgr The address of the hardware manager. 
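The temperature read above is just a field decode; here is a sketch with the constants spelled out, assuming PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 so the result is in millidegrees (the function name is illustrative):

#include <stdint.h>

/* raw: CG_MULT_THERMAL_STATUS.CTF_TEMP field value */
static int decode_ctf_temp(uint32_t raw)
{
	int temp;

	if (raw & 0x200)              /* bit 9: reading below the usable range */
		temp = 255;           /* POLARIS10_THERMAL_MAXIMUM_TEMP_READING */
	else
		temp = raw & 0x1ff;   /* low 9 bits are degrees C */

	return temp * 1000;           /* scale to millidegrees */
}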
-*/ -static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to enable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable); -} - -/** -* Disable thermal alerts on the RV770 thermal controller. -* @param hwmgr The address of the hardware manager. -*/ -static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK); - alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to disable internal thermal interrupts */ - return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable); -} - -/** -* Uninitialize the thermal controller. -* Currently just disables alerts. -* @param hwmgr The address of the hardware manager. -*/ -int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) -{ - int result = polaris10_thermal_disable_alert(hwmgr); - - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - polaris10_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (data->fan_table_start == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
- usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(hwmgr-> - thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = tonga_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> - thermal_controller.advanceFanControlParameters.ulCycleDelay * - reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( - hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_CTRL, TEMP_SEL); - - res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, - (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), - data->sram_end); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanMinPwm, - hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanSclkTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); - - if (res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - - return 0; -} - -/** -* Start the fan control on the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ -/* If the fantable setup has failed we could have disabled - * PHM_PlatformCaps_MicrocodeFanControl even after - * this function was included in the table. - * Make sure that we still think controlling the fan is OK. 
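The Slope1/Slope2 terms above fold a rounding offset into fixed-point math; a sketch of the same formula with the units called out (PWM and temperature deltas come from the pptable in hundredths, and the +50 / divide-by-100 pair is the usual round-to-nearest; assumes t_diff is non-zero, as in the driver):

#include <stdint.h>

/* duty100: FMAX_DUTY100 counts; pwm_diff/t_diff: med-min (or high-med) deltas */
static uint16_t fan_table_slope(uint32_t duty100, uint32_t pwm_diff, uint32_t t_diff)
{
	/* (16 * duty100 * pwm_diff) / t_diff, then /100 with +50 to round */
	return (uint16_t)((50 + (16 * duty100 * pwm_diff) / t_diff) / 100);
}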
-*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) { - polaris10_fan_ctrl_start_smc_fan_control(hwmgr); - polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - } - - return 0; -} - -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; - - if (range == NULL) - return -EINVAL; - - return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return polaris10_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return polaris10_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - return polaris10_thermal_disable_alert(hwmgr); -} - -static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) - return 0; - - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); - - ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? 
- 0 : -1; - - if (!ret) - /* If this param is not changed, this function could fire unnecessarily */ - smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY; - - return ret; -} - -static const struct phm_master_table_item -polaris10_thermal_start_thermal_controller_master_list[] = { - {NULL, tf_polaris10_thermal_initialize}, - {NULL, tf_polaris10_thermal_set_temperature_range}, - {NULL, tf_polaris10_thermal_enable_alert}, - {NULL, tf_polaris10_thermal_avfs_enable}, -/* We should restrict performance levels to low before we halt the SMC. - * On the other hand we are still in boot state when we do this - * so it would be pointless. - * If this assumption changes we have to revisit this table. - */ - {NULL, tf_polaris10_thermal_setup_fan_table}, - {NULL, tf_polaris10_thermal_start_smc_fan_control}, - {NULL, NULL} -}; - -static const struct phm_master_table_header -polaris10_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - polaris10_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item -polaris10_thermal_set_temperature_range_master_list[] = { - {NULL, tf_polaris10_thermal_disable_alert}, - {NULL, tf_polaris10_thermal_set_temperature_range}, - {NULL, tf_polaris10_thermal_enable_alert}, - {NULL, NULL} -}; - -static const struct phm_master_table_header -polaris10_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - polaris10_thermal_set_temperature_range_master_list -}; - -int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - polaris10_fan_ctrl_set_default_mode(hwmgr); - return 0; -} - -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. -*/ -int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, - &polaris10_thermal_set_temperature_range_master, - &(hwmgr->set_temperature_range)); - - if (!result) { - result = phm_construct_table(hwmgr, - &polaris10_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (!result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h deleted file mode 100644 index 62f8cbc2d590..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
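The NULL-terminated lists above are consumed by phm_construct_table()/phm_dispatch_table(), which are not part of this diff; the following is only a rough sketch of the assumed dispatch semantics, with names and the signature simplified, not the actual implementation:

#include <stddef.h>

typedef int (*tf_action)(void *hwmgr, void *in, void *out, void *storage, int last);

struct master_item {
	void *check;      /* optional predicate; NULL in the lists above */
	tf_action action;
};

/* Walk the list until the {NULL, NULL} terminator, threading the last result. */
static int dispatch_table(void *hwmgr, const struct master_item *items)
{
	int result = 0;

	for (; items->action != NULL; items++)
		result = items->action(hwmgr, NULL, NULL, NULL, result);

	return result;
}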
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _POLARIS10_THERMAL_H_ -#define _POLARIS10_THERMAL_H_ - -#include "hwmgr.h" - -#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1 -#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2 - -#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256 -#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255 - -#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0 -#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); - -extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); -extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); -extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 26f3e30d0fef..1126bd4f74dc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include "ppatomctrl.h" #include "atombios.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h index f127198aafc4..1e870f58dd12 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h @@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State { typedef struct _ATOM_Tonga_State_Array { UCHAR ucRevId; UCHAR ucNumEntries; /* Number of entries. */ - ATOM_Tonga_State states[1]; /* Dynamically allocate entries. */ + ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. 
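The one-element entries[1] arrays above are really variable-length tables, which is why the conversion code below switches to GET_FLEXIBLE_ARRAY_MEMBER_ADDR; a sketch of what such a helper is assumed to do (the real macro lives in the powerplay headers and may differ in detail):

#include <stdint.h>

/* Compute the address of the i-th record in a trailing entries[1] array,
 * stepping by the record size rather than trusting the declared bound. */
#define FLEX_ARRAY_MEMBER_ADDR(type, member, table, i) \
	((type *)((uint8_t *)&(table)->member[0] + sizeof(type) * (i)))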
*/ } ATOM_Tonga_State_Array; typedef struct _ATOM_Tonga_MCLK_Dependency_Record { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index cfb647f76cbe..7de701d8a450 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -22,15 +22,14 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> -#include "tonga_processpptables.h" +#include "process_pptables_v1_0.h" #include "ppatomctrl.h" #include "atombios.h" #include "pp_debug.h" #include "hwmgr.h" #include "cgs_common.h" -#include "tonga_pptable.h" +#include "pptable_v1_0.h" /** * Private Function used during initialization. @@ -154,12 +153,14 @@ const void *get_powerplay_table(struct pp_hwmgr *hwmgr) static int get_vddc_lookup_table( struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table **lookup_table, - const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables, - uint32_t max_levels + const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables, + uint32_t max_levels ) { uint32_t table_size, i; phm_ppt_v1_voltage_lookup_table *table; + phm_ppt_v1_voltage_lookup_record *record; + ATOM_Tonga_Voltage_Lookup_Record *atom_record; PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries), "Invalid CAC Leakage PowerPlay Table!", return 1); @@ -177,15 +178,17 @@ static int get_vddc_lookup_table( table->count = vddc_lookup_pp_tables->ucNumEntries; for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) { - table->entries[i].us_calculated = 0; - table->entries[i].us_vdd = - vddc_lookup_pp_tables->entries[i].usVdd; - table->entries[i].us_cac_low = - vddc_lookup_pp_tables->entries[i].usCACLow; - table->entries[i].us_cac_mid = - vddc_lookup_pp_tables->entries[i].usCACMid; - table->entries[i].us_cac_high = - vddc_lookup_pp_tables->entries[i].usCACHigh; + record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_voltage_lookup_record, + entries, table, i); + atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_Voltage_Lookup_Record, + entries, vddc_lookup_pp_tables, i); + record->us_calculated = 0; + record->us_vdd = atom_record->usVdd; + record->us_cac_low = atom_record->usCACLow; + record->us_cac_mid = atom_record->usCACMid; + record->us_cac_high = atom_record->usCACHigh; } *lookup_table = table; @@ -314,11 +317,12 @@ static int init_dpm_2_parameters( static int get_valid_clk( struct pp_hwmgr *hwmgr, struct phm_clock_array **clk_table, - const phm_ppt_v1_clock_voltage_dependency_table * clk_volt_pp_table + phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table ) { uint32_t table_size, i; struct phm_clock_array *table; + phm_ppt_v1_clock_voltage_dependency_record *dep_record; PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count), "Invalid PowerPlay Table!", return -1); @@ -335,9 +339,12 @@ static int get_valid_clk( table->count = (uint32_t)clk_volt_pp_table->count; - for (i = 0; i < table->count; i++) - table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk; - + for (i = 0; i < table->count; i++) { + dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_clock_voltage_dependency_record, + entries, clk_volt_pp_table, i); + table->values[i] = (uint32_t)dep_record->clk; + } *clk_table = table; return 0; @@ -346,7 +353,7 @@ static int get_valid_clk( static int get_hard_limits( struct pp_hwmgr *hwmgr, struct phm_clock_and_voltage_limits *limits, - const ATOM_Tonga_Hard_Limit_Table * limitable + ATOM_Tonga_Hard_Limit_Table const 
*limitable ) { PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1); @@ -364,11 +371,13 @@ static int get_hard_limits( static int get_mclk_voltage_dependency_table( struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table, - const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table + ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table ) { uint32_t table_size, i; phm_ppt_v1_clock_voltage_dependency_table *mclk_table; + phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record; + ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record; PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries), "Invalid PowerPlay Table!", return -1); @@ -386,16 +395,17 @@ static int get_mclk_voltage_dependency_table( mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries; for (i = 0; i < mclk_dep_table->ucNumEntries; i++) { - mclk_table->entries[i].vddInd = - mclk_dep_table->entries[i].ucVddcInd; - mclk_table->entries[i].vdd_offset = - mclk_dep_table->entries[i].usVddgfxOffset; - mclk_table->entries[i].vddci = - mclk_dep_table->entries[i].usVddci; - mclk_table->entries[i].mvdd = - mclk_dep_table->entries[i].usMvdd; - mclk_table->entries[i].clk = - mclk_dep_table->entries[i].ulMclk; + mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_clock_voltage_dependency_record, + entries, mclk_table, i); + mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_MCLK_Dependency_Record, + entries, mclk_dep_table, i); + mclk_table_record->vddInd = mclk_dep_record->ucVddcInd; + mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset; + mclk_table_record->vddci = mclk_dep_record->usVddci; + mclk_table_record->mvdd = mclk_dep_record->usMvdd; + mclk_table_record->clk = mclk_dep_record->ulMclk; } *pp_tonga_mclk_dep_table = mclk_table; @@ -406,15 +416,17 @@ static int get_mclk_voltage_dependency_table( static int get_sclk_voltage_dependency_table( struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, - const PPTable_Generic_SubTable_Header *sclk_dep_table + PPTable_Generic_SubTable_Header const *sclk_dep_table ) { uint32_t table_size, i; phm_ppt_v1_clock_voltage_dependency_table *sclk_table; + phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record; if (sclk_dep_table->ucRevId < 1) { const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; + ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record; PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), "Invalid PowerPlay Table!", return -1); @@ -432,20 +444,23 @@ static int get_sclk_voltage_dependency_table( sclk_table->count = (uint32_t)tonga_table->ucNumEntries; for (i = 0; i < tonga_table->ucNumEntries; i++) { - sclk_table->entries[i].vddInd = - tonga_table->entries[i].ucVddInd; - sclk_table->entries[i].vdd_offset = - tonga_table->entries[i].usVddcOffset; - sclk_table->entries[i].clk = - tonga_table->entries[i].ulSclk; - sclk_table->entries[i].cks_enable = - (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; - sclk_table->entries[i].cks_voffset = - (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); + sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_SCLK_Dependency_Record, + entries, tonga_table, i); + sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_clock_voltage_dependency_record, + entries, sclk_table, i); + sclk_table_record->vddInd = sclk_dep_record->ucVddInd; + sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset; + sclk_table_record->clk = sclk_dep_record->ulSclk; + sclk_table_record->cks_enable = + (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; + sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F); } } else { const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; + ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record; PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), "Invalid PowerPlay Table!", return -1); @@ -463,17 +478,19 @@ static int get_sclk_voltage_dependency_table( sclk_table->count = (uint32_t)polaris_table->ucNumEntries; for (i = 0; i < polaris_table->ucNumEntries; i++) { - sclk_table->entries[i].vddInd = - polaris_table->entries[i].ucVddInd; - sclk_table->entries[i].vdd_offset = - polaris_table->entries[i].usVddcOffset; - sclk_table->entries[i].clk = - polaris_table->entries[i].ulSclk; - sclk_table->entries[i].cks_enable = - (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; - sclk_table->entries[i].cks_voffset = - (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); - sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; + sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Polaris_SCLK_Dependency_Record, + entries, polaris_table, i); + sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_clock_voltage_dependency_record, + entries, sclk_table, i); + sclk_table_record->vddInd = sclk_dep_record->ucVddInd; + sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset; + sclk_table_record->clk = sclk_dep_record->ulSclk; + sclk_table_record->cks_enable = + (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; + sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F); + sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset; } } *pp_tonga_sclk_dep_table = sclk_table; @@ -484,16 +501,19 @@ static int get_sclk_voltage_dependency_table( static int get_pcie_table( struct pp_hwmgr *hwmgr, phm_ppt_v1_pcie_table **pp_tonga_pcie_table, - const PPTable_Generic_SubTable_Header * pTable + PPTable_Generic_SubTable_Header const *ptable ) { uint32_t table_size, i, pcie_count; phm_ppt_v1_pcie_table *pcie_table; struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_pcie_record *pcie_record; + + if (ptable->ucRevId < 1) { + const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable; + ATOM_Tonga_PCIE_Record *atom_pcie_record; - if (pTable->ucRevId < 1) { - const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable; PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0), "Invalid PowerPlay Table!", return -1); @@ -519,18 +539,23 @@ static int get_pcie_table( Disregarding the excess entries... 
\n"); pcie_table->count = pcie_count; - for (i = 0; i < pcie_count; i++) { - pcie_table->entries[i].gen_speed = - atom_pcie_table->entries[i].ucPCIEGenSpeed; - pcie_table->entries[i].lane_width = - atom_pcie_table->entries[i].usPCIELaneWidth; + pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_pcie_record, + entries, pcie_table, i); + atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_PCIE_Record, + entries, atom_pcie_table, i); + pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed; + pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth; } *pp_tonga_pcie_table = pcie_table; } else { /* Polaris10/Polaris11 and newer. */ - const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable; + const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable; + ATOM_Polaris10_PCIE_Record *atom_pcie_record; + PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0), "Invalid PowerPlay Table!", return -1); @@ -558,12 +583,15 @@ static int get_pcie_table( pcie_table->count = pcie_count; for (i = 0; i < pcie_count; i++) { - pcie_table->entries[i].gen_speed = - atom_pcie_table->entries[i].ucPCIEGenSpeed; - pcie_table->entries[i].lane_width = - atom_pcie_table->entries[i].usPCIELaneWidth; - pcie_table->entries[i].pcie_sclk = - atom_pcie_table->entries[i].ulPCIE_Sclk; + pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_pcie_record, + entries, pcie_table, i); + atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Polaris10_PCIE_Record, + entries, atom_pcie_table, i); + pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed; + pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth; + pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk; } *pp_tonga_pcie_table = pcie_table; @@ -685,6 +713,7 @@ static int get_mm_clock_voltage_table( uint32_t table_size, i; const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record; phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table; + phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record; PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries), "Invalid PowerPlay Table!", return -1); @@ -701,14 +730,19 @@ static int get_mm_clock_voltage_table( mm_table->count = mm_dependency_table->ucNumEntries; for (i = 0; i < mm_dependency_table->ucNumEntries; i++) { - mm_dependency_record = &mm_dependency_table->entries[i]; - mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd; - mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset; - mm_table->entries[i].aclk = mm_dependency_record->ulAClk; - mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk; - mm_table->entries[i].eclk = mm_dependency_record->ulEClk; - mm_table->entries[i].vclk = mm_dependency_record->ulVClk; - mm_table->entries[i].dclk = mm_dependency_record->ulDClk; + mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_MM_Dependency_Record, + entries, mm_dependency_table, i); + mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + phm_ppt_v1_mm_clock_voltage_dependency_record, + entries, mm_table, i); + mm_table_record->vddcInd = mm_dependency_record->ucVddcInd; + mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset; + mm_table_record->aclk = mm_dependency_record->ulAClk; + mm_table_record->samclock = mm_dependency_record->ulSAMUClk; + mm_table_record->eclk = mm_dependency_record->ulEClk; + mm_table_record->vclk = mm_dependency_record->ulVClk; + mm_table_record->dclk = mm_dependency_record->ulDClk; } *tonga_mm_table = mm_table; @@ -1015,7 
+1049,7 @@ static int check_powerplay_tables( return 0; } -int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr) +int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) { int result = 0; const ATOM_Tonga_POWERPLAYTABLE *powerplay_table; @@ -1066,7 +1100,7 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr) return result; } -int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) +int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1110,14 +1144,14 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) return 0; } -const struct pp_table_func tonga_pptable_funcs = { - .pptable_init = tonga_pp_tables_initialize, - .pptable_fini = tonga_pp_tables_uninitialize, +const struct pp_table_func pptable_v1_0_funcs = { + .pptable_init = pp_tables_v1_0_initialize, + .pptable_fini = pp_tables_v1_0_uninitialize, }; -int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) +int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr) { - const ATOM_Tonga_State_Array * state_arrays; + ATOM_Tonga_State_Array const *state_arrays; const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); PP_ASSERT_WITH_CODE((NULL != pp_table), @@ -1164,6 +1198,71 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, return result; } +static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) +{ + const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); + const ATOM_Tonga_VCE_State_Table *vce_state_table = + (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset)); + + if (vce_state_table == NULL) + return 0; + + return vce_state_table->ucNumEntries; +} + +static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i, + struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag) +{ + const ATOM_Tonga_VCE_State_Record *vce_state_record; + ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record; + ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record; + ATOM_Tonga_MM_Dependency_Record *mm_dep_record; + const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr); + const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable) + + le16_to_cpu(pptable->usVCEStateTableOffset)); + const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable) + + le16_to_cpu(pptable->usSclkDependencyTableOffset)); + const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable) + + le16_to_cpu(pptable->usMclkDependencyTableOffset)); + const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable) + + le16_to_cpu(pptable->usMMDependencyTableOffset)); + + PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries), + "Requested state entry ID is out of range!", + return -EINVAL); + + vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_VCE_State_Record, + entries, vce_state_table, i); + sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_SCLK_Dependency_Record, + entries, sclk_dep_table, + vce_state_record->ucSCLKIndex); + mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_MM_Dependency_Record, + entries, mm_dep_table, + vce_state_record->ucVCEClockIndex); + *flag = vce_state_record->ucFlag; + + vce_state->evclk = 
mm_dep_record->ulEClk; + vce_state->ecclk = mm_dep_record->ulEClk; + vce_state->sclk = sclk_dep_record->ulSclk; + + if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries) + mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_MCLK_Dependency_Record, + entries, mclk_dep_table, + mclk_dep_table->ucNumEntries - 1); + else + mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_MCLK_Dependency_Record, + entries, mclk_dep_table, + vce_state_record->ucMCLKIndex); + + vce_state->mclk = mclk_dep_record->ulMclk; + return 0; +} + /** * Create a Power State out of an entry in the PowerPlay table. * This function is called by the hardware back-end. @@ -1172,15 +1271,17 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, * @param power_state The address of the PowerState instance being created. * @return -1 if the entry cannot be retrieved. */ -int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, +int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index, struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *, struct pp_power_state *, void *, uint32_t)) { int result = 0; - const ATOM_Tonga_State_Array * state_arrays; + const ATOM_Tonga_State_Array *state_arrays; const ATOM_Tonga_State *state_entry; const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); + int i, j; + uint32_t flags = 0; PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;); power_state->classification.bios_index = entry_index; @@ -1197,7 +1298,9 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries), "Invalid PowerPlay Table State Array Entry.", return -1); - state_entry = &(state_arrays->states[entry_index]); + state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( + ATOM_Tonga_State, entries, + state_arrays, entry_index); result = call_back_func(hwmgr, (void *)state_entry, power_state, (void *)pp_table, @@ -1210,5 +1313,13 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, PP_StateClassificationFlag_Boot)) result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware)); + hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr); + + if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) { + for (j = 0; j < i; j++) + ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags); + } + return result; } + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h index d24b8887f466..b9710abdff01 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h @@ -20,14 +20,14 @@ * OTHER DEALINGS IN THE SOFTWARE. 
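One detail of the VCE state lookup above: an MCLK index that falls outside the dependency table is clamped to the last entry rather than rejected. A one-line sketch of that pattern (helper name illustrative; assumes num_entries >= 1, which the table checks guarantee):

static unsigned int clamp_table_index(unsigned int idx, unsigned int num_entries)
{
	return (idx >= num_entries) ? num_entries - 1 : idx;
}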
* */ -#ifndef TONGA_PROCESSPPTABLES_H -#define TONGA_PROCESSPPTABLES_H +#ifndef _PROCESSPPTABLES_V1_0_H +#define _PROCESSPPTABLES_V1_0_H #include "hwmgr.h" -extern const struct pp_table_func tonga_pptable_funcs; -extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr); -extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index, +extern const struct pp_table_func pptable_v1_0_funcs; +extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr); +extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index, struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *, struct pp_power_state *, void *, uint32_t)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 6c321b0d8a1e..ccf7ebeaf892 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -1523,7 +1523,7 @@ int get_number_of_vce_state_table_entries( int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, unsigned long i, - struct PP_VCEState *vce_state, + struct pp_vce_state *vce_state, void **clock_info, unsigned long *flag) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index b5edb5105986..6eb6db199250 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -21,9 +21,53 @@ * */ -#include "polaris10_clockpowergating.h" +#include "smu7_hwmgr.h" +#include "smu7_clockpowergating.h" +#include "smu7_common.h" -int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) +static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + PPSMC_MSG_UVDDPM_Enable : + PPSMC_MSG_UVDDPM_Disable); +} + +static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? + PPSMC_MSG_VCEDPM_Enable : + PPSMC_MSG_VCEDPM_Disable); +} + +static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, enable ? 
+ PPSMC_MSG_SAMUDPM_Enable : + PPSMC_MSG_SAMUDPM_Disable); +} + +static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (!bgate) + smum_update_smc_table(hwmgr, SMU_UVD_TABLE); + return smu7_enable_disable_uvd_dpm(hwmgr, !bgate); +} + +static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (!bgate) + smum_update_smc_table(hwmgr, SMU_VCE_TABLE); + return smu7_enable_disable_vce_dpm(hwmgr, !bgate); +} + +static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (!bgate) + smum_update_smc_table(hwmgr, SMU_SAMU_TABLE); + return smu7_enable_disable_samu_dpm(hwmgr, !bgate); +} + +int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr->smumgr, @@ -31,7 +75,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr) +int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -47,7 +91,7 @@ int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr) +int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr->smumgr, @@ -55,7 +99,7 @@ int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr) +int smu7_powerup_vce(struct pp_hwmgr *hwmgr) { if (phm_cf_want_vce_power_gating(hwmgr)) return smum_send_msg_to_smc(hwmgr->smumgr, @@ -63,7 +107,7 @@ int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr) +int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SamuPowerGating)) @@ -72,7 +116,7 @@ int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr) +int smu7_powerup_samu(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SamuPowerGating)) @@ -81,27 +125,24 @@ int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) +int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); data->uvd_power_gated = false; data->vce_power_gated = false; data->samu_power_gated = false; - polaris10_phm_powerup_uvd(hwmgr); - polaris10_phm_powerup_vce(hwmgr); - polaris10_phm_powerup_samu(hwmgr); + smu7_powerup_uvd(hwmgr); + smu7_powerup_vce(hwmgr); + smu7_powerup_samu(hwmgr); return 0; } -int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->uvd_power_gated == bgate) - return 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); data->uvd_power_gated = bgate; @@ -109,11 +150,11 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); - polaris10_update_uvd_dpm(hwmgr, true); - polaris10_phm_powerdown_uvd(hwmgr); + smu7_update_uvd_dpm(hwmgr, true); + smu7_powerdown_uvd(hwmgr); } else { - 
polaris10_phm_powerup_uvd(hwmgr); - polaris10_update_uvd_dpm(hwmgr, false); + smu7_powerup_uvd(hwmgr); + smu7_update_uvd_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); @@ -122,9 +163,9 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) return 0; } -int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->vce_power_gated == bgate) return 0; @@ -135,11 +176,11 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); - polaris10_update_vce_dpm(hwmgr, true); - polaris10_phm_powerdown_vce(hwmgr); + smu7_update_vce_dpm(hwmgr, true); + smu7_powerdown_vce(hwmgr); } else { - polaris10_phm_powerup_vce(hwmgr); - polaris10_update_vce_dpm(hwmgr, false); + smu7_powerup_vce(hwmgr); + smu7_update_vce_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); @@ -147,9 +188,9 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) return 0; } -int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) +int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->samu_power_gated == bgate) return 0; @@ -157,22 +198,25 @@ int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) data->samu_power_gated = bgate; if (bgate) { - polaris10_update_samu_dpm(hwmgr, true); - polaris10_phm_powerdown_samu(hwmgr); + smu7_update_samu_dpm(hwmgr, true); + smu7_powerdown_samu(hwmgr); } else { - polaris10_phm_powerup_samu(hwmgr); - polaris10_update_samu_dpm(hwmgr, false); + smu7_powerup_samu(hwmgr); + smu7_update_samu_dpm(hwmgr, false); } return 0; } -int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, +int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id) { PPSMC_Msg msg; uint32_t value; + if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU)) + return 0; + switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { case PP_GROUP_GFX: switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { @@ -185,7 +229,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS @@ -195,7 +239,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -208,7 +252,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -219,7 +263,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -232,7 +276,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -245,7 +289,7 @@ int 
polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -259,12 +303,12 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; default: - return -1; + return -EINVAL; } break; @@ -279,7 +323,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? @@ -289,7 +333,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -302,7 +346,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -313,7 +357,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -326,7 +370,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? @@ -336,7 +380,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -349,7 +393,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -360,7 +404,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -373,7 +417,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } if (PP_STATE_SUPPORT_LS & *msg_id) { @@ -384,7 +428,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; @@ -397,18 +441,18 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, if (smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, msg, value)) - return -1; + return -EINVAL; } break; default: - return -1; + return -EINVAL; } break; default: - return -1; + return -EINVAL; } @@ -419,7 +463,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, * Powerplay will only control the static per CU Power Gating. * Dynamic per CU Power Gating will be done in gfx. 
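The long switch above is driven entirely by fields packed into msg_id, the same fields PP_CG_MSG_ID composes; a sketch of the decode, with the masks and shifts passed in as parameters because their numeric values are not shown in this diff:

#include <stdint.h>

struct cg_request {
	uint32_t group;    /* PP_GROUP_GFX or PP_GROUP_SYS */
	uint32_t block;    /* PP_BLOCK_* within that group */
	uint32_t support;  /* PP_STATE_SUPPORT_CG / PP_STATE_SUPPORT_LS bits */
	uint32_t state;    /* requested PP_STATE_CG / PP_STATE_LS bits */
};

static struct cg_request decode_cg_msg(uint32_t msg_id,
		uint32_t group_mask, uint32_t group_shift,
		uint32_t block_mask, uint32_t block_shift,
		uint32_t support_mask, uint32_t state_mask)
{
	struct cg_request req = {
		.group   = (msg_id & group_mask) >> group_shift,
		.block   = (msg_id & block_mask) >> block_shift,
		.support = msg_id & support_mask,
		.state   = msg_id & state_mask,
	};

	return req;
}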
*/ -int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) +int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) { struct cgs_system_info sys_info = {0}; uint32_t active_cus; @@ -432,8 +476,8 @@ int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable if (result) return -EINVAL; - else - active_cus = sys_info.value; + + active_cus = sys_info.value; if (enable) return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h index 88d68cb6e89d..d52a28c343e3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -21,20 +21,20 @@ * */ -#ifndef _POLARIS10_CLOCK_POWER_GATING_H_ -#define _POLARIS10_CLOCK_POWER_GATING_H_ +#ifndef _SMU7_CLOCK_POWER_GATING_H_ +#define _SMU7_CLOCK__POWER_GATING_H_ -#include "polaris10_hwmgr.h" +#include "smu7_hwmgr.h" #include "pp_asicblocks.h" -int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); -int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, +int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); +int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); +int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); -int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable); +int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable); -#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */ +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h new file mode 100644 index 000000000000..f967613191cf --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h @@ -0,0 +1,55 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _SMU7_DYN_DEFAULTS_H +#define _SMU7_DYN_DEFAULTS_H + + +/* We need to fill in the default values */ + + +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 +#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 + + +#define SMU7_THERMALPROTECTCOUNTER_DFLT 0x200 +#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT 0 +#define SMU7_STATICSCREENTHRESHOLD_DFLT 0x00C8 +#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 +#define SMU7_REFERENCEDIVIDER_DFLT 4 + +#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define SMU7_CGULVPARAMETER_DFLT 0x00040035 +#define SMU7_CGULVCONTROL_DFLT 0x00007450 +#define SMU7_TARGETACTIVITY_DFLT 50 +#define SMU7_MCLK_TARGETACTIVITY_DFLT 10 + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c new file mode 100644 index 000000000000..508245d49d33 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -0,0 +1,4359 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include <asm/div64.h> +#include "linux/delay.h" +#include "pp_acpi.h" +#include "pp_debug.h" +#include "ppatomctrl.h" +#include "atombios.h" +#include "pptable_v1_0.h" +#include "pppcielanes.h" +#include "amd_pcie_helpers.h" +#include "hardwaremanager.h" +#include "process_pptables_v1_0.h" +#include "cgs_common.h" + +#include "smu7_common.h" + +#include "hwmgr.h" +#include "smu7_hwmgr.h" +#include "smu7_powertune.h" +#include "smu7_dyn_defaults.h" +#include "smu7_thermal.h" +#include "smu7_clockpowergating.h" +#include "processpptables.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define MC_CG_SEQ_DRAMCONF_S0 0x05 +#define MC_CG_SEQ_DRAMCONF_S1 0x06 +#define MC_CG_SEQ_YCLK_SUSPEND 0x04 +#define MC_CG_SEQ_YCLK_RESUME 0x0a + +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +#define MEM_FREQ_LOW_LATENCY 25000 +#define MEM_FREQ_HIGH_LATENCY 80000 + +#define MEM_LATENCY_HIGH 45 +#define MEM_LATENCY_LOW 35 +#define MEM_LATENCY_ERR 0xFFFF + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +#define PCIE_BUS_CLK 10000 +#define TCLK (PCIE_BUS_CLK / 10) + + +/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, + DPM_EVENT_SRC_EXTERNAL = 1, + DPM_EVENT_SRC_DIGITAL = 2, + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 +}; + +static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct smu7_power_state *cast_phw_smu7_power_state( + struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (struct smu7_power_state *)hw_ps; +} + +const struct smu7_power_state *cast_const_phw_smu7_power_state( + const struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (const struct smu7_power_state *)hw_ps; +} + +/** + * Find the MC microcode version and store it in the HwMgr struct + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr) +{ + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); + + hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + return 0; +} + +uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speedCntl = 0; + + /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ + speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speedCntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + +int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +/** +* Enable voltage control +* +* @param pHwMgr the address of the powerplay hardware manager. 
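+* Note: the PPSMC_MSG_Voltage_Cntl_Enable message is only sent to the SMU when PP_SMC_VOLTAGE_CONTROL_MASK is set in hwmgr->feature_mask; the function itself always returns 0.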
+* @return always 0 +*/ +int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); + + return 0; +} + +/** +* Checks if we want to support voltage control +* +* @param hwmgr the address of the powerplay hardware manager. +*/ +static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr) +{ + const struct smu7_hwmgr *data = + (const struct smu7_hwmgr *)(hwmgr->backend); + + return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control); +} + +/** +* Enable voltage control +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table, + struct phm_clock_voltage_dependency_table *voltage_dependency_table + ) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Dependency Table empty.", return -EINVAL;); + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + voltage_table->count = voltage_dependency_table->count; + + for (i = 0; i < voltage_dependency_table->count; i++) { + voltage_table->entries[i].value = + voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + + +/** +* Create Voltage Tables. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + int result = 0; + uint32_t tmp; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, + &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve MVDD table.", + return result); + } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + if (hwmgr->pp_table_version == PP_TABLE_V1) + result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table), + table_info->vdd_dep_on_mclk); + else if (hwmgr->pp_table_version == PP_TABLE_V0) + result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table), + hwmgr->dyn_state.mvdd_dependency_on_mclk); + + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 MVDD table from dependency table.", + return result;); + } + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, + &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", + return result); + } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + if (hwmgr->pp_table_version == PP_TABLE_V1) + result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table), + table_info->vdd_dep_on_mclk); + else if (hwmgr->pp_table_version == PP_TABLE_V0) + result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), + hwmgr->dyn_state.vddci_dependency_on_mclk); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependency table.", + return result); + } + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 ==
data->vdd_gfx_control) { + /* VDDGFX has only SVI2 voltage control */ + result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table), + table_info->vddgfx_lookup_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;); + } + + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDC table.", return result;); + } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + + if (hwmgr->pp_table_version == PP_TABLE_V0) + result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table, + hwmgr->dyn_state.vddc_dependency_on_mclk); + else if (hwmgr->pp_table_version == PP_TABLE_V1) + result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table), + table_info->vddc_lookup_table); + + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from dependency table.", return result;); + } + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC); + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= tmp), + "Too many voltage values for VDDC. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->vddc_voltage_table))); + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + PP_ASSERT_WITH_CODE( + (data->vddgfx_voltage_table.count <= tmp), + "Too many voltage values for VDDGFX. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->vddgfx_voltage_table))); + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI); + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= tmp), + "Too many voltage values for VDDCI. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->vddci_voltage_table))); + + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD); + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= tmp), + "Too many voltage values for MVDD. Trimming to fit state table.", + phm_trim_voltage_table_to_fit_state_table(tmp, + &(data->mvdd_voltage_table))); + + return 0; +} + +/** +* Programs static screen detection parameters +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_program_static_screen_threshold_parameters( + struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* Set static screen threshold unit */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, + data->static_screen_threshold_unit); + /* Set static screen threshold */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, + data->static_screen_threshold); + + return 0; +} + +/** +* Setup display gap for glitch free memory clock switching. +* +* @param hwmgr the address of the powerplay hardware manager.
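+* The display gap is normally ignored (DISPLAY_GAP_IGNORE) and only forced to the VBLANK period (DISPLAY_GAP_VBLANK) while a memory clock change is pending, which is what makes the MCLK switch glitch free.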
+* @return always 0 +*/ +static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t display_gap = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL); + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, + DISP_GAP, DISPLAY_GAP_IGNORE); + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, + DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, display_gap); + + return 0; +} + +/** +* Programs activity state transition voting clients +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + +static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + +/* Copy one arb setting to another and then switch the active set. + * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 
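+ * For example, copying MC_CG_ARB_FREQ_F0 into MC_CG_ARB_FREQ_F1 duplicates the F0 DRAM timings and burst time into the F1 registers and then makes F1 the active set via MC_ARB_CG:CG_ARB_REQ.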
+ */ +static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, + uint32_t arb_src, uint32_t arb_dest) +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint32_t burst_time; + uint32_t mc_cg_config; + + switch (arb_src) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + break; + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); + break; + default: + return -EINVAL; + } + + switch (arb_dest) { + case MC_CG_ARB_FREQ_F0: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); + break; + case MC_CG_ARB_FREQ_F1: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); + break; + default: + return -EINVAL; + } + + mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); + mc_cg_config |= 0x0000000F; + cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); + + return 0; +} + +static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + +/** +* Initial switch from ARB F0->F1 +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +* This function is to be called from the SetPowerState table. 
+*/ +static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) +{ + return smu7_copy_and_switch_arb_sets(hwmgr, + MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return smu7_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + +static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = NULL; + + uint32_t i, max_entry; + uint32_t tmp; + + PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || + data->use_pcie_power_saving_levels), "No pcie performance levels!", + return -EINVAL); + + if (table_info != NULL) + pcie_table = table_info->pcie_table; + + if (data->use_pcie_performance_levels && + !data->use_pcie_power_saving_levels) { + data->pcie_gen_power_saving = data->pcie_gen_performance; + data->pcie_lane_power_saving = data->pcie_lane_performance; + } else if (!data->use_pcie_performance_levels && + data->use_pcie_power_saving_levels) { + data->pcie_gen_performance = data->pcie_gen_power_saving; + data->pcie_lane_performance = data->pcie_lane_power_saving; + } + tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK); + phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, + tmp, + MAX_REGULAR_DPM_NUMBER); + + if (pcie_table != NULL) { + /* max_entry is used to make sure we reserve one PCIE level + * for boot level (fix for A+A PSPP issue). + * If PCIE table from PPTable have ULV entry + 8 entries, + * then ignore the last entry.*/ + max_entry = (tmp < pcie_table->count) ? 
tmp : pcie_table->count; + for (i = 1; i < max_entry; i++) { + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, + get_pcie_gen_support(data->pcie_gen_cap, + pcie_table->entries[i].gen_speed), + get_pcie_lane_support(data->pcie_lane_cap, + pcie_table->entries[i].lane_width)); + } + data->dpm_table.pcie_speed_table.count = max_entry - 1; + smum_update_smc_table(hwmgr, SMU_BIF_TABLE); + } else { + /* Hardcode Pcie Table */ + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + + data->dpm_table.pcie_speed_table.count = 6; + } + /* Populate last level for boot PCIE level, but do not increment count. */ + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, + data->dpm_table.pcie_speed_table.count, + get_pcie_gen_support(data->pcie_gen_cap, + PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, + PP_Max_PCIELane)); + + return 0; +} + +static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); + + phm_reset_single_dpm_table( + &data->dpm_table.sclk_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_GRAPHICS), + MAX_REGULAR_DPM_NUMBER); + phm_reset_single_dpm_table( + &data->dpm_table.mclk_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER); + + phm_reset_single_dpm_table( + &data->dpm_table.vddc_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_VDDC), + MAX_REGULAR_DPM_NUMBER); + phm_reset_single_dpm_table( + &data->dpm_table.vddci_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER); + + phm_reset_single_dpm_table( + &data->dpm_table.mvdd_table, + smum_get_mac_definition(hwmgr->smumgr, + SMU_MAX_LEVELS_MVDD), + MAX_REGULAR_DPM_NUMBER); + return 0; +} +/* + * This function is to initialize all DPM state tables + * for SMU7 based on the dependency table. + * Dynamic state patching function will then trim these + * state tables to the allowed range based + * on the power policy or external client requests, + * such as UVD request, etc. 
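+ * Entries whose clock equals the previous entry's clock are skipped, so every SCLK/MCLK DPM level ends up with a distinct clock value.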
+ */ + +static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table = + hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_cac_leakage_table *std_voltage_table = + hwmgr->dyn_state.cac_leakage_table; + uint32_t i; + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, + "SCLK dependency table has to have at least 1 entry. This table is mandatory", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, + "MCLK dependency table has to have at least 1 entry. This table is mandatory", return -EINVAL); + + + /* Initialize Sclk DPM table based on allow Sclk values*/ + data->dpm_table.sclk_table.count = 0; + + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != + allowed_vdd_sclk_table->entries[i].clk) { + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = + allowed_vdd_sclk_table->entries[i].clk; + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.count++; + } + } + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -EINVAL); + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != + allowed_vdd_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = + allowed_vdd_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.count++; + } + } + + /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values.
*/ + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; + /* param1 is for corresponding std voltage */ + data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + } + + data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; + allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* Initialize Vddci DPM table based on allow Mclk values */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddci_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; + } + + allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* + * Initialize MVDD DPM table based on allow Mclk + * values + */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; + } + + return 0; +} + +static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; + + if (table_info == NULL) + return -EINVAL; + + dep_sclk_table = table_info->vdd_dep_on_sclk; + dep_mclk_table = table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, + "SCLK dependency table is missing.", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, + "SCLK dependency table count is 0.", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, + "MCLK dependency table is missing.", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, + "MCLK dependency table count is 0", + return -EINVAL); + + /* Initialize Sclk DPM table based on allow Sclk values */ + data->dpm_table.sclk_table.count = 0; + for (i = 0; i < dep_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value != + dep_sclk_table->entries[i].clk) { + + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = + dep_sclk_table->entries[i].clk; + + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = + (i == 0) ? true : false; + data->dpm_table.sclk_table.count++; + } + } + + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i = 0; i < dep_mclk_table->count; i++) { + if (i == 0 || data->dpm_table.mclk_table.dpm_levels + [data->dpm_table.mclk_table.count - 1].value != + dep_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = + dep_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = + (i == 0) ? 
true : false; + data->dpm_table.mclk_table.count++; + } + } + + return 0; +} + +int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + smu7_reset_dpm_tables(hwmgr); + + if (hwmgr->pp_table_version == PP_TABLE_V1) + smu7_setup_dpm_tables_v1(hwmgr); + else if (hwmgr->pp_table_version == PP_TABLE_V0) + smu7_setup_dpm_tables_v0(hwmgr); + + smu7_setup_default_pcie_table(hwmgr); + + /* save a copy of the default DPM table */ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), + sizeof(struct smu7_dpm_table)); + return 0; +} + +uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr) +{ + uint32_t reference_clock, tmp; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); + + if (tmp) + return TCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + reference_clock = mode_info.ref_clock; + + tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); + + if (0 != tmp) + return reference_clock / 4; + + return reference_clock; +} + +static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) +{ + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot)) + return smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableVRHotGPIOInterrupt); + + return 0; +} + +static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + SCLK_PWRMGT_OFF, 0); + return 0; +} + +static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV); + + return 0; +} + +static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + +static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON)) + PP_ASSERT_WITH_CODE(false, + "Attempt to enable Master Deep Sleep switch failed!", + return -EINVAL); + } else { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + } + + return 0; +} + +static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + } + + return 0; +} + +static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t soft_register_value = 0; + uint32_t handshake_disables_offset = data->soft_regs_start + + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, HandshakeDisables); + + soft_register_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, handshake_disables_offset); + soft_register_value |= 
smum_get_mac_definition(hwmgr->smumgr, + SMU_UVD_MCLK_HANDSHAKE_DISABLE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + handshake_disables_offset, soft_register_value); + return 0; +} + +static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* enable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -EINVAL); + + /* enable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) + smu7_disable_handshake_uvd(hwmgr); + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -EINVAL); + + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); + udelay(10); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); + } + + return 0; +} + +static int smu7_start_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /*enable general power management */ + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 1); + + /* enable sclk deep sleep */ + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 1); + + /* prepare for PCIE DPM */ + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + + smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters, + VoltageChangeTimeout), 0x1000); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, + SWRST_COMMAND_1, RESETLC, 0x0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -EINVAL); + + + if (smu7_enable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); + return -EINVAL; + } + + /* enable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM during DPM Start Function!", + return -EINVAL); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableACDCGPIOInterrupt)), + "Failed to enable AC DC GPIO Interrupt!", + ); + } + + return 0; +} + +static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -EINVAL); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + 
"Failed to disable MCLK DPM!", + return -EINVAL); + } + + return 0; +} + +static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable pcie DPM during DPM Stop Function!", + return -EINVAL); + } + + if (smu7_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -EINVAL; + } + + return 0; +} + +static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) +{ + bool protection; + enum DPM_EVENT_SRC src; + + switch (sources) { + default: + printk(KERN_ERR "Unknown throttling event sources."); + /* fall through */ + case 0: + protection = false; + /* src is unused */ + break; + case (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL; + break; + case (1 << PHM_AutoThrottleSource_External): + protection = true; + src = DPM_EVENT_SRC_EXTERNAL; + break; + case (1 << PHM_AutoThrottleSource_External) | + (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; + break; + } + /* Order matters - don't enable thermal protection for the wrong source. */ + if (protection) { + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, + DPM_EVENT_SRC, src); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, + !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)); + } else + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, 1); +} + +static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (!(data->active_auto_throttle_sources & (1 << source))) { + data->active_auto_throttle_sources |= 1 << source; + smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + data->pcie_performance_request = true; + + return 0; +} + +int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result = 0; + int result = 0; + + tmp_result = (!smum_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is already running right now, no need to enable DPM!", + return 0); + + if (smu7_voltage_control(hwmgr)) { + tmp_result = smu7_enable_voltage_control(hwmgr); + PP_ASSERT_WITH_CODE(tmp_result == 0, + "Failed to enable voltage control!", + result = tmp_result); + + tmp_result = smu7_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to construct voltage tables!", + result = tmp_result); + } + smum_initialize_mc_reg_table(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); + + tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program static screen threshold parameters!", + result = tmp_result); + + tmp_result = smu7_enable_display_gap(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable display gap!", result = tmp_result); + + tmp_result = smu7_program_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program voting clients!", result = tmp_result); + + tmp_result = smum_process_firmware_header(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to process firmware header!", result = tmp_result); + + tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", + result = tmp_result); + + result = smu7_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup default DPM tables!", return result); + + tmp_result = smum_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize SMC table!", result = tmp_result); + + tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable VR hot GPIO interrupt!", result = tmp_result); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); + + tmp_result = smu7_enable_sclk_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SCLK control!", result = tmp_result); + + tmp_result = smu7_enable_smc_voltage_controller(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable voltage control!", result = tmp_result); + + tmp_result = smu7_enable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ULV!", result = tmp_result); + + tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable deep sleep master switch!", result = tmp_result); + + tmp_result = smu7_enable_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable DIDT config!", result = tmp_result); + + tmp_result = smu7_start_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start DPM!", result = tmp_result); + + tmp_result = smu7_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SMC CAC!", result = tmp_result); + + tmp_result = smu7_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable power containment!", result = tmp_result); + + tmp_result =
smu7_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to power control set level!", result = tmp_result); + + tmp_result = smu7_enable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable thermal auto throttle!", result = tmp_result); + + tmp_result = smu7_pcie_performance_request(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "pcie performance request failed!", result = tmp_result); + + return 0; +} + +int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = smu7_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = smu7_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = smu7_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = smu7_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = smu7_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = smu7_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = smu7_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; +} + +int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr) +{ + + return 0; +} + +static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + data->dll_default_on = false; + data->mclk_dpm0_activity_target = 0xa; + data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT; + data->vddc_vddgfx_delta = 300; + data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; + data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; + data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; + 
data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; + + data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; + data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; + data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; + /* need to set voltage control types before EVV patching */ + data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; + data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; + data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; + data->enable_tdc_limit_feature = true; + data->enable_pkg_pwr_tracking_feature = true; + data->force_pcie_gen = PP_PCIEGenInvalid; + data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; + + data->fast_watermark_threshold = 100; + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) + data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDGFX)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { + data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + } + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) + data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) + data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDGFX); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) + data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + + if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + if ((hwmgr->pp_table_version != PP_TABLE_V0) + && (table_info->cac_dtp_table->usClockStretchAmount != 0)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; +} + +/** +* Get Leakage VDDC based on leakage ID. +* +* @param hwmgr the address of the powerplay hardware manager. 
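+* Walks the virtual voltage IDs starting at ATOM_VIRTUAL_VOLTAGE_ID0 and records the real VDDC/VDDGFX value reported by ATOM for each ID in the corresponding leakage table.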
+* @return always 0 +*/ +static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint16_t vv_id; + uint16_t vddc = 0; + uint16_t vddgfx = 0; + uint16_t i, j; + uint32_t sclk = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; + + + if (table_info != NULL) + sclk_table = table_info->vdd_dep_on_sclk; + + for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { + vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + if (0 == phm_get_sclk_for_voltage_evv(hwmgr, + table_info->vddgfx_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < sclk_table->count; j++) { + if (sclk_table->entries[j].clk == sclk && + sclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + if (0 == atomctrl_get_voltage_evv_on_sclk + (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, + vv_id, &vddgfx)) { + /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */ + PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddgfx != 0 && vddgfx != vv_id) { + data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; + data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; + data->vddcgfx_leakage.count++; + } + } else { + printk("Error retrieving EVV voltage value!\n"); + } + } + } else { + + if ((hwmgr->pp_table_version == PP_TABLE_V0) + || !phm_get_sclk_for_voltage_evv(hwmgr, + table_info->vddc_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < sclk_table->count; j++) { + if (sclk_table->entries[j].clk == sclk && + sclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + + if (phm_get_voltage_evv_on_sclk(hwmgr, + VOLTAGE_TYPE_VDDC, + sclk, vv_id, &vddc) == 0) { + if (vddc >= 2000 || vddc == 0) + return -EINVAL; + } else { + printk(KERN_WARNING "failed to retrieve EVV voltage!\n"); + continue; + } + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddc != 0 && vddc != vv_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; + data->vddc_leakage.count++; + } + } + } + } + + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched\n"); +} + +/** +* Patch voltage lookup table by EVV leakages.
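+* Each us_vdd entry is run through smu7_patch_ppt_v1_with_vdd_leakage(), which replaces virtual leakage IDs (0xff01 ~ 0xff08) with the measured voltage.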
+* +* @param hwmgr the address of the powerplay hardware manager. +* @param pointer to voltage lookup table +* @param pointer to leakage table +* @return always 0 +*/ +static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + struct smu7_leakage_voltage *leakage_table) +{ + uint32_t i; + + for (i = 0; i < lookup_table->count; i++) + smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, + &lookup_table->entries[i].us_vdd, leakage_table); + + return 0; +} + +static int smu7_patch_clock_voltage_limits_with_vddc_leakage( + struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, + uint16_t *vddc) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + table_info->max_clock_voltage_on_dc.vddc; + return 0; +} + +static int smu7_patch_voltage_dependency_tables_with_lookup_table( + struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + uint8_t voltage_id; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { + voltage_id = sclk_table->entries[entry_id].vddInd; + sclk_table->entries[entry_id].vddgfx = + table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; + } + } else { + for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { + voltage_id = sclk_table->entries[entry_id].vddInd; + sclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + } + + for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { + voltage_id = mclk_table->entries[entry_id].vddInd; + mclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { + voltage_id = mm_table->entries[entry_id].vddcInd; + mm_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + return 0; + +} + +static int phm_add_voltage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *look_up_table, + phm_ppt_v1_voltage_lookup_record *record) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != look_up_table), + "Lookup Table empty.", return -EINVAL); + PP_ASSERT_WITH_CODE((0 != look_up_table->count), + "Lookup Table empty.", return -EINVAL); + + i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX); + PP_ASSERT_WITH_CODE((i >= look_up_table->count), + "Lookup Table is full.", return -EINVAL); + + /* This is to avoid entering duplicate calculated records. 
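+ * If an entry with the same us_vdd already exists it is updated in place and the count is left unchanged.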
*/ + for (i = 0; i < look_up_table->count; i++) { + if (look_up_table->entries[i].us_vdd == record->us_vdd) { + if (look_up_table->entries[i].us_calculated == 1) + return 0; + break; + } + } + + look_up_table->entries[i].us_calculated = 1; + look_up_table->entries[i].us_vdd = record->us_vdd; + look_up_table->entries[i].us_cac_low = record->us_cac_low; + look_up_table->entries[i].us_cac_mid = record->us_cac_mid; + look_up_table->entries[i].us_cac_high = record->us_cac_high; + /* Only increment the count when we're appending, not replacing duplicate entry. */ + if (i == look_up_table->count) + look_up_table->count++; + + return 0; +} + + +static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + struct phm_ppt_v1_voltage_lookup_record v_record; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + + phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { + if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) + v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + + sclk_table->entries[entry_id].vdd_offset - 0xFFFF; + else + v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + + sclk_table->entries[entry_id].vdd_offset; + + sclk_table->entries[entry_id].vddc = + v_record.us_cac_low = v_record.us_cac_mid = + v_record.us_cac_high = v_record.us_vdd; + + phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); + } + + for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { + if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) + v_record.us_vdd = mclk_table->entries[entry_id].vddc + + mclk_table->entries[entry_id].vdd_offset - 0xFFFF; + else + v_record.us_vdd = mclk_table->entries[entry_id].vddc + + mclk_table->entries[entry_id].vdd_offset; + + mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = + v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; + phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); + } + } + return 0; +} + +static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + struct phm_ppt_v1_voltage_lookup_record v_record; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + for (entry_id = 0; entry_id < mm_table->count; entry_id++) { + if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) + v_record.us_vdd = mm_table->entries[entry_id].vddc + + mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; + else + v_record.us_vdd = mm_table->entries[entry_id].vddc + + mm_table->entries[entry_id].vddgfx_offset; + + /* Add the calculated VDDGFX to the VDDGFX lookup table */ + mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = + v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; + phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); + } + } + return 0; +} + +static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + uint32_t 
table_size, i, j; + struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; + table_size = lookup_table->count; + + PP_ASSERT_WITH_CODE(0 != lookup_table->count, + "Lookup table is empty", return -EINVAL); + + /* Sorting voltages */ + for (i = 0; i < table_size - 1; i++) { + for (j = i + 1; j > 0; j--) { + if (lookup_table->entries[j].us_vdd < + lookup_table->entries[j - 1].us_vdd) { + tmp_voltage_lookup_record = lookup_table->entries[j - 1]; + lookup_table->entries[j - 1] = lookup_table->entries[j]; + lookup_table->entries[j] = tmp_voltage_lookup_record; + } + } + } + + return 0; +} + +static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) +{ + int result = 0; + int tmp_result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { + tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, + table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); + if (tmp_result != 0) + result = tmp_result; + + smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, + &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); + } else { + + tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, + table_info->vddc_lookup_table, &(data->vddc_leakage)); + if (tmp_result) + result = tmp_result; + + tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, + &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); + if (tmp_result) + result = tmp_result; + } + + tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); + if (tmp_result) + result = tmp_result; + + tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); + if (tmp_result) + result = tmp_result; + + return result; +} + +static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = + table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, + "VDD dependency on SCLK table is missing.", + return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, + "VDD dependency on SCLK table has to have at least one entry.", + return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, + "VDD dependency on MCLK table is missing.", + return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, + "VDD dependency on MCLK table has to have at least one entry.", + return -EINVAL); + + table_info->max_clock_voltage_on_ac.sclk = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.mclk = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.vddc = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; + table_info->max_clock_voltage_on_ac.vddci = +
allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; + + return 0; +} + +int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; + struct phm_ppt_v1_voltage_lookup_table *lookup_table; + uint32_t i; + uint32_t hw_revision, sub_vendor_id, sub_sys_id; + struct cgs_system_info sys_info = {0}; + + if (table_info != NULL) { + dep_mclk_table = table_info->vdd_dep_on_mclk; + lookup_table = table_info->vddc_lookup_table; + } else + return 0; + + sys_info.size = sizeof(struct cgs_system_info); + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(hwmgr->device, &sys_info); + hw_revision = (uint32_t)sys_info.value; + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID; + cgs_query_system_info(hwmgr->device, &sys_info); + sub_sys_id = (uint32_t)sys_info.value; + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID; + cgs_query_system_info(hwmgr->device, &sys_info); + sub_vendor_id = (uint32_t)sys_info.value; + + if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && + ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || + (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || + (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { + if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) + return 0; + + for (i = 0; i < lookup_table->count; i++) { + if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { + dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; + return 0; + } + } + } + return 0; +} + +static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) +{ + struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + uint32_t temp_reg; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { + temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); + switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { + case 0: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); + break; + case 1: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); + break; + case 2: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); + break; + case 3: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); + break; + case 4: + temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); + break; + default: + PP_ASSERT_WITH_CODE(0, + "Failed to setup PCC HW register! 
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", + ); + break; + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); + } + + if (table_info == NULL) + return 0; + + if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && + hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { + hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = + (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = + (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = + (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; + + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; + + table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? + (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; + + table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; + table_info->cac_dtp_table->usOperatingTempStep = 1; + table_info->cac_dtp_table->usOperatingTempHyst = 1; + + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; + + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; + + hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = + table_info->cac_dtp_table->usOperatingTempMinLimit; + + hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = + table_info->cac_dtp_table->usOperatingTempMaxLimit; + + hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = + table_info->cac_dtp_table->usDefaultTargetOperatingTemp; + + hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = + table_info->cac_dtp_table->usOperatingTempStep; + + hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = + table_info->cac_dtp_table->usTargetOperatingTemp; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport); + } + + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); +} + + +static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddci_leakage); + + return 0; +} + +static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, + struct phm_vce_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + + +static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, + struct phm_uvd_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, + struct phm_phase_shedding_limits_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, + struct phm_samu_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, + struct phm_acp_clock_voltage_dependency_table *tab) +{ + uint16_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) + for (i = 0; i < tab->count; i++) + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, + &data->vddc_leakage); + + return 0; +} + +static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *tab) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) { + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc, + &data->vddc_leakage); + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci, + &data->vddci_leakage); + } + + return 0; +} + +static int 
smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) +{ + uint32_t i; + uint32_t vddc; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (tab) { + for (i = 0; i < tab->count; i++) { + vddc = (uint32_t)(tab->entries[i].Vddc); + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); + tab->entries[i].Vddc = (uint16_t)vddc; + } + } + + return 0; +} + +static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) +{ + int tmp; + + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); + if (tmp) + return -EINVAL; + + tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); + if (tmp) + return -EINVAL; + + return 0; +} + + +static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, + "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, + "VDDC dependency on SCLK table has to have at least one entry. This table is mandatory\n", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, + "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, + "VDDC dependency on MCLK table has to have at least one entry. 
This table is mandatory\n", return -EINVAL); + + data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; + data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { + data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; + data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + } + + if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; + + return 0; +} + +int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data; + int result; + + data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + smu7_patch_voltage_workaround(hwmgr); + smu7_init_dpm_defaults(hwmgr); + + /* Get leakage voltage based on leakage ID. */ + result = smu7_get_evv_voltages(hwmgr); + + if (result) { + printk("Get EVV Voltage Failed. Abort Driver loading!\n"); + return -EINVAL; + } + + if (hwmgr->pp_table_version == PP_TABLE_V1) { + smu7_complete_dependency_tables(hwmgr); + smu7_set_private_data_based_on_pptable_v1(hwmgr); + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { + smu7_patch_dependency_tables_with_leakage(hwmgr); + smu7_set_private_data_based_on_pptable_v0(hwmgr); + } + + /* Initalize Dynamic State Adjustment Rule Settings */ + result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + + if (0 == result) { + struct cgs_system_info sys_info = {0}; + + data->is_tlu_enabled = false; + + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + SMU7_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; + else + data->pcie_lane_cap = (uint32_t)sys_info.value; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ +/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + smu7_thermal_parameter_init(hwmgr); + } else { + /* Ignore return value in here, we are cleaning up a mess. 
*/ + phm_hwmgr_backend_fini(hwmgr); + } + + return 0; +} + +static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t level, tmp; + + if (!data->pcie_dpm_key_disabled) { + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, level); + } + } + + if (!data->sclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++; + + if (level) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + return 0; +} + +static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (hwmgr->pp_table_version == PP_TABLE_V1) + phm_apply_dal_min_voltage_request(hwmgr); +/* TO DO for v0 iceland and Ci*/ + + if (!data->sclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + } + + return 0; +} + +static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (!smum_is_dpm_running(hwmgr)) + return -EINVAL; + + if (!data->pcie_dpm_key_disabled) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_UnForceLevel); + } + + return smu7_upload_dpm_level_enable_mask(hwmgr); +} + +static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = + (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t level; + + if (!data->sclk_dpm_key_disabled) + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = phm_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + (1 << level)); + + } + + if (!data->mclk_dpm_key_disabled) { + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + level = phm_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + (1 << level)); + } + } + + if (!data->pcie_dpm_key_disabled) { + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { + level = phm_get_lowest_enabled_level(hwmgr, + data->dpm_level_enable_mask.pcie_dpm_enable_mask); + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + (level)); + } + } + + return 0; + +} +static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + 
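+ /* HIGH forces every enabled DPM mask to its highest level, LOW to its lowest; AUTO unforces PCIe and restores the full SCLK/MCLK enable masks. */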
case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu7_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu7_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = smu7_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + + return ret; +} + +static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct smu7_power_state); +} + + +static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *request_ps, + const struct pp_power_state *current_ps) +{ + + struct smu7_power_state *smu7_ps = + cast_phw_smu7_power_state(&request_ps->hardware); + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, + "VI should always have 2 performance levels", + ); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + /* Cap clock DPM tables at DC MAX if it is in DC. */ + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < smu7_ps->performance_level_count; i++) { + if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) + smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) + smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; + smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ + + minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; + minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = table_info->vdd_dep_on_sclk->count - 1; + count >= 0; count--) { + if (stable_pstate_sclk >= + table_info->vdd_dep_on_sclk->entries[count].clk) { + stable_pstate_sclk = + table_info->vdd_dep_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + 
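+ /* Clamp the requested overdrive engine clock to the VBIOS overdrive limit before applying it to the high performance level. */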
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + smu7_ps->performance_levels[1].engine_clock = + hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + smu7_ps->performance_levels[1].memory_clock = + hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + + + disable_mclk_switching = (1 < info.display_count) || + disable_mclk_switching_for_frame_lock; + + sclk = smu7_ps->performance_levels[0].engine_clock; + mclk = smu7_ps->performance_levels[0].memory_clock; + + if (disable_mclk_switching) + mclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].memory_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? + max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? + max_limits->mclk : minimum_clocks.memoryClock; + + smu7_ps->performance_levels[0].engine_clock = sclk; + smu7_ps->performance_levels[0].memory_clock = mclk; + + smu7_ps->performance_levels[1].engine_clock = + (smu7_ps->performance_levels[1].engine_clock >= + smu7_ps->performance_levels[0].engine_clock) ? 
+ smu7_ps->performance_levels[1].engine_clock : + smu7_ps->performance_levels[0].engine_clock; + + if (disable_mclk_switching) { + if (mclk < smu7_ps->performance_levels[1].memory_clock) + mclk = smu7_ps->performance_levels[1].memory_clock; + + smu7_ps->performance_levels[0].memory_clock = mclk; + smu7_ps->performance_levels[1].memory_clock = mclk; + } else { + if (smu7_ps->performance_levels[1].memory_clock < + smu7_ps->performance_levels[0].memory_clock) + smu7_ps->performance_levels[1].memory_clock = + smu7_ps->performance_levels[0].memory_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + for (i = 0; i < smu7_ps->performance_level_count; i++) { + smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; + smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; + smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; + smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; + } + } + return 0; +} + + +static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + if (low) + return smu7_ps->performance_levels[0].memory_clock; + else + return smu7_ps->performance_levels + [smu7_ps->performance_level_count-1].memory_clock; +} + +static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + if (low) + return smu7_ps->performance_levels[0].engine_clock; + else + return smu7_ps->performance_levels + [smu7_ps->performance_level_count-1].engine_clock; +} + +static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. 
*/ + data->vbios_boot_state.sclk_bootup_value = + le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = + le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = + le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = + le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = + le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = + smu7_get_current_pcie_speed(hwmgr); + + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned long ret = 0; + + if (hwmgr->pp_table_version == PP_TABLE_V0) { + result = pp_tables_get_num_of_entries(hwmgr, &ret); + return result ? 0 : ret; + } else if (hwmgr->pp_table_version == PP_TABLE_V1) { + result = get_number_of_powerplay_table_entries_v1_0(hwmgr); + return result; + } + return 0; +} + +static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, + void *state, struct pp_power_state *power_state, + void *pp_table, uint32_t classification_flag) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_power_state *smu7_power_state = + (struct smu7_power_state *)(&(power_state->hardware)); + struct smu7_performance_level *performance_level; + ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; + ATOM_Tonga_POWERPLAYTABLE *powerplay_table = + (ATOM_Tonga_POWERPLAYTABLE *)pp_table; + PPTable_Generic_SubTable_Header *sclk_dep_table = + (PPTable_Generic_SubTable_Header *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + + ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = + (ATOM_Tonga_MCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + + /* The following fields are not initialized here: id orderedList allStatesList */ + power_state->classification.ui_label = + (le16_to_cpu(state_entry->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> + ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + power_state->classification.flags = classification_flag; + /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ + + power_state->classification.temporary_state = false; + power_state->classification.to_be_deleted = false; + + power_state->validation.disallowOnDC = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Tonga_DISALLOW_ON_DC)); + + power_state->pcie.lanes = 0; + + power_state->display.disableFrameModulation = false; + power_state->display.limitRefreshrate = false; + power_state->display.enableVariBright = + (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Tonga_ENABLE_VARIBRIGHT)); + + power_state->validation.supportedPowerLevels = 0; + power_state->uvd_clocks.VCLK = 0; + power_state->uvd_clocks.DCLK = 0; + power_state->temperatures.min = 0; + power_state->temperatures.max = 0; + + performance_level = &(smu7_power_state->performance_levels 
+ [smu7_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + "Performance levels exceeds SMC limit!", + return -EINVAL); + + PP_ASSERT_WITH_CODE( + (smu7_power_state->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -EINVAL); + + /* Performance levels are arranged from low to high. */ + performance_level->memory_clock = mclk_dep_table->entries + [state_entry->ucMemoryClockIndexLow].ulMclk; + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, + state_entry->ucPCIEGenLow); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + performance_level = &(smu7_power_state->performance_levels + [smu7_power_state->performance_level_count++]); + performance_level->memory_clock = mclk_dep_table->entries + [state_entry->ucMemoryClockIndexHigh].ulMclk; + + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, + state_entry->ucPCIEGenHigh); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, + state_entry->ucPCIELaneHigh); + + return 0; +} + +static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct smu7_power_state *ps; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct smu7_power_state *)(&state->hardware); + + result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, + smu7_get_pp_table_entry_callback_func_v1); + + /* This is the earliest time we have all the dependency table and the VBIOS boot state + * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state + * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].vddci != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if 
(state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *power_state, + unsigned int index, const void *clock_info) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); + const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; + struct smu7_performance_level *performance_level; + uint32_t engine_clock, memory_clock; + uint16_t pcie_gen_from_bios; + + engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; + memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; + + if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) + data->highest_mclk = memory_clock; + + performance_level = &(ps->performance_levels + [ps->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), + "Performance levels exceeds SMC limit!", + return -EINVAL); + + PP_ASSERT_WITH_CODE( + (ps->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -EINVAL); + + /* Performance levels are arranged from low to high. 
*/ + performance_level->memory_clock = memory_clock; + performance_level->engine_clock = engine_clock; + + pcie_gen_from_bios = visland_clk_info->ucPCIEGen; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); + + return 0; +} + +static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct smu7_power_state *ps; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *dep_mclk_table = + hwmgr->dyn_state.vddci_dependency_on_mclk; + + memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct smu7_power_state *)(&state->hardware); + + result = pp_tables_get_entry(hwmgr, entry_index, state, + smu7_get_pp_table_entry_callback_func_v0); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state as + * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot + * state if there is only one VDDCI/MCLK level, check if it's + * the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].v != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if (state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > 
+ ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + if (hwmgr->pp_table_version == PP_TABLE_V0) + return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); + else if (hwmgr->pp_table_version == PP_TABLE_V1) + return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); + + return 0; +} + +static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *value = sclk; + return 0; + case AMDGPU_PP_SENSOR_GFX_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *value = mclk; + return 0; + case AMDGPU_PP_SENSOR_GPU_LOAD: + offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + AverageGraphicsActivity); + + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + *value = activity_percent > 100 ? 100 : activity_percent; + return 0; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *value = smu7_thermal_get_temperature(hwmgr); + return 0; + case AMDGPU_PP_SENSOR_UVD_POWER: + *value = data->uvd_power_gated ? 0 : 1; + return 0; + case AMDGPU_PP_SENSOR_VCE_POWER: + *value = data->vce_power_gated ? 0 : 1; + return 0; + default: + return -EINVAL; + } +} + +static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct smu7_power_state *smu7_ps = + cast_const_phw_smu7_power_state(states->pnew_state); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + uint32_t sclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].engine_clock; + struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + uint32_t mclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].memory_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_smu7_dpm_table = 0; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* TODO: Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. 
+ */ + if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && + (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || + data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, + const struct smu7_power_state *smu7_ps) +{ + uint32_t i; + uint32_t sclk, max_sclk = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + + for (i = 0; i < smu7_ps->performance_level_count; i++) { + sclk = smu7_ps->performance_levels[i].engine_clock; + if (max_sclk < sclk) + max_sclk = sclk; + } + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) + return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? + dpm_table->pcie_speed_table.dpm_levels + [dpm_table->pcie_speed_table.count - 1].value : + dpm_table->pcie_speed_table.dpm_levels[i].value); + } + + return 0; +} + +static int smu7_request_link_speed_change_before_state_change( + struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct smu7_power_state *smu7_nps = + cast_const_phw_smu7_power_state(states->pnew_state); + const struct smu7_power_state *polaris10_cps = + cast_const_phw_smu7_power_state(states->pcurrent_state); + + uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + + if (target_link_speed > current_link_speed) { + switch (target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -EINVAL); + } + + 
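+ /* Freeze MCLK DPM as well while the memory clock levels are being repopulated, so the SMC does not switch levels mid-update. */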
if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -EINVAL); + } + + return 0; +} + +static int smu7_populate_and_upload_sclk_mclk_dpm_levels( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct smu7_power_state *smu7_ps = + cast_const_phw_smu7_power_state(states->pnew_state); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t sclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].engine_clock; + uint32_t mclk = smu7_ps->performance_levels + [smu7_ps->performance_level_count - 1].memory_clock; + struct smu7_dpm_table *dpm_table = &data->dpm_table; + + struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + dpm_table->sclk_table.dpm_levels + [dpm_table->sclk_table.count - 1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (golden_dpm_table->sclk_table.dpm_levels + [golden_dpm_table->sclk_table.count - 1].value != 0), + "Divide by 0!", + return -EINVAL); + dpm_count = dpm_table->sclk_table.count < 2 ? 
0 : dpm_table->sclk_table.count - 2; + + for (i = dpm_count; i > 1; i--) { + if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) { + clock_percent = + ((sclk + - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value + ) * 100) + / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; + + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value + + (golden_dpm_table->sclk_table.dpm_levels[i].value * + clock_percent)/100; + + } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) { + clock_percent = + ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value + - sclk) * 100) + / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value; + + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value - + (golden_dpm_table->sclk_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->sclk_table.dpm_levels[i].value = + golden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + dpm_table->mclk_table.dpm_levels + [dpm_table->mclk_table.count - 1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (golden_dpm_table->mclk_table.dpm_levels + [golden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -EINVAL); + dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) { + clock_percent = ((mclk - + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100) + / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; + + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value + + (golden_dpm_table->mclk_table.dpm_levels[i].value * + clock_percent) / 100; + + } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ( + (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk) + * 100) + / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value; + + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value - + (golden_dpm_table->mclk_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->mclk_table.dpm_levels[i].value = + golden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = smum_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = smum_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + return result; +} + +static 
int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct smu7_single_dpm_table *dpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) + || (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + + return 0; +} + +static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, + const struct smu7_power_state *smu7_ps) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), + "power state did not have any performance level", + return -EINVAL); + + high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; + + smu7_trim_single_dpm_states(hwmgr, + &(data->dpm_table.sclk_table), + smu7_ps->performance_levels[0].engine_clock, + smu7_ps->performance_levels[high_limit_count].engine_clock); + + smu7_trim_single_dpm_states(hwmgr, + &(data->dpm_table.mclk_table), + smu7_ps->performance_levels[0].memory_clock, + smu7_ps->performance_levels[high_limit_count].memory_clock); + + return 0; +} + +static int smu7_generate_dpm_level_enable_mask( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct smu7_power_state *smu7_ps = + cast_const_phw_smu7_power_state(states->pnew_state); + + result = smu7_trim_dpm_states(hwmgr, smu7_ps); + if (result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -EINVAL); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -EINVAL); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + +static int smu7_notify_link_speed_change_after_state_change( + struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct smu7_power_state *smu7_ps = + 
cast_const_phw_smu7_power_state(states->pnew_state); + uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); + uint8_t request; + + if (data->pspp_notify_required) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if (request == PCIE_PERF_REQ_GEN1 && + smu7_get_current_pcie_speed(hwmgr) > 0) + return 0; + + if (acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + return 0; +} + +static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; +} + +static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int tmp_result, result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to find DPM states clocks in DPM table!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = + smu7_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to request link speed change before state change!", + result = tmp_result); + } + + tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate and upload SCLK MCLK DPM levels!", + result = tmp_result); + + tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to generate DPM level enabled mask!", + result = tmp_result); + + tmp_result = smum_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to update SCLK threshold!", + result = tmp_result); + + tmp_result = smu7_notify_smc_display(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify smc display settings!", + result = tmp_result); + + tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to unfreeze SCLK MCLK DPM!", + result = tmp_result); + + tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to upload DPM level enabled mask!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = + smu7_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify link speed change after state change!", + result = tmp_result); + } + data->apply_optimized_settings = false; + return result; +} + +static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller. 
+ advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); +} + +int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) +{ + PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; + + return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; +} + +int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + struct cgs_display_info info = {0}; + + info.mode_info = NULL; + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_displays = info.display_count; + + if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true) + smu7_notify_smc_display_change(hwmgr, false); + + return 0; +} + +/** +* Programs the display gap +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always OK +*/ +int smu7_program_display_gap(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if (0 == refresh_rate) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + data->frame_time_x2 = frame_time_in_us * 2 / 100; + + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + PreVBlankGap), 0x64); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr, + SMU_SoftRegisters, + VBlankTimeout), + (frame_time_in_us - pre_vbi_time_in_us)); + + return 0; +} + +int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + return smu7_program_display_gap(hwmgr); +} + +/** +* Set maximum target operating fan output RPM +* +* @param hwmgr: the address of the powerplay hardware manager. +* @param usMaxFanRpm: max operating fan RPM value. +* @return The response that came from the SMC. +*/ +static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) +{ + hwmgr->thermal_controller. 
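The timing values written by smu7_program_display_gap() above all derive from the active mode. A minimal arithmetic sketch with hypothetical inputs (60 Hz refresh, 500 us of vblank, and an assumed ref_clock reading) shows the numbers that end up in frame_time_x2, CG_DISPLAY_GAP_CNTL2 and the VBlankTimeout soft register:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical mode data; units are whatever cgs_mode_info reports. */
	uint32_t refresh_rate = 60;
	uint32_t vblank_time_us = 500;
	uint32_t ref_clock = 10000;	/* assumed value for illustration */

	uint32_t frame_time_in_us = 1000000 / refresh_rate;		     /* 16666 */
	uint32_t pre_vbi_time_in_us = frame_time_in_us - 200 - vblank_time_us; /* 15966 */
	uint32_t frame_time_x2 = frame_time_in_us * 2 / 100;		     /* 333 */
	uint32_t display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);	     /* 1596600 */
	uint32_t vblank_timeout = frame_time_in_us - pre_vbi_time_in_us;    /* 700 */

	printf("%u %u %u %u %u\n", frame_time_in_us, pre_vbi_time_in_us,
	       frame_time_x2, display_gap2, vblank_timeout);
	return 0;
}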
+ advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); +} + +int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + return 0; +} + +bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0, 0, NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr && + (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || + hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) + is_update_required = true; + } + return is_update_required; +} + +static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, + const struct smu7_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1); + const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2); + int i; + + if (pstate1 == NULL || pstate2 == NULL || equal == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); + *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + + return 0; +} + +int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t vbios_version; + uint32_t tmp; + + /* Read MC indirect register offset 0x9F bits [3:0] to see + * if VBIOS has already loaded a full version of MC ucode + * or not. 
+ */ + + smu7_get_mc_microcode_version(hwmgr); + vbios_version = hwmgr->microcode_version_info.MC & 0xf; + + data->need_long_memory_training = false; + + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, + ixMC_IO_DEBUG_UP_13); + tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + if (tmp & (1 << 23)) { + data->mem_latency_high = MEM_LATENCY_HIGH; + data->mem_latency_low = MEM_LATENCY_LOW; + } else { + data->mem_latency_high = 330; + data->mem_latency_low = 330; + } + + return 0; +} + +static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); + data->clock_registers.vDLL_CNTL = + cgs_read_register(hwmgr->device, mmDLL_CNTL); + data->clock_registers.vMCLK_PWRMGT_CNTL = + cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); + data->clock_registers.vMPLL_AD_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); + data->clock_registers.vMPLL_DQ_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL_1 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); + data->clock_registers.vMPLL_FUNC_CNTL_2 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); + data->clock_registers.vMPLL_SS1 = + cgs_read_register(hwmgr->device, mmMPLL_SS1); + data->clock_registers.vMPLL_SS2 = + cgs_read_register(hwmgr->device, mmMPLL_SS2); + return 0; + +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Initialize PowerGating States for different engines + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + + return 0; +} + +static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + return 0; +} + +int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + smu7_upload_mc_firmware(hwmgr); + + tmp_result = smu7_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = smu7_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = smu7_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = smu7_init_power_gate_state(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init power gate state!", result = tmp_result); + + tmp_result = smu7_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = smu7_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + case PP_PCIE: + { + uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? 
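In smu7_force_clock_level() above, the PP_SCLK and PP_MCLK cases pass the masked bitmap straight to the SMC, while the PP_PCIE case collapses the mask to a single forced level: the index of its highest set bit. A standalone sketch of that reduction:

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit, i.e. the level the PP_PCIE branch forces
 * through PPSMC_MSG_PCIeDPM_ForceLevel (returns 0 for an empty mask too). */
static uint32_t highest_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)
		level++;
	return level;
}

int main(void)
{
	printf("%u\n", highest_level(0x1));	/* 0 */
	printf("%u\n", highest_level(0x6));	/* 2 */
	printf("%u\n", highest_level(0x5));	/* 2: lower bits are ignored */
	return 0;
}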
"*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = smu7_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); + smu7_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct smu7_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct smu7_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t 
value) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct smu7_power_state *smu7_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + smu7_ps = cast_phw_smu7_power_state(&ps->hardware); + + smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + + +static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + int i; + + if (table_info == NULL) + return -EINVAL; + + dep_sclk_table = table_info->vdd_dep_on_sclk; + + for (i = 0; i < dep_sclk_table->count; i++) { + clocks->clock[i] = dep_sclk_table->entries[i].clk; + clocks->count++; + } + return 0; +} + +static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY) + return data->mem_latency_high; + else if (clk >= MEM_FREQ_HIGH_LATENCY) + return data->mem_latency_low; + else + return MEM_LATENCY_ERR; +} + +static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; + int i; + + if (table_info == NULL) + return -EINVAL; + + dep_mclk_table = table_info->vdd_dep_on_mclk; + + for (i = 0; i < dep_mclk_table->count; i++) { + clocks->clock[i] = dep_mclk_table->entries[i].clk; + clocks->latency[i] = smu7_get_mem_latency(hwmgr, + dep_mclk_table->entries[i].clk); + clocks->count++; + } + return 0; +} + +static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + switch (type) { + case amd_pp_sys_clock: + smu7_get_sclks(hwmgr, clocks); + break; + case amd_pp_mem_clock: + smu7_get_mclks(hwmgr, clocks); + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct pp_hwmgr_func smu7_hwmgr_funcs = { + .backend_init = &smu7_hwmgr_backend_init, + .backend_fini = &phm_hwmgr_backend_fini, + .asic_setup = &smu7_setup_asic_task, + .dynamic_state_management_enable = &smu7_enable_dpm_tasks, + .apply_state_adjust_rules = smu7_apply_state_adjust_rules, + .force_dpm_level = &smu7_force_dpm_level, + .power_state_set = smu7_set_power_state_tasks, + .get_power_state_size = smu7_get_power_state_size, + .get_mclk = smu7_dpm_get_mclk, + .get_sclk = smu7_dpm_get_sclk, + .patch_boot_state = smu7_dpm_patch_boot_state, + .get_pp_table_entry = smu7_get_pp_table_entry, + .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, + .powerdown_uvd = smu7_powerdown_uvd, + .powergate_uvd = smu7_powergate_uvd, + .powergate_vce = smu7_powergate_vce, + .disable_clock_power_gating = smu7_disable_clock_power_gating, + .update_clock_gatings = smu7_update_clock_gatings, + .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = smu7_display_configuration_changed_task, + .set_max_fan_pwm_output = 
smu7_set_max_fan_pwm_output, + .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output, + .get_temperature = smu7_thermal_get_temperature, + .stop_thermal_controller = smu7_thermal_stop_thermal_controller, + .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt, + .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration, + .check_states_equal = smu7_check_states_equal, + .set_fan_control_mode = smu7_set_fan_control_mode, + .get_fan_control_mode = smu7_get_fan_control_mode, + .force_clock_level = smu7_force_clock_level, + .print_clock_levels = smu7_print_clock_levels, + .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating, + .get_sclk_od = smu7_get_sclk_od, + .set_sclk_od = smu7_set_sclk_od, + .get_mclk_od = smu7_get_mclk_od, + .set_mclk_od = smu7_set_mclk_od, + .get_clock_by_type = smu7_get_clock_by_type, + .read_sensor = smu7_read_sensor, +}; + +uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); + + PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); + for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + temp = clock >> i; + + if (temp >= min || i == 0) + break; + } + return i; +} + +int smu7_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + hwmgr->hwmgr_func = &smu7_hwmgr_funcs; + if (hwmgr->pp_table_version == PP_TABLE_V0) + hwmgr->pptable_func = &pptable_funcs; + else if (hwmgr->pp_table_version == PP_TABLE_V1) + hwmgr->pptable_func = &pptable_v1_0_funcs; + + pp_smu7_thermal_initialize(hwmgr); + return ret; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index 33c33947e827..27e7f76ad8a6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -21,81 +21,100 @@ * */ -#ifndef POLARIS10_HWMGR_H -#define POLARIS10_HWMGR_H +#ifndef _SMU7_HWMGR_H +#define _SMU7_HWMGR_H #include "hwmgr.h" -#include "smu74.h" -#include "smu74_discrete.h" #include "ppatomctrl.h" -#include "polaris10_ppsmc.h" -#include "polaris10_powertune.h" -#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2 +#define SMU7_MAX_HARDWARE_POWERLEVELS 2 -#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0 -#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2 -#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3 +#define SMU7_VOLTAGE_CONTROL_NONE 0x0 +#define SMU7_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define SMU7_VOLTAGE_CONTROL_MERGED 0x3 #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 #define DPMTABLE_OD_UPDATE_MCLK 0x00000002 #define DPMTABLE_UPDATE_SCLK 0x00000004 #define DPMTABLE_UPDATE_MCLK 0x00000008 -struct polaris10_performance_level { +enum gpu_pt_config_reg_type { + GPU_CONFIGREG_MMR = 0, + GPU_CONFIGREG_SMC_IND, + GPU_CONFIGREG_DIDT_IND, + GPU_CONFIGREG_GC_CAC_IND, + GPU_CONFIGREG_CACHE, + GPU_CONFIGREG_MAX +}; 
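The deep-sleep divider helper exported earlier in this file, smu7_get_sleep_divider_id_from_clock() (declared near the bottom of this header), simply searches for the largest power-of-two divider that keeps the engine clock at or above the stutter-mode floor. A self-contained copy with a worked value, reusing the SMU7_MAX_DEEPSLEEP_DIVIDER_ID and SMU7_MINIMUM_ENGINE_CLOCK limits defined in this header:

#include <stdint.h>
#include <stdio.h>

#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU7_MINIMUM_ENGINE_CLOCK 2500

/* Standalone copy of the divider search: pick the largest shift id whose
 * divided clock still meets max(clock_insr, SMU7_MINIMUM_ENGINE_CLOCK). */
static uint8_t sleep_divider_id(uint32_t clock, uint32_t clock_insr)
{
	uint32_t min = clock_insr > SMU7_MINIMUM_ENGINE_CLOCK ?
			clock_insr : SMU7_MINIMUM_ENGINE_CLOCK;
	uint8_t i;

	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		if ((clock >> i) >= min || i == 0)
			break;
	}
	return i;
}

int main(void)
{
	/* 300 MHz (30000 in 10 kHz units) against the 25 MHz floor:
	 * 30000 >> 3 = 3750 >= 2500, so the divider id is 3. */
	printf("%u\n", sleep_divider_id(30000, 0));
	return 0;
}

The driver version additionally warns and returns 0 when the clock cannot satisfy the stutter minimum at all.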
+ +struct gpu_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum gpu_pt_config_reg_type type; +}; + +struct smu7_performance_level { uint32_t memory_clock; uint32_t engine_clock; uint16_t pcie_gen; uint16_t pcie_lane; }; -struct polaris10_uvd_clocks { +struct smu7_thermal_temperature_setting { + long temperature_low; + long temperature_high; + long temperature_shutdown; +}; + +struct smu7_uvd_clocks { uint32_t vclk; uint32_t dclk; }; -struct polaris10_vce_clocks { +struct smu7_vce_clocks { uint32_t evclk; uint32_t ecclk; }; -struct polaris10_power_state { +struct smu7_power_state { uint32_t magic; - struct polaris10_uvd_clocks uvd_clks; - struct polaris10_vce_clocks vce_clks; + struct smu7_uvd_clocks uvd_clks; + struct smu7_vce_clocks vce_clks; uint32_t sam_clk; uint16_t performance_level_count; bool dc_compatible; uint32_t sclk_threshold; - struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS]; + struct smu7_performance_level performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS]; }; -struct polaris10_dpm_level { +struct smu7_dpm_level { bool enabled; uint32_t value; uint32_t param1; }; -#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5 #define MAX_REGULAR_DPM_NUMBER 8 -#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500 +#define SMU7_MINIMUM_ENGINE_CLOCK 2500 -struct polaris10_single_dpm_table { +struct smu7_single_dpm_table { uint32_t count; - struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; + struct smu7_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; }; -struct polaris10_dpm_table { - struct polaris10_single_dpm_table sclk_table; - struct polaris10_single_dpm_table mclk_table; - struct polaris10_single_dpm_table pcie_speed_table; - struct polaris10_single_dpm_table vddc_table; - struct polaris10_single_dpm_table vddci_table; - struct polaris10_single_dpm_table mvdd_table; +struct smu7_dpm_table { + struct smu7_single_dpm_table sclk_table; + struct smu7_single_dpm_table mclk_table; + struct smu7_single_dpm_table pcie_speed_table; + struct smu7_single_dpm_table vddc_table; + struct smu7_single_dpm_table vddci_table; + struct smu7_single_dpm_table mvdd_table; }; -struct polaris10_clock_registers { +struct smu7_clock_registers { uint32_t vCG_SPLL_FUNC_CNTL; uint32_t vCG_SPLL_FUNC_CNTL_2; uint32_t vCG_SPLL_FUNC_CNTL_3; @@ -116,42 +135,35 @@ struct polaris10_clock_registers { #define DISABLE_MC_LOADMICROCODE 1 #define DISABLE_MC_CFGPROGRAMMING 2 -struct polaris10_voltage_smio_registers { +struct smu7_voltage_smio_registers { uint32_t vS0_VID_LOWER_SMIO_CNTL; }; -#define POLARIS10_MAX_LEAKAGE_COUNT 8 +#define SMU7_MAX_LEAKAGE_COUNT 8 -struct polaris10_leakage_voltage { +struct smu7_leakage_voltage { uint16_t count; - uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT]; + uint16_t leakage_id[SMU7_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[SMU7_MAX_LEAKAGE_COUNT]; }; -struct polaris10_vbios_boot_state { +struct smu7_vbios_boot_state { uint16_t mvdd_bootup_value; uint16_t vddc_bootup_value; uint16_t vddci_bootup_value; + uint16_t vddgfx_bootup_value; uint32_t sclk_bootup_value; uint32_t mclk_bootup_value; uint16_t pcie_gen_bootup_value; uint16_t pcie_lane_bootup_value; }; -/* Ultra Low Voltage parameter structure */ -struct polaris10_ulv_parm { - bool ulv_supported; - uint32_t cg_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct polaris10_performance_level ulv_power_level; -}; - -struct polaris10_display_timing { 
+struct smu7_display_timing { uint32_t min_clock_in_sr; uint32_t num_existing_displays; }; -struct polaris10_dpmlevel_enable_mask { +struct smu7_dpmlevel_enable_mask { uint32_t uvd_dpm_enable_mask; uint32_t vce_dpm_enable_mask; uint32_t acp_dpm_enable_mask; @@ -161,22 +173,15 @@ struct polaris10_dpmlevel_enable_mask { uint32_t pcie_dpm_enable_mask; }; -struct polaris10_pcie_perf_range { +struct smu7_pcie_perf_range { uint16_t max; uint16_t min; }; -struct polaris10_range_table { - uint32_t trans_lower_frequency; /* in 10khz */ - uint32_t trans_upper_frequency; -}; -struct polaris10_hwmgr { - struct polaris10_dpm_table dpm_table; - struct polaris10_dpm_table golden_dpm_table; - SMU74_Discrete_DpmTable smc_state_table; - struct SMU74_Discrete_Ulv ulv_setting; +struct smu7_hwmgr { + struct smu7_dpm_table dpm_table; + struct smu7_dpm_table golden_dpm_table; - struct polaris10_range_table range_table[NUM_SCLK_RANGE]; uint32_t voting_rights_clients0; uint32_t voting_rights_clients1; uint32_t voting_rights_clients2; @@ -188,12 +193,11 @@ struct polaris10_hwmgr { uint32_t static_screen_threshold_unit; uint32_t static_screen_threshold; uint32_t voltage_control; - uint32_t vddc_vddci_delta; - + uint32_t vdd_gfx_control; + uint32_t vddc_vddgfx_delta; uint32_t active_auto_throttle_sources; - struct polaris10_clock_registers clock_registers; - struct polaris10_voltage_smio_registers voltage_smio_registers; + struct smu7_clock_registers clock_registers; bool is_memory_gddr5; uint16_t acpi_vddc; @@ -203,8 +207,9 @@ struct polaris10_hwmgr { uint32_t pcie_gen_cap; uint32_t pcie_lane_cap; uint32_t pcie_spc_cap; - struct polaris10_leakage_voltage vddc_leakage; - struct polaris10_leakage_voltage Vddci_leakage; + struct smu7_leakage_voltage vddc_leakage; + struct smu7_leakage_voltage vddci_leakage; + struct smu7_leakage_voltage vddcgfx_leakage; uint32_t mvdd_control; uint32_t vddc_mask_low; @@ -213,30 +218,23 @@ struct polaris10_hwmgr { uint16_t min_vddc_in_pptable; uint16_t max_vddci_in_pptable; uint16_t min_vddci_in_pptable; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edcwr_enable_threshold; bool is_uvd_enabled; - struct polaris10_vbios_boot_state vbios_boot_state; + struct smu7_vbios_boot_state vbios_boot_state; bool pcie_performance_request; bool battery_state; bool is_tlu_enabled; + bool disable_handshake; + bool smc_voltage_control_enabled; + bool vbi_time_out_support; - /* ---- SMC SRAM Address of firmware header tables ---- */ - uint32_t sram_end; - uint32_t dpm_table_start; - uint32_t soft_regs_start; - uint32_t mc_reg_table_start; - uint32_t fan_table_start; - uint32_t arb_table_start; - + uint32_t soft_regs_start; /* ---- Stuff originally coming from Evergreen ---- */ uint32_t vddci_control; struct pp_atomctrl_voltage_table vddc_voltage_table; struct pp_atomctrl_voltage_table vddci_voltage_table; struct pp_atomctrl_voltage_table mvdd_voltage_table; + struct pp_atomctrl_voltage_table vddgfx_voltage_table; uint32_t mgcg_cgtt_local2; uint32_t mgcg_cgtt_local3; @@ -250,7 +248,7 @@ struct polaris10_hwmgr { bool performance_request_registered; /* ---- Low Power Features ---- */ - struct polaris10_ulv_parm ulv; + bool ulv_supported; /* ---- CAC Stuff ---- */ uint32_t cac_table_start; @@ -264,8 +262,8 @@ struct polaris10_hwmgr { bool enable_tdc_limit_feature; bool enable_pkg_pwr_tracking_feature; bool disable_uvd_power_tune_feature; - const struct polaris10_pt_defaults *power_tune_defaults; - struct 
SMU74_Discrete_PmFuses power_tune_table; + + uint32_t dte_tj_offset; uint32_t fast_watermark_threshold; @@ -273,23 +271,22 @@ struct polaris10_hwmgr { bool vddc_phase_shed_control; /* ---- DI/DT ---- */ - struct polaris10_display_timing display_timing; - uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; + struct smu7_display_timing display_timing; /* ---- Thermal Temperature Setting ---- */ - struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask; + struct smu7_thermal_temperature_setting thermal_temp_setting; + struct smu7_dpmlevel_enable_mask dpm_level_enable_mask; uint32_t need_update_smu7_dpm_table; uint32_t sclk_dpm_key_disabled; uint32_t mclk_dpm_key_disabled; uint32_t pcie_dpm_key_disabled; uint32_t min_engine_clocks; - struct polaris10_pcie_perf_range pcie_gen_performance; - struct polaris10_pcie_perf_range pcie_lane_performance; - struct polaris10_pcie_perf_range pcie_gen_power_saving; - struct polaris10_pcie_perf_range pcie_lane_power_saving; + struct smu7_pcie_perf_range pcie_gen_performance; + struct smu7_pcie_perf_range pcie_lane_performance; + struct smu7_pcie_perf_range pcie_gen_power_saving; + struct smu7_pcie_perf_range pcie_lane_power_saving; bool use_pcie_performance_levels; bool use_pcie_power_saving_levels; - uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; uint32_t mclk_activity_target; uint32_t mclk_dpm0_activity_target; uint32_t low_sclk_interrupt_threshold; @@ -309,49 +306,48 @@ struct polaris10_hwmgr { uint32_t up_hyst; uint32_t disable_dpm_mask; bool apply_optimized_settings; + uint32_t avfs_vdroop_override_setting; bool apply_avfs_cks_off_voltage; uint32_t frame_time_x2; + uint16_t mem_latency_high; + uint16_t mem_latency_low; }; /* To convert to Q8.8 format for firmware */ -#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256 - -enum Polaris10_I2CLineID { - Polaris10_I2CLineID_DDC1 = 0x90, - Polaris10_I2CLineID_DDC2 = 0x91, - Polaris10_I2CLineID_DDC3 = 0x92, - Polaris10_I2CLineID_DDC4 = 0x93, - Polaris10_I2CLineID_DDC5 = 0x94, - Polaris10_I2CLineID_DDC6 = 0x95, - Polaris10_I2CLineID_SCLSDA = 0x96, - Polaris10_I2CLineID_DDCVGA = 0x97 +#define SMU7_Q88_FORMAT_CONVERSION_UNIT 256 + +enum SMU7_I2CLineID { + SMU7_I2CLineID_DDC1 = 0x90, + SMU7_I2CLineID_DDC2 = 0x91, + SMU7_I2CLineID_DDC3 = 0x92, + SMU7_I2CLineID_DDC4 = 0x93, + SMU7_I2CLineID_DDC5 = 0x94, + SMU7_I2CLineID_DDC6 = 0x95, + SMU7_I2CLineID_SCLSDA = 0x96, + SMU7_I2CLineID_DDCVGA = 0x97 }; -#define POLARIS10_I2C_DDC1DATA 0 -#define POLARIS10_I2C_DDC1CLK 1 -#define POLARIS10_I2C_DDC2DATA 2 -#define POLARIS10_I2C_DDC2CLK 3 -#define POLARIS10_I2C_DDC3DATA 4 -#define POLARIS10_I2C_DDC3CLK 5 -#define POLARIS10_I2C_SDA 40 -#define POLARIS10_I2C_SCL 41 -#define POLARIS10_I2C_DDC4DATA 65 -#define POLARIS10_I2C_DDC4CLK 66 -#define POLARIS10_I2C_DDC5DATA 0x48 -#define POLARIS10_I2C_DDC5CLK 0x49 -#define POLARIS10_I2C_DDC6DATA 0x4a -#define POLARIS10_I2C_DDC6CLK 0x4b -#define POLARIS10_I2C_DDCVGADATA 0x4c -#define POLARIS10_I2C_DDCVGACLK 0x4d - -#define POLARIS10_UNUSED_GPIO_PIN 0x7F - -int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); - -int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); -int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate); +#define SMU7_I2C_DDC1DATA 0 +#define SMU7_I2C_DDC1CLK 1 +#define SMU7_I2C_DDC2DATA 2 +#define SMU7_I2C_DDC2CLK 3 +#define SMU7_I2C_DDC3DATA 4 +#define SMU7_I2C_DDC3CLK 5 +#define SMU7_I2C_SDA 40 +#define 
SMU7_I2C_SCL 41 +#define SMU7_I2C_DDC4DATA 65 +#define SMU7_I2C_DDC4CLK 66 +#define SMU7_I2C_DDC5DATA 0x48 +#define SMU7_I2C_DDC5CLK 0x49 +#define SMU7_I2C_DDC6DATA 0x4a +#define SMU7_I2C_DDC6CLK 0x4b +#define SMU7_I2C_DDCVGADATA 0x4c +#define SMU7_I2C_DDCVGACLK 0x4d + +#define SMU7_UNUSED_GPIO_PIN 0x7F +uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); +uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index b9cb240a135d..41b634ffa5b0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -20,546 +20,364 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ - #include "hwmgr.h" #include "smumgr.h" -#include "polaris10_hwmgr.h" -#include "polaris10_powertune.h" -#include "polaris10_smumgr.h" -#include "smu74_discrete.h" +#include "smu7_hwmgr.h" +#include "smu7_powertune.h" #include "pp_debug.h" -#include "gca/gfx_8_0_d.h" -#include "gca/gfx_8_0_sh_mask.h" -#include "oss/oss_3_0_sh_mask.h" +#include "smu7_common.h" #define VOLTAGE_SCALE 4 -#define POWERTUNE_DEFAULT_SET_MAX 1 -uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; +static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; -struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = { +static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = { /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * Offset Mask Shift Value Type * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- */ - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, 
GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND }, { 0xFFFFFFFF } }; -struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = { +static const struct gpu_pt_config_reg GCCACConfig_Polaris11[] = { /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * Offset Mask Shift Value Type * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- */ - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, - - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND }, - { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, 
POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, GPU_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, GPU_CONFIGREG_GC_CAC_IND }, { 0xFFFFFFFF } }; -struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = { +static const struct gpu_pt_config_reg DIDTConfig_Polaris10[] = { /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * Offset Mask Shift Value Type * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- */ - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { 
ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, 
POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { 
ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL2, 
DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, 
DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, 
DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, 
DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 
0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, 
DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { 0xFFFFFFFF } }; -struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = { +static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = { /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * Offset Mask Shift Value Type * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- */ - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, 
DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, 
DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, 
DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, 
DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, - { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { 
ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { 
ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, 
DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 
0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { 0xFFFFFFFF } }; -static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { - /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, - * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ - { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, - { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 
0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, - { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, -}; - -void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (table_info && - table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && - table_info->cac_dtp_table->usPowerTuneDataSetID) - polaris10_hwmgr->power_tune_defaults = - &polaris10_power_tune_data_set_array - [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; - else - polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0]; - -} - -static uint16_t scale_fan_gain_settings(uint16_t raw_setting) -{ - uint32_t tmp; - tmp = raw_setting * 4096 / 100; - return (uint16_t)tmp; -} - -int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; - struct pp_advance_fan_control_parameters *fan_table= - &hwmgr->thermal_controller.advanceFanControlParameters; - int i, j, k; - const uint16_t *pdef1; - const uint16_t *pdef2; - - dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); - dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); - - PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, - "Target Operating Temp is out of Range!", - ); - - dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( - cac_dtp_table->usTargetOperatingTemp * 256); - dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( - cac_dtp_table->usTemperatureLimitHotspot * 256); - dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainEdge)); - dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( - scale_fan_gain_settings(fan_table->usFanGainHotspot)); - - pdef1 = defaults->BAPMTI_R; - pdef2 = defaults->BAPMTI_RC; - - for (i = 0; i < SMU74_DTE_ITERATIONS; i++) { - for (j = 0; j < SMU74_DTE_SOURCES; j++) { - for (k = 0; k < SMU74_DTE_SINKS; k++) { - dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); - dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); - pdef1++; - pdef2++; - } - } - } - - return 0; -} - -static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - - data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; - data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; - data->power_tune_table.SviLoadLineTrimVddC = 3; - data->power_tune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} -static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) -{ - uint16_t tdc_limit; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - - tdc_limit = 
(uint16_t)(table_info->cac_dtp_table->usTDC * 128); - data->power_tune_table.TDC_VDDC_PkgLimit = - CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); - data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - defaults->TDC_VDDC_ThrottleReleaseLimitPerc; - data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; - - return 0; -} - -static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_pt_defaults *defaults = data->power_tune_defaults; - uint32_t temp; - - if (polaris10_read_smc_sram_dword(hwmgr->smumgr, - fuse_table_offset + - offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), - (uint32_t *)&temp, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", - return -EINVAL); - else { - data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; - data->power_tune_table.LPMLTemperatureMin = - (uint8_t)((temp >> 16) & 0xff); - data->power_tune_table.LPMLTemperatureMax = - (uint8_t)((temp >> 8) & 0xff); - data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); - } - return 0; -} - -static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) -{ - int i; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.LPMLTemperatureScaler[i] = 0; - - return 0; -} - -static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) - || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) - hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = - hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; - - data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US( - hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity); - return 0; -} - -static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) -{ - int i; - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - /* Currently not used. Set all to zero. */ - for (i = 0; i < 16; i++) - data->power_tune_table.GnbLPML[i] = 0; - - return 0; -} - -static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) +static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) { uint32_t en = enable ? 
1 : 0;
@@ -608,29 +426,29 @@ static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
 	return result;
 }
 
-static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
-				struct polaris10_pt_config_reg *cac_config_regs)
+static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+				const struct gpu_pt_config_reg *cac_config_regs)
 {
-	struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+	const struct gpu_pt_config_reg *config_regs = cac_config_regs;
 	uint32_t cache = 0;
 	uint32_t data = 0;
 
 	PP_ASSERT_WITH_CODE((config_regs != NULL),
			"Invalid config register table.", return -EINVAL);
 
 	while (config_regs->offset != 0xFFFFFFFF) {
-		if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+		if (config_regs->type == GPU_CONFIGREG_CACHE)
 			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
 		else {
 			switch (config_regs->type) {
-			case POLARIS10_CONFIGREG_SMC_IND:
+			case GPU_CONFIGREG_SMC_IND:
 				data = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC, config_regs->offset);
 				break;
-			case POLARIS10_CONFIGREG_DIDT_IND:
+			case GPU_CONFIGREG_DIDT_IND:
 				data = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__DIDT, config_regs->offset);
 				break;
-			case POLARIS10_CONFIGREG_GC_CAC_IND:
+			case GPU_CONFIGREG_GC_CAC_IND:
 				data = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG_GC_CAC, config_regs->offset);
 				break;
@@ -644,15 +462,15 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
 			data |= cache;
 
 			switch (config_regs->type) {
-			case POLARIS10_CONFIGREG_SMC_IND:
+			case GPU_CONFIGREG_SMC_IND:
 				cgs_write_ind_register(hwmgr->device,
						CGS_IND_REG__SMC, config_regs->offset, data);
 				break;
-			case POLARIS10_CONFIGREG_DIDT_IND:
+			case GPU_CONFIGREG_DIDT_IND:
 				cgs_write_ind_register(hwmgr->device,
						CGS_IND_REG__DIDT, config_regs->offset, data);
 				break;
-			case POLARIS10_CONFIGREG_GC_CAC_IND:
+			case GPU_CONFIGREG_GC_CAC_IND:
 				cgs_write_ind_register(hwmgr->device,
						CGS_IND_REG_GC_CAC, config_regs->offset, data);
 				break;
@@ -669,7 +487,7 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 {
 	int result;
 	uint32_t num_se = 0;
@@ -699,20 +517,20 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
 			cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
 			if (hwmgr->chip_id == CHIP_POLARIS10) {
-				result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-				result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 			} else if (hwmgr->chip_id == CHIP_POLARIS11) {
-				result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-				result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 			}
 		}
 
 	cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
-	result = polaris10_enable_didt(hwmgr, true);
+	result = smu7_enable_didt(hwmgr, true);
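The table walker shown in the hunk above relies on two conventions that are easy to miss when skimming the DIDTConfig tables: a sentinel entry whose offset is 0xFFFFFFFF terminates the table, and cache-type entries only accumulate (value << shift) & mask into a local variable, which is folded into the next indirect-register entry by a read-modify-write. The sketch below is a minimal, self-contained illustration of that pattern, not the driver's code: pt_config_reg, reg_read()/reg_write(), fake_regs and the example table values are hypothetical stand-ins for gpu_pt_config_reg and the cgs indirect-register accessors, and the mask/merge step is assumed from the visible structure of the hunk (part of it is elided by the diff context).

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for cgs_read_ind_register()/cgs_write_ind_register(). */
static uint32_t fake_regs[16];
static uint32_t reg_read(uint32_t offset)              { return fake_regs[offset]; }
static void     reg_write(uint32_t offset, uint32_t d) { fake_regs[offset] = d; }

enum cfg_type { CFG_CACHE, CFG_IND };

struct pt_config_reg {
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
	enum cfg_type type;
};

static int program_pt_config_registers(const struct pt_config_reg *r)
{
	uint32_t cache = 0;

	if (r == NULL)
		return -EINVAL;

	while (r->offset != 0xFFFFFFFF) {       /* sentinel ends the table */
		if (r->type == CFG_CACHE) {
			/* accumulate this field; nothing is written yet */
			cache |= (r->value << r->shift) & r->mask;
		} else {
			/* read-modify-write: replace the field, fold in the cache */
			uint32_t data = reg_read(r->offset);

			data &= ~r->mask;
			data |= (r->value << r->shift) & r->mask;
			data |= cache;
			reg_write(r->offset, data);
			cache = 0;
		}
		r++;
	}
	return 0;
}

int main(void)
{
	/* two-field example: cache one field, flush it together with the next */
	static const struct pt_config_reg table[] = {
		{ 2, 0x00ff, 0, 0x73, CFG_CACHE },
		{ 2, 0xff00, 8, 0xab, CFG_IND },
		{ 0xFFFFFFFF }
	};

	program_pt_config_registers(table);
	printf("reg[2] = 0x%04x\n", fake_regs[2]);      /* prints 0xab73 */
	return 0;
}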
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); /* TO DO Post DIDT enable clock gating */ @@ -721,7 +539,7 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr) return 0; } -int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) +int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) { int result; @@ -731,7 +549,7 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { /* TO DO Pre DIDT disable clock gating */ - result = polaris10_enable_didt(hwmgr, false); + result = smu7_enable_didt(hwmgr, false); PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result); /* TO DO Post DIDT enable clock gating */ } @@ -739,95 +557,9 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) return 0; } - -static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; - - hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); - lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); - - data->power_tune_table.BapmVddCBaseLeakageHiSidd = - CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); - data->power_tune_table.BapmVddCBaseLeakageLoSidd = - CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); - - return 0; -} - -int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint32_t pm_fuse_table_offset; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment)) { - if (polaris10_read_smc_sram_dword(hwmgr->smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU74_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to get pm_fuse_table_offset Failed!", - return -EINVAL); - - if (polaris10_populate_svi_load_line(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate SviLoadLine Failed!", - return -EINVAL); - - if (polaris10_populate_tdc_limit(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TDCLimit Failed!", return -EINVAL); - - if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate TdcWaterfallCtl, " - "LPMLTemperature Min and Max Failed!", - return -EINVAL); - - if (0 != polaris10_populate_temperature_scaler(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate LPMLTemperatureScaler Failed!", - return -EINVAL); - - if (polaris10_populate_fuzzy_fan(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate Fuzzy Fan Control parameters Failed!", - return -EINVAL); - - if (polaris10_populate_gnb_lpml(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Failed!", - return -EINVAL); - - if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - - if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate BapmVddCBaseLeakage Hi and Lo " - "Sidd Failed!", return -EINVAL); - - if 
(polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, - (uint8_t *)&data->power_tune_table, - (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) - PP_ASSERT_WITH_CODE(false, - "Attempt to download PmFuseTable Failed!", - return -EINVAL); - } - return 0; -} - -int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -843,9 +575,9 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } -int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) +int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -860,9 +592,9 @@ int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) return result; } -int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) @@ -871,21 +603,27 @@ int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) return 0; } -static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) { return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); } -int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) +int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); int smc_result; int result = 0; + struct phm_cac_tdp_table *cac_table; data->power_containment_features = 0; + if (hwmgr->pp_table_version == PP_TABLE_V1) + cac_table = table_info->cac_dtp_table; + else + cac_table = hwmgr->dyn_state.cac_dtp_table; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { @@ -905,15 +643,13 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == smc_result), "Failed to enable PkgPwrTracking in SMC.", result = -1;); if (0 == smc_result) { - struct phm_cac_tdp_table *cac_table = - table_info->cac_dtp_table; uint32_t default_limit = (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); data->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; - if (polaris10_set_power_limit(hwmgr, default_limit)) + if (smu7_set_power_limit(hwmgr, default_limit)) printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); } } @@ -921,9 +657,9 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } -int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) +int smu7_disable_power_containment(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -963,14 +699,19 @@ int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) return result; } -int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) +int smu7_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + struct phm_cac_tdp_table *cac_table; + int adjust_percent, target_tdp; int result = 0; + if (hwmgr->pp_table_version == PP_TABLE_V1) + cac_table = table_info->cac_dtp_table; + else + cac_table = hwmgr->dyn_state.cac_dtp_table; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { /* adjustment percentage has already been validated */ @@ -981,7 +722,7 @@ int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) * but message to be 8 bit fraction for messages */ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; - result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); } return result; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h index bc78e28f010d..22f86b6bf1be 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h @@ -20,17 +20,8 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ -#ifndef POLARIS10_POWERTUNE_H -#define POLARIS10_POWERTUNE_H - -enum polaris10_pt_config_reg_type { - POLARIS10_CONFIGREG_MMR = 0, - POLARIS10_CONFIGREG_SMC_IND, - POLARIS10_CONFIGREG_DIDT_IND, - POLARIS10_CONFIGREG_GC_CAC_IND, - POLARIS10_CONFIGREG_CACHE, - POLARIS10_CONFIGREG_MAX -}; +#ifndef _SMU7_POWERTUNE_H +#define _SMU7_POWERTUNE_H #define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 #define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 @@ -52,43 +43,20 @@ enum polaris10_pt_config_reg_type { #define ixGC_CAC_CNTL 0x0000 #define ixDIDT_SQ_STALL_CTRL 0x0004 -#define ixDIDT_SQ_TUNING_CTRL 0x0005 +#define ixDIDT_SQ_TUNING_CTRL 0x0005 #define ixDIDT_TD_STALL_CTRL 0x0044 #define ixDIDT_TD_TUNING_CTRL 0x0045 #define ixDIDT_TCP_STALL_CTRL 0x0064 #define ixDIDT_TCP_TUNING_CTRL 0x0065 -struct polaris10_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum polaris10_pt_config_reg_type type; -}; - -struct polaris10_pt_defaults { - uint8_t SviLoadLineEn; - uint8_t SviLoadLineVddC; - uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; - uint8_t TDC_MAWt; - uint8_t TdcWaterfallCtl; - uint8_t DTEAmbientTempBase; - - uint32_t DisplayCac; - uint32_t BAPM_TEMP_GRADIENT; - uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; - uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; -}; -void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); -int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); -int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); -int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); -int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); -int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); -int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); -int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); -int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); -int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr); 
-#endif /* POLARIS10_POWERTUNE_H */ +int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr); +int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr); +int smu7_enable_power_containment(struct pp_hwmgr *hwmgr); +int smu7_disable_power_containment(struct pp_hwmgr *hwmgr); +int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int smu7_power_control_set_level(struct pp_hwmgr *hwmgr); +int smu7_enable_didt_config(struct pp_hwmgr *hwmgr); +int smu7_disable_didt_config(struct pp_hwmgr *hwmgr); +#endif /* DGPU_POWERTUNE_H */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 92976b68d6fd..fb6c6f6106d5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -1,5 +1,5 @@ /* - * Copyright 2015 Advanced Micro Devices, Inc. + * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,18 +20,15 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ + #include <asm/div64.h> -#include "fiji_thermal.h" -#include "fiji_hwmgr.h" -#include "fiji_smumgr.h" -#include "fiji_ppsmc.h" -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" - -int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, +#include "smu7_thermal.h" +#include "smu7_hwmgr.h" +#include "smu7_common.h" + +int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) { - if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; @@ -55,7 +52,7 @@ int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, return 0; } -int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, +int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) { uint32_t duty100; @@ -84,7 +81,7 @@ int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, return 0; } -int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) { uint32_t tach_period; uint32_t crystal_clock_freq; @@ -100,9 +97,9 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if (tach_period == 0) return -EINVAL; - crystal_clock_freq = tonga_get_xclk(hwmgr); + crystal_clock_freq = smu7_get_xclk(hwmgr); - *speed = 60 * crystal_clock_freq * 10000/ tach_period; + *speed = 60 * crystal_clock_freq * 10000 / tach_period; return 0; } @@ -113,7 +110,7 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) * mode the fan control mode, 0 default, 1 by percent, 5, by RPM * @exception Should always succeed. */ -int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { if (hwmgr->fan_ctrl_is_in_default_mode) { @@ -139,7 +136,7 @@ int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) * @param hwmgr the address of the powerplay hardware manager. * @exception Should always succeed. 
*/ -int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) { if (!hwmgr->fan_ctrl_is_in_default_mode) { PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -152,7 +149,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) return 0; } -int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) { int result; @@ -187,7 +184,7 @@ int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) } -int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) { return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); } @@ -198,7 +195,7 @@ int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) * @param speed is the percentage value (0% - 100%) to be set. * @exception Fails is the 100% setting appears to be 0. */ -int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, +int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) { uint32_t duty100; @@ -213,7 +210,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - fiji_fan_ctrl_stop_smc_fan_control(hwmgr); + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); @@ -228,7 +225,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } /** @@ -236,7 +233,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, * @param hwmgr the address of the powerplay hardware manager. * @exception Always succeeds. */ -int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) { int result; @@ -245,11 +242,11 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); if (!result) - result = fiji_fan_ctrl_start_smc_fan_control(hwmgr); + result = smu7_fan_ctrl_start_smc_fan_control(hwmgr); } else - result = fiji_fan_ctrl_set_default_mode(hwmgr); + result = smu7_fan_ctrl_set_default_mode(hwmgr); return result; } @@ -260,7 +257,7 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) * @param speed is the percentage value (min - max) to be set. * @exception Fails is the speed not lie between min and max. 
*/ -int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) { uint32_t tach_period; uint32_t crystal_clock_freq; @@ -272,14 +269,18 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return 0; - crystal_clock_freq = tonga_get_xclk(hwmgr); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + smu7_fan_ctrl_stop_smc_fan_control(hwmgr); + + crystal_clock_freq = smu7_get_xclk(hwmgr); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_TACH_STATUS, TACH_PERIOD, tach_period); - return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } /** @@ -287,7 +288,7 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) * * @param hwmgr The address of the hardware manager. */ -int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) +int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr) { int temp; @@ -296,7 +297,7 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) /* Bit 9 means the reading is lower than the lowest usable value. */ if (temp & 0x200) - temp = FIJI_THERMAL_MAXIMUM_TEMP_READING; + temp = SMU7_THERMAL_MAXIMUM_TEMP_READING; else temp = temp & 0x1ff; @@ -312,12 +313,12 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) * @param range Temperature range to be programmed for high and low alert signals * @exception PP_Result_BadInput if the input data is not valid. */ -static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, +static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) { - uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP * + uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP * + uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; if (low < low_temp) @@ -346,7 +347,7 @@ static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, * * @param hwmgr The address of the hardware manager. */ -static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr) +static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr) { if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -365,13 +366,13 @@ static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr) * * @param hwmgr The address of the hardware manager. */ -static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr) +int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr) { uint32_t alert; alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); + alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK); PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); @@ -383,13 +384,13 @@ static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr) * Disable thermal alerts on the RV770 thermal controller. * @param hwmgr The address of the hardware manager. 
*/ -static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr) +int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr) { uint32_t alert; alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); + alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK); PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); @@ -402,129 +403,17 @@ static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr) * Currently just disables alerts. * @param hwmgr The address of the hardware manager. */ -int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) { - int result = fiji_thermal_disable_alert(hwmgr); + int result = smu7_thermal_disable_alert(hwmgr); - if (hwmgr->thermal_controller.fanInfo.bNoFan) - fiji_fan_ctrl_set_default_mode(hwmgr); + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + smu7_fan_ctrl_set_default_mode(hwmgr); return result; } /** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (data->fan_table_start == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL1, FMAX_DUTY100); - - if (duty100 == 0) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
- usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr-> - thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(hwmgr-> - thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = tonga_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> - thermal_controller.advanceFanControlParameters.ulCycleDelay * - reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( - hwmgr->device, CGS_IND_REG__SMC, - CG_MULT_THERMAL_CTRL, TEMP_SEL); - - res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, - (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), - data->sram_end); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanMinPwm, - hwmgr->thermal_controller. - advanceFanControlParameters.ucMinimumPWMLimit); - - if (!res && hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetFanSclkTarget, - hwmgr->thermal_controller. - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); - - if (res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - - return 0; -} - -/** * Start the fan control on the SMC. * @param hwmgr the address of the powerplay hardware manager. 
* @param pInput the pointer to input data @@ -533,7 +422,7 @@ int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from set temperature range routine */ -int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, +static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { /* If the fantable setup has failed we could have disabled @@ -543,8 +432,8 @@ int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, */ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - fiji_fan_ctrl_start_smc_fan_control(hwmgr); - fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + smu7_fan_ctrl_start_smc_fan_control(hwmgr); + smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); } return 0; @@ -559,7 +448,7 @@ int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from set temperature range routine */ -int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, +static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; @@ -567,7 +456,7 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (range == NULL) return -EINVAL; - return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max); + return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max); } /** @@ -579,10 +468,10 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from initialize thermal controller routine */ -int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, +static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { - return fiji_thermal_initialize(hwmgr); + return smu7_thermal_initialize(hwmgr); } /** @@ -594,10 +483,10 @@ int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from enable alert routine */ -int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, +static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { - return fiji_thermal_enable_alert(hwmgr); + return smu7_thermal_enable_alert(hwmgr); } /** @@ -609,53 +498,54 @@ int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, * @param Result the last failure code * @return result from disable alert routine */ -static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr, +static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { - return fiji_thermal_disable_alert(hwmgr); + return smu7_thermal_disable_alert(hwmgr); } static const struct phm_master_table_item -fiji_thermal_start_thermal_controller_master_list[] = { - {NULL, tf_fiji_thermal_initialize}, - {NULL, tf_fiji_thermal_set_temperature_range}, - {NULL, tf_fiji_thermal_enable_alert}, +phm_thermal_start_thermal_controller_master_list[] = { + {NULL, tf_smu7_thermal_initialize}, + {NULL, tf_smu7_thermal_set_temperature_range}, + {NULL, tf_smu7_thermal_enable_alert}, + {NULL, smum_thermal_avfs_enable}, /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. 
* If this assumption changes we have to revisit this table. */ - {NULL, tf_fiji_thermal_setup_fan_table}, - {NULL, tf_fiji_thermal_start_smc_fan_control}, + {NULL, smum_thermal_setup_fan_table}, + {NULL, tf_smu7_thermal_start_smc_fan_control}, {NULL, NULL} }; static const struct phm_master_table_header -fiji_thermal_start_thermal_controller_master = { +phm_thermal_start_thermal_controller_master = { 0, PHM_MasterTableFlag_None, - fiji_thermal_start_thermal_controller_master_list + phm_thermal_start_thermal_controller_master_list }; static const struct phm_master_table_item -fiji_thermal_set_temperature_range_master_list[] = { - {NULL, tf_fiji_thermal_disable_alert}, - {NULL, tf_fiji_thermal_set_temperature_range}, - {NULL, tf_fiji_thermal_enable_alert}, +phm_thermal_set_temperature_range_master_list[] = { + {NULL, tf_smu7_thermal_disable_alert}, + {NULL, tf_smu7_thermal_set_temperature_range}, + {NULL, tf_smu7_thermal_enable_alert}, {NULL, NULL} }; static const struct phm_master_table_header -fiji_thermal_set_temperature_range_master = { +phm_thermal_set_temperature_range_master = { 0, PHM_MasterTableFlag_None, - fiji_thermal_set_temperature_range_master_list + phm_thermal_set_temperature_range_master_list }; -int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) { if (!hwmgr->thermal_controller.fanInfo.bNoFan) - fiji_fan_ctrl_set_default_mode(hwmgr); + smu7_fan_ctrl_set_default_mode(hwmgr); return 0; } @@ -664,17 +554,17 @@ int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) * @param hwmgr The address of the hardware manager. * @exception Any error code from the low-level communication. */ -int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr) +int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr) { int result; result = phm_construct_table(hwmgr, - &fiji_thermal_set_temperature_range_master, + &phm_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); if (!result) { result = phm_construct_table(hwmgr, - &fiji_thermal_start_thermal_controller_master, + &phm_thermal_start_thermal_controller_master, &(hwmgr->start_thermal_controller)); if (result) phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h new file mode 100644 index 000000000000..6face973be43 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h @@ -0,0 +1,58 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _SMU7_THERMAL_H_ +#define _SMU7_THERMAL_H_ + +#include "hwmgr.h" + +#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1 +#define SMU7_THERMAL_LOW_ALERT_MASK 0x2 + +#define SMU7_THERMAL_MINIMUM_TEMP_READING -256 +#define SMU7_THERMAL_MAXIMUM_TEMP_READING 255 + +#define SMU7_THERMAL_MINIMUM_ALERT_TEMP 0 +#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); +extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); +extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); +extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); +extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr); +extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c deleted file mode 100644 index e58d038a997b..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "hwmgr.h" -#include "tonga_clockpowergating.h" -#include "tonga_ppsmc.h" -#include "tonga_hwmgr.h" - -int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_uvd_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_UVDPowerOFF); - return 0; -} - -int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_uvd_power_gating(hwmgr)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDDynamicPowerGating)) { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDPowerON, 1); - } else { - return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDPowerON, 0); - } - } - - return 0; -} - -int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_VCEPowerOFF); - return 0; -} - -int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr) -{ - if (phm_cf_want_vce_power_gating(hwmgr)) - return smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_VCEPowerON); - return 0; -} - -int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating) -{ - int ret = 0; - - switch (block) { - case PHM_AsicBlock_UVD_MVC: - case PHM_AsicBlock_UVD: - case PHM_AsicBlock_UVD_HD: - case PHM_AsicBlock_UVD_SD: - if (gating == PHM_ClockGateSetting_StaticOff) - ret = tonga_phm_powerdown_uvd(hwmgr); - else - ret = tonga_phm_powerup_uvd(hwmgr); - break; - case PHM_AsicBlock_GFX: - default: - break; - } - - return ret; -} - -int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - - tonga_phm_powerup_uvd(hwmgr); - tonga_phm_powerup_vce(hwmgr); - - return 0; -} - -int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->uvd_power_gated == bgate) - return 0; - - data->uvd_power_gated = bgate; - - if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - tonga_update_uvd_dpm(hwmgr, true); - tonga_phm_powerdown_uvd(hwmgr); - } else { - tonga_phm_powerup_uvd(hwmgr); - cgs_set_powergating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - - tonga_update_uvd_dpm(hwmgr, false); - } - - return 0; -} - -int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_set_power_state_input states; - const struct pp_power_state *pcurrent; - struct pp_power_state *requested; - - pcurrent = hwmgr->current_ps; - requested = hwmgr->request_ps; - - states.pcurrent_state = &(pcurrent->hardware); - states.pnew_state = &(requested->hardware); - - if (phm_cf_want_vce_power_gating(hwmgr)) { - if (data->vce_power_gated != bgate) { - if (bgate) { - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - cgs_set_powergating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - tonga_enable_disable_vce_dpm(hwmgr, false); - data->vce_power_gated = true; - } else { - tonga_phm_powerup_vce(hwmgr); - data->vce_power_gated = false; - cgs_set_powergating_state( 
- hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - - tonga_update_vce_dpm(hwmgr, &states); - tonga_enable_disable_vce_dpm(hwmgr, true); - return 0; - } - } - } else { - tonga_update_vce_dpm(hwmgr, &states); - tonga_enable_disable_vce_dpm(hwmgr, true); - return 0; - } - - if (!data->vce_power_gated) - tonga_update_vce_dpm(hwmgr, &states); - - return 0; -} - -int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, - const uint32_t *msg_id) -{ - PPSMC_Msg msg; - uint32_t value; - - switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { - case PP_GROUP_GFX: - switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { - case PP_BLOCK_GFX_CG: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_GFX_CGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_GFX_MG: - /* For GFX MGCG, there are three different ones; - * CPF, RLC, and all others. CPF MGCG will not be used for Tonga. - * For GFX MGLS, Tonga will not support it. - * */ - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK); - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - default: - return -1; - } - break; - - case PP_GROUP_SYS: - switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { - case PP_BLOCK_SYS_BIF: - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_BIF_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_MC: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_MC_MGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_MC_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - - } - break; - - case PP_BLOCK_SYS_HDP: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_HDP_MGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? 
PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - - value = CG_SYS_HDP_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_SDMA: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_SDMA_MGCG_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - - if (PP_STATE_SUPPORT_LS & *msg_id) { - msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - - value = CG_SYS_SDMA_MGLS_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - case PP_BLOCK_SYS_ROM: - if (PP_STATE_SUPPORT_CG & *msg_id) { - msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) - ? PPSMC_MSG_EnableClockGatingFeature - : PPSMC_MSG_DisableClockGatingFeature; - value = CG_SYS_ROM_MASK; - - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value)) - return -1; - } - break; - - default: - return -1; - - } - break; - - default: - return -1; - - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h deleted file mode 100644 index 080d69d77f04..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef TONGA_DYN_DEFAULTS_H -#define TONGA_DYN_DEFAULTS_H - - -/** \file - * Volcanic Islands Dynamic default parameters. - */ - -enum TONGAdpm_TrendDetection { - TONGAdpm_TrendDetection_AUTO, - TONGAdpm_TrendDetection_UP, - TONGAdpm_TrendDetection_DOWN -}; -typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection; - -/* Bit vector representing same fields as hardware register. */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */ -/* HDP_busy */ -/* IH_busy */ -/* DRM_busy */ -/* DRMDMA_busy */ -/* UVD_busy */ -/* VCE_busy */ -/* ACP_busy */ -/* SAMU_busy */ -/* AVP_busy */ -/* SDMA enabled */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. 
*/ -/* SH_Gfx_busy */ -/* RB_Gfx_busy */ -/* VCE_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */ -/* FE_Gfx_busy */ -/* RB_Gfx_busy */ -/* ACP_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */ -/* FE_Gfx_busy */ -/* SH_Gfx_busy */ -/* UVD_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */ -/* VCE_busy */ -/* ACP_busy */ -/* SAMU_busy */ - -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */ -#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */ - - -/* thermal protection counter (units).*/ -#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */ - -/* static screen threshold unit */ -#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0 - -/* static screen threshold */ -#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8 - -/* gfx idle clock stop threshold */ -#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */ - -/* Fixed reference divider to use when building baby stepping tables. */ -#define PPTONGA_REFERENCEDIVIDER_DFLT 4 - -/* - * ULV voltage change delay time - * Used to be delay_vreg in N.I. split for S.I. - * Using N.I. delay_vreg value as default - * ReferenceClock = 2700 - * VoltageResponseTime = 1000 - * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687 - */ - -#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687 - -#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035 -#define PPTONGA_CGULVCONTROL_DFLT 0x00007450 -#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */ -#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */ - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c deleted file mode 100644 index c7dc111221c2..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ /dev/null @@ -1,6276 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/fb.h> -#include "linux/delay.h" -#include "pp_acpi.h" -#include "hwmgr.h" -#include <atombios.h> -#include "tonga_hwmgr.h" -#include "pptable.h" -#include "processpptables.h" -#include "tonga_processpptables.h" -#include "tonga_pptable.h" -#include "pp_debug.h" -#include "tonga_ppsmc.h" -#include "cgs_common.h" -#include "pppcielanes.h" -#include "tonga_dyn_defaults.h" -#include "smumgr.h" -#include "tonga_smumgr.h" -#include "tonga_clockpowergating.h" -#include "tonga_thermal.h" - -#include "smu/smu_7_1_2_d.h" -#include "smu/smu_7_1_2_sh_mask.h" - -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" - -#include "bif/bif_5_0_d.h" -#include "bif/bif_5_0_sh_mask.h" - -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" - -#include "cgs_linux.h" -#include "eventmgr.h" -#include "amd_pcie_helpers.h" - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define MC_CG_SEQ_DRAMCONF_S0 0x05 -#define MC_CG_SEQ_DRAMCONF_S1 0x06 -#define MC_CG_SEQ_YCLK_SUSPEND 0x04 -#define MC_CG_SEQ_YCLK_RESUME 0x0a - -#define PCIE_BUS_CLK 10000 -#define TCLK (PCIE_BUS_CLK / 10) - -#define SMC_RAM_END 0x40000 -#define SMC_CG_IND_START 0xc0030000 -#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -#define VDDC_VDDCI_DELTA 200 -#define VDDC_VDDGFX_DELTA 300 - -#define MC_SEQ_MISC0_GDDR5_SHIFT 28 -#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0_GDDR5_VALUE 5 - -typedef uint32_t PECI_RegistryValue; - -/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */ -static const uint16_t PP_ClockStretcherLookupTable[2][4] = { - {600, 1050, 3, 0}, - {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */ -static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = { - { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */ -static const uint8_t PP_ClockStretchAmountConversion[2][6] = { - {0, 1, 3, 2, 4, 5}, - {0, 2, 4, 5, 6, 5} }; - -/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ -enum DPM_EVENT_SRC { - DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ - DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ - DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ - DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ - DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ -}; -typedef enum DPM_EVENT_SRC DPM_EVENT_SRC; - -static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic); - -struct tonga_power_state *cast_phw_tonga_power_state( - struct pp_hw_power_state *hw_ps) -{ - if (hw_ps == NULL) - return NULL; - - PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (struct tonga_power_state *)hw_ps; -} - -const struct tonga_power_state *cast_const_phw_tonga_power_state( - const struct pp_hw_power_state *hw_ps) -{ - if (hw_ps == NULL) - return NULL; - - PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic), - "Invalid Powerstate Type!", - return NULL); - - return (const struct tonga_power_state *)hw_ps; -} - -int tonga_add_voltage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *look_up_table, - phm_ppt_v1_voltage_lookup_record *record) -{ - uint32_t i; - PP_ASSERT_WITH_CODE((NULL != look_up_table), - "Lookup Table empty.", return -1;); - PP_ASSERT_WITH_CODE((0 != look_up_table->count), - "Lookup Table empty.", return -1;); - PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count), - "Lookup Table is full.", return -1;); - - /* This is to avoid entering duplicate calculated records. */ - for (i = 0; i < look_up_table->count; i++) { - if (look_up_table->entries[i].us_vdd == record->us_vdd) { - if (look_up_table->entries[i].us_calculated == 1) - return 0; - else - break; - } - } - - look_up_table->entries[i].us_calculated = 1; - look_up_table->entries[i].us_vdd = record->us_vdd; - look_up_table->entries[i].us_cac_low = record->us_cac_low; - look_up_table->entries[i].us_cac_mid = record->us_cac_mid; - look_up_table->entries[i].us_cac_high = record->us_cac_high; - /* Only increment the count when we're appending, not replacing duplicate entry. */ - if (i == look_up_table->count) - look_up_table->count++; - - return 0; -} - -int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) -{ - PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; - - return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 
0 : -1; -} - -uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, - uint32_t voltage) -{ - uint8_t count = (uint8_t) (voltage_table->count); - uint8_t i = 0; - - PP_ASSERT_WITH_CODE((NULL != voltage_table), - "Voltage Table empty.", return 0;); - PP_ASSERT_WITH_CODE((0 != count), - "Voltage Table empty.", return 0;); - - for (i = 0; i < count; i++) { - /* find first voltage bigger than requested */ - if (voltage_table->entries[i].value >= voltage) - return i; - } - - /* voltage is bigger than max voltage in the table */ - return i - 1; -} - -/** - * @brief PhwTonga_GetVoltageOrder - * Returns index of requested voltage record in lookup(table) - * @param hwmgr - pointer to hardware manager - * @param lookupTable - lookup list to search in - * @param voltage - voltage to look for - * @return 0 on success - */ -uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, - uint16_t voltage) -{ - uint8_t count = (uint8_t) (look_up_table->count); - uint8_t i; - - PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); - PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); - - for (i = 0; i < count; i++) { - /* find first voltage equal or bigger than requested */ - if (look_up_table->entries[i].us_vdd >= voltage) - return i; - } - - /* voltage is bigger than max voltage in the table */ - return i-1; -} - -bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) -{ - /* - * We return the status of Voltage Control instead of checking SCLK/MCLK DPM - * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, - * whereas voltage control is a fundemental change that will not be disabled - */ - - return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0); -} - -/** - * Re-generate the DPM level mask value - * @param hwmgr the address of the hardware manager - */ -static uint32_t tonga_get_dpm_level_enable_mask_value( - struct tonga_single_dpm_table * dpm_table) -{ - uint32_t i; - uint32_t mask_value = 0; - - for (i = dpm_table->count; i > 0; i--) { - mask_value = mask_value << 1; - - if (dpm_table->dpm_levels[i-1].enabled) - mask_value |= 0x1; - else - mask_value &= 0xFFFFFFFE; - } - return mask_value; -} - -/** - * Retrieve DPM default values from registry (if available) - * - * @param hwmgr the address of the powerplay hardware manager. 
- */ -void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - phw_tonga_ulv_parm *ulv = &(data->ulv); - uint32_t tmp; - - ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT; - data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0; - data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1; - data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2; - data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3; - data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4; - data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5; - data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6; - data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7; - - data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT; - data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ABM); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_NonABMSupportInPPLib); - - tmp = 0; - if (tmp == 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicACTiming); - - tmp = 0; - if (0 != tmp) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMemoryTransition); - - data->mclk_strobe_mode_threshold = 40000; - data->mclk_stutter_mode_threshold = 30000; - data->mclk_edc_enable_threshold = 40000; - data->mclk_edc_wr_enable_threshold = 40000; - - tmp = 0; - if (tmp != 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMCLS); - - data->pcie_gen_performance.max = PP_PCIEGen1; - data->pcie_gen_performance.min = PP_PCIEGen3; - data->pcie_gen_power_saving.max = PP_PCIEGen1; - data->pcie_gen_power_saving.min = PP_PCIEGen3; - - data->pcie_lane_performance.max = 0; - data->pcie_lane_performance.min = 16; - data->pcie_lane_power_saving.max = 0; - data->pcie_lane_power_saving.min = 16; - - tmp = 0; - - if (tmp) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicUVDState); - -} - -int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - int result = 0; - uint32_t low_sclk_interrupt_threshold = 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkThrottleLowNotification) - && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { - data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; - low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; - - CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); - - result = tonga_copy_bytes_to_smc( - hwmgr->smumgr, - data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, - LowSclkInterruptThreshold), - (uint8_t *)&low_sclk_interrupt_threshold, - sizeof(uint32_t), - data->sram_end - ); - } - - return result; -} - -/** - * Find SCLK value that is associated with specified virtual_voltage_Id. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param virtual_voltage_Id voltageId to look for. - * @param sclk output value . 
- * @return 0 on success, -1 if the association is not found - */ -static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - uint16_t virtual_voltage_id, uint32_t *sclk) -{ - uint8_t entryId; - uint8_t voltageId; - struct phm_ppt_v1_information *pptable_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1); - - /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ - for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) { - voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd; - if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) - break; - } - - PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count, - "Can't find requested voltage id in vdd_dep_on_sclk table!", - return -1; - ); - - *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk; - - return 0; -} - -/** - * Get Leakage VDDC based on leakage ID. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 on success, -1 if the BIOS reports an invalid leakage voltage (zero or 2 V and above) - */ -int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; - uint16_t virtual_voltage_id; - uint16_t vddc = 0; - uint16_t vddgfx = 0; - uint16_t i, j; - uint32_t sclk = 0; - - /* retrieve voltage for leakage ID (0xff01 + i) */ - for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) { - virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - - /* in split mode we should have only vddgfx EVV leakages */ - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - if (0 == tonga_get_sclk_for_voltage_evv(hwmgr, - pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) { - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - for (j = 1; j < sclk_table->count; j++) { - if (sclk_table->entries[j].clk == sclk && - sclk_table->entries[j].cks_enable == 0) { - sclk += 5000; - break; - } - } - } - if (0 == atomctrl_get_voltage_evv_on_sclk - (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, - virtual_voltage_id, &vddgfx)) { - /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */ - PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1); - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddgfx != 0 && vddgfx != virtual_voltage_id) { - data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; - data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id; - data->vddcgfx_leakage.count++; - } - } else { - printk("Error retrieving EVV voltage value!\n"); - } - } - } else { - /* in merged mode we have only vddc EVV leakages */ - if (0 == tonga_get_sclk_for_voltage_evv(hwmgr, - pptable_info->vddc_lookup_table, - virtual_voltage_id, &sclk)) { - if (0 == atomctrl_get_voltage_evv_on_sclk - (hwmgr, VOLTAGE_TYPE_VDDC, sclk, - virtual_voltage_id, &vddc)) { - /* need to make sure vddc is less than 2v or else, it could burn the ASIC.
*/ - PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1); - - /* the voltage should not be zero nor equal to leakage ID */ - if (vddc != 0 && vddc != virtual_voltage_id) { - data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; - data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; - data->vddc_leakage.count++; - } - } else { - printk("Error retrieving EVV voltage value!\n"); - } - } - } - } - - return 0; -} - -int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* enable SCLK dpm */ - if (0 == data->sclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Enable)), - "Failed to enable SCLK DPM during DPM Start Function!", - return -1); - } - - /* enable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Enable)), - "Failed to enable MCLK DPM during DPM Start Function!", - return -1); - - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, 0x100005);/*Read */ - - udelay(10); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixLCAC_CPL_CNTL, 0x500005);/* write */ - - } - - return 0; -} - -int tonga_start_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* enable general power management */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); - /* enable sclk deep sleep */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); - - /* prepare for PCIE DPM */ - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + - offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000); - - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Enable)), - "Failed to enable voltage DPM during DPM Start Function!", - return -1); - - if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) { - PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); - } - - /* enable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Enable)), - "Failed to enable pcie DPM during DPM Start Function!", - return -1 - ); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition)) { - smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_EnableACDCGPIOInterrupt); - } - - return 0; -} - -int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* disable SCLK dpm */ - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable SCLK DPM when DPM is disabled", - return -1 - ); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_DPM_Disable)), - "Failed to disable SCLK DPM during DPM stop Function!", - return -1); - } - - /* disable MCLK dpm */ - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable MCLK DPM when DPM is disabled", - return -1 - ); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_Disable)), - "Failed to Disable MCLK DPM during DPM stop Function!", - return -1); - } - - return 0; -} - -int tonga_stop_dpm(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0); - /* disable sclk deep sleep*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0); - - /* disable PCIE dpm */ - if (0 == data->pcie_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable PCIE DPM when DPM is disabled", - return -1 - ); - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_Disable)), - "Failed to disable pcie DPM during DPM stop Function!", - return -1); - } - - if (0 != tonga_disable_sclk_mclk_dpm(hwmgr)) - PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE( - !tonga_is_dpm_running(hwmgr), - "Trying to Disable Voltage CNTL when DPM is disabled", - return -1 - ); - - PP_ASSERT_WITH_CODE( - (0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_Voltage_Cntl_Disable)), - "Failed to disable voltage DPM during DPM stop Function!", - return -1); - - return 0; -} - -int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); - - return 0; -} - -/** - * Send a message to the SMC and return a parameter - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: pointer to the received parameter - * @return The response that came from the SMC. - */ -PPSMC_Result tonga_send_msg_to_smc_return_parameter( - struct pp_hwmgr *hwmgr, - PPSMC_Msg msg, - uint32_t *parameter) -{ - int result; - - result = smum_send_msg_to_smc(hwmgr->smumgr, msg); - - if ((0 == result) && parameter) { - *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - } - - return result; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t level_mask = 1 << n; - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. 
*/ - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to force SCLK when DPM is disabled", - return -1;); - if (0 == data->sclk_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask), - level_mask) ? 0 : 1); - - return 0; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t level_mask = 1 << n; - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Force MCLK when DPM is disabled", - return -1;); - if (0 == data->mclk_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask), - level_mask) ? 0 : 1); - - return 0; -} - -/** - * force DPM power State - * - * @param hwmgr: the address of the powerplay hardware manager. - * @param n : DPM level - * @return The response that came from the SMC. - */ -int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Force PCIE level when DPM is disabled", - return -1;); - if (0 == data->pcie_dpm_key_disabled) - return (0 == smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel), - n) ? 0 : 1); - - return 0; -} - -/** - * Set the initial state by calling SMC to switch to this state directly - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_set_boot_state(struct pp_hwmgr *hwmgr) -{ - /* - * SMC only stores one state that SW will ask to switch too, - * so we switch the the just uploaded one - */ - return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1; -} - -/** - * Get the location of various tables inside the FW image. - * - * @param hwmgr the address of the powerplay hardware manager. 
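The level_mask passed to PPSMC_MSG_SCLKDPM_SetEnabledMask / PPSMC_MSG_MCLKDPM_SetEnabledMask above is a one-hot value: forcing level n leaves only bit n set. A trivial standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t n = 2;
	uint32_t level_mask = 1 << n;   /* same expression used by tonga_dpm_force_state() */

	assert(level_mask == 0x4);      /* only DPM level 2 remains enabled */
	return 0;
}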
- * @return always 0 - */ -int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend); - - uint32_t tmp; - int result; - bool error = false; - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, DpmTable), - &tmp, data->sram_end); - - if (0 == result) { - data->dpm_table_start = tmp; - } - - error |= (0 != result); - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, SoftRegisters), - &tmp, data->sram_end); - - if (0 == result) { - data->soft_regs_start = tmp; - tonga_smu->ulSoftRegsStart = tmp; - } - - error |= (0 != result); - - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, mcRegisterTable), - &tmp, data->sram_end); - - if (0 == result) { - data->mc_reg_table_start = tmp; - } - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, FanTable), - &tmp, data->sram_end); - - if (0 == result) { - data->fan_table_start = tmp; - } - - error |= (0 != result); - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), - &tmp, data->sram_end); - - if (0 == result) { - data->arb_table_start = tmp; - } - - error |= (0 != result); - - - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - SMU72_FIRMWARE_HEADER_LOCATION + - offsetof(SMU72_Firmware_Header, Version), - &tmp, data->sram_end); - - if (0 == result) { - hwmgr->microcode_version_info.SMC = tmp; - } - - error |= (0 != result); - - return error ? 1 : 0; -} - -/** - * Read clock related registers. - * - * @param hwmgr the address of the powerplay hardware manager. 
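tonga_process_firmware_header() above repeats the same pattern for every field: read a dword at SMU72_FIRMWARE_HEADER_LOCATION plus an offsetof() into SMU72_Firmware_Header, keep the value on success, and accumulate the error. A possible helper for that pattern, sketched under the assumption that tonga_read_smc_sram_dword() keeps the signature used above (this helper is not part of the original file):

/* Hypothetical refactoring sketch, driver headers assumed to be included. */
static int demo_read_header_dword(struct pp_smumgr *smumgr, uint32_t offset,
				  uint32_t *dest, uint32_t sram_end, bool *error)
{
	uint32_t tmp;
	int result = tonga_read_smc_sram_dword(smumgr,
			SMU72_FIRMWARE_HEADER_LOCATION + offset, &tmp, sram_end);

	if (result == 0)
		*dest = tmp;     /* keep the value only when the read succeeded */
	*error |= (result != 0); /* remember that at least one read failed */

	return result;
}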
- * @return always 0 - */ -int tonga_read_clock_registers(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - data->clock_registers.vCG_SPLL_FUNC_CNTL = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); - data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); - data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); - data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); - data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = - cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); - data->clock_registers.vDLL_CNTL = - cgs_read_register(hwmgr->device, mmDLL_CNTL); - data->clock_registers.vMCLK_PWRMGT_CNTL = - cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); - data->clock_registers.vMPLL_AD_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); - data->clock_registers.vMPLL_DQ_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); - data->clock_registers.vMPLL_FUNC_CNTL = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); - data->clock_registers.vMPLL_FUNC_CNTL_1 = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); - data->clock_registers.vMPLL_FUNC_CNTL_2 = - cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); - data->clock_registers.vMPLL_SS1 = - cgs_read_register(hwmgr->device, mmMPLL_SS1); - data->clock_registers.vMPLL_SS2 = - cgs_read_register(hwmgr->device, mmMPLL_SS2); - - return 0; -} - -/** - * Find out if memory is GDDR5. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_get_memory_type(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); - - data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); - - return 0; -} - -/** - * Enables Dynamic Power Management by SMC - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr) -{ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); - - return 0; -} - -/** - * Initialize PowerGating States for different engines - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - data->uvd_power_gated = false; - data->vce_power_gated = false; - data->samu_power_gated = false; - data->acp_power_gated = false; - data->pg_acp_init = true; - - return 0; -} - -/** - * Checks if DPM is enabled - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr) -{ - /* - * We return the status of Voltage Control instead of checking SCLK/MCLK DPM - * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, - * whereas voltage control is a fundemental change that will not be disabled - */ - return (!tonga_is_dpm_running(hwmgr) ? 
0 : 1); -} - -/** - * Checks if DPM is stopped - * - * @param hwmgr the address of the powerplay hardware manager. - * @return 1 if DPM is still running without a valid DPM table, 0 otherwise - */ -int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (tonga_is_dpm_running(hwmgr)) { - /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */ - if (!data->dpm_table_start) { - return 1; - } - } - - return 0; -} - -/** - * Remove repeated voltage values and create table with unique values. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param voltage_table the pointer to changing voltage table - * @return 0 on success - */ - -static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, - pp_atomctrl_voltage_table *voltage_table) -{ - uint32_t table_size, i, j; - uint16_t vvalue; - bool bVoltageFound = false; - pp_atomctrl_voltage_table *table; - - PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;); - table_size = sizeof(pp_atomctrl_voltage_table); - table = kzalloc(table_size, GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - memset(table, 0x00, table_size); - table->mask_low = voltage_table->mask_low; - table->phase_delay = voltage_table->phase_delay; - - for (i = 0; i < voltage_table->count; i++) { - vvalue = voltage_table->entries[i].value; - bVoltageFound = false; - - for (j = 0; j < table->count; j++) { - if (vvalue == table->entries[j].value) { - bVoltageFound = true; - break; - } - } - - if (!bVoltageFound) { - table->entries[table->count].value = vvalue; - table->entries[table->count].smio_low = - voltage_table->entries[i].smio_low; - table->count++; - } - } - - /* copy the de-duplicated result back into the caller's table */ - memcpy(voltage_table, table, sizeof(pp_atomctrl_voltage_table)); - - kfree(table); - - return 0; -} - -static int tonga_get_svi2_vdd_ci_voltage_table( - struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table) -{ - uint32_t i; - int result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table); - - PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count), - "Voltage Dependency Table empty.", return -1;); - - vddci_voltage_table->mask_low = 0; - vddci_voltage_table->phase_delay = 0; - vddci_voltage_table->count = voltage_dependency_table->count; - - for (i = 0; i < voltage_dependency_table->count; i++) { - vddci_voltage_table->entries[i].value = - voltage_dependency_table->entries[i].vddci; - vddci_voltage_table->entries[i].smio_low = 0; - } - - result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to trim VDDCI table.", return result;); - - return 0; -} - - - -static int tonga_get_svi2_vdd_voltage_table( - struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *look_up_table, - pp_atomctrl_voltage_table *voltage_table) -{ - uint8_t i = 0; - - PP_ASSERT_WITH_CODE((0 != look_up_table->count), - "Voltage Lookup Table empty.", return -1;); - - voltage_table->mask_low = 0; - voltage_table->phase_delay = 0; - - voltage_table->count = look_up_table->count; - - for (i = 0; i < voltage_table->count; i++) { - voltage_table->entries[i].value = look_up_table->entries[i].us_vdd; - voltage_table->entries[i].smio_low = 0; - } - - return 0; -} - -/* - * -------------------------------------------------------- Voltage Tables -------------------------------------------------------------------------- - * If the voltage table would be bigger than what will fit into the
state table on the SMC keep only the higher entries. - */ - -static void tonga_trim_voltage_table_to_fit_state_table( - struct pp_hwmgr *hwmgr, - uint32_t max_voltage_steps, - pp_atomctrl_voltage_table *voltage_table) -{ - unsigned int i, diff; - - if (voltage_table->count <= max_voltage_steps) { - return; - } - - diff = voltage_table->count - max_voltage_steps; - - for (i = 0; i < max_voltage_steps; i++) { - voltage_table->entries[i] = voltage_table->entries[i + diff]; - } - - voltage_table->count = max_voltage_steps; - - return; -} - -/** - * Create Voltage Tables. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result; - - /* MVDD has only GPIO voltage control */ - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve MVDD table.", return result;); - } - - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { - /* GPIO voltage */ - result = atomctrl_get_voltage_table_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve VDDCI table.", return result;); - } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - /* SVI2 voltage */ - result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr, - pptable_info->vdd_dep_on_mclk); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;); - } - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { - /* VDDGFX has only SVI2 voltage control */ - result = tonga_get_svi2_vdd_voltage_table(hwmgr, - pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;); - } - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - /* VDDC has only SVI2 voltage control */ - result = tonga_get_svi2_vdd_voltage_table(hwmgr, - pptable_info->vddc_lookup_table, &(data->vddc_voltage_table)); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from lookup table.", return result;); - } - - PP_ASSERT_WITH_CODE( - (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)), - "Too many voltage values for VDDC. Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); - ); - - PP_ASSERT_WITH_CODE( - (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)), - "Too many voltage values for VDDGFX. Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table)); - ); - - PP_ASSERT_WITH_CODE( - (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)), - "Too many voltage values for VDDCI. Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); - ); - - PP_ASSERT_WITH_CODE( - (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)), - "Too many voltage values for MVDD. 
Trimming to fit state table.", - tonga_trim_voltage_table_to_fit_state_table(hwmgr, - SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); - ); - - return 0; -} - -/** - * Vddc table preparation for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - unsigned int count; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - table->VddcLevelCount = data->vddc_voltage_table.count; - for (count = 0; count < table->VddcLevelCount; count++) { - table->VddcTable[count] = - PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE); - } - CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); - } - return 0; -} - -/** - * VddGfx table preparation for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - unsigned int count; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { - table->VddGfxLevelCount = data->vddgfx_voltage_table.count; - for (count = 0; count < data->vddgfx_voltage_table.count; count++) { - table->VddGfxTable[count] = - PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE); - } - CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); - } - return 0; -} - -/** - * Vddci table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. - * @return 0 - */ -static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t count; - - table->VddciLevelCount = data->vddci_voltage_table.count; - for (count = 0; count < table->VddciLevelCount; count++) { - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - table->VddciTable[count] = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); - } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { - table->SmioTable1.Pattern[count].Voltage = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); - /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */ - table->SmioTable1.Pattern[count].Smio = - (uint8_t) count; - table->Smio[count] |= - data->vddci_voltage_table.entries[count].smio_low; - table->VddciTable[count] = - PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); - } - } - - table->SmioMask1 = data->vddci_voltage_table.mask_low; - CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); - - return 0; -} - -/** - * Mvdd table preparation for SMC. - * - * @param *hwmgr The address of the hardware manager. - * @param *table The SMC DPM table structure to be populated. 
- * @return 0 - */ -static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t count; - - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - table->MvddLevelCount = data->mvdd_voltage_table.count; - for (count = 0; count < table->MvddLevelCount; count++) { - table->SmioTable2.Pattern[count].Voltage = - PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); - /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ - table->SmioTable2.Pattern[count].Smio = - (uint8_t) count; - table->Smio[count] |= - data->mvdd_voltage_table.entries[count].smio_low; - } - table->SmioMask2 = data->mvdd_voltage_table.mask_low; - - CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); - } - - return 0; -} - -/** - * Convert a voltage value in mv unit to VID number required by SMU firmware - */ -static uint8_t convert_to_vid(uint16_t vddc) -{ - return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); -} - - -/** - * Preparation of vddc and vddgfx CAC tables for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - uint32_t count; - uint8_t index; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; - struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table; - - /* pTables is already swapped, so in order to use the value from it, we need to swap it back. 
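convert_to_vid() directly above encodes a voltage in mV as an SVI2 VID, stepping down from 1.55 V in 6.25 mV increments, assuming VOLTAGE_SCALE is 4 (the macro's value is not visible in this hunk). A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define DEMO_VOLTAGE_SCALE 4   /* assumed value of VOLTAGE_SCALE */

static uint8_t demo_convert_to_vid(uint16_t vddc_mv)
{
	return (uint8_t)((6200 - (vddc_mv * DEMO_VOLTAGE_SCALE)) / 25);
}

int main(void)
{
	assert(demo_convert_to_vid(1550) == 0);    /* 1.55 V -> VID 0 */
	assert(demo_convert_to_vid(1000) == 88);   /* (6200 - 4000) / 25 = 88 */
	return 0;
}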
*/ - uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount); - uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount); - - for (count = 0; count < vddcLevelCount; count++) { - /* We are populating vddc CAC data to BapmVddc table in split and merged mode */ - index = tonga_get_voltage_index(vddc_lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddcVidLoSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); - table->BapmVddcVidHiSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); - table->BapmVddcVidHiSidd2[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); - } - - if ((data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2)) { - /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */ - for (count = 0; count < vddgfxLevelCount; count++) { - index = tonga_get_voltage_index(vddgfx_lookup_table, - data->vddgfx_voltage_table.entries[count].value); - table->BapmVddGfxVidLoSidd[count] = - convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low); - table->BapmVddGfxVidHiSidd[count] = - convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid); - table->BapmVddGfxVidHiSidd2[count] = - convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high); - } - } else { - for (count = 0; count < vddcLevelCount; count++) { - index = tonga_get_voltage_index(vddc_lookup_table, - data->vddc_voltage_table.entries[count].value); - table->BapmVddGfxVidLoSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); - table->BapmVddGfxVidHiSidd[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); - table->BapmVddGfxVidHiSidd2[count] = - convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); - } - } - - return 0; -} - - -/** - * Preparation of voltage tables for SMC. - * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ - -int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result; - - result = tonga_populate_smc_vddc_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDC voltage table to SMC", return -1); - - result = tonga_populate_smc_vdd_ci_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDCI voltage table to SMC", return -1); - - result = tonga_populate_smc_vdd_gfx_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate VDDGFX voltage table to SMC", return -1); - - result = tonga_populate_smc_mvdd_table(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate MVDD voltage table to SMC", return -1); - - result = tonga_populate_cac_tables(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "can not populate CAC voltage tables to SMC", return -1); - - return 0; -} - -/** - * Populates the SMC VRConfig field in DPM table. 
- * - * @param hwmgr the address of the hardware manager - * @param table the SMC DPM table structure to be populated - * @return always 0 - */ -static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint16_t config; - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { - /* Splitted mode */ - config = VR_SVI2_PLANE_1; - table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); - - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_2; - table->VRConfig |= config; - } else { - printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n"); - } - } else { - /* Merged mode */ - config = VR_MERGED_WITH_VDDC; - table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); - - /* Set Vddc Voltage Controller */ - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { - config = VR_SVI2_PLANE_1; - table->VRConfig |= config; - } else { - printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n"); - } - } - - /* Set Vddci Voltage Controller */ - if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { - config = VR_SVI2_PLANE_2; /* only in merged mode */ - table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); - } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { - config = VR_SMIO_PATTERN_1; - table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); - } - - /* Set Mvdd Voltage Controller */ - if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config<<VRCONF_MVDD_SHIFT); - } - - return 0; -} - -static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, - phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table, - uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) -{ - uint32_t i = 0; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - /* clock - voltage dependency table is empty table */ - if (allowed_clock_voltage_table->count == 0) - return -1; - - for (i = 0; i < allowed_clock_voltage_table->count; i++) { - /* find first sclk bigger than request */ - if (allowed_clock_voltage_table->entries[i].clk >= clock) { - voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - allowed_clock_voltage_table->entries[i].vddgfx); - - voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table, - allowed_clock_voltage_table->entries[i].vddc); - - if (allowed_clock_voltage_table->entries[i].vddci) { - voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table, - allowed_clock_voltage_table->entries[i].vddci); - } else { - voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table, - allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta); - } - - if (allowed_clock_voltage_table->entries[i].mvdd) { - *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd; - } - - voltage->Phases = 1; - return 0; - } - } - - /* sclk is bigger than max sclk in the dependence table */ - voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - allowed_clock_voltage_table->entries[i-1].vddgfx); - voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table, - allowed_clock_voltage_table->entries[i-1].vddc); - - if (allowed_clock_voltage_table->entries[i-1].vddci) { - voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table, - 
allowed_clock_voltage_table->entries[i-1].vddci); - } - if (allowed_clock_voltage_table->entries[i-1].mvdd) { - *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd; - } - - return 0; -} - -/** - * Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_reset_to_default(struct pp_hwmgr *hwmgr) -{ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1; -} - -int tonga_populate_memory_timing_parameters( - struct pp_hwmgr *hwmgr, - uint32_t engine_clock, - uint32_t memory_clock, - struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs - ) -{ - uint32_t dramTiming; - uint32_t dramTiming2; - uint32_t burstTime; - int result; - - result = atomctrl_set_engine_dram_timings_rv770(hwmgr, - engine_clock, memory_clock); - - PP_ASSERT_WITH_CODE(result == 0, - "Error calling VBIOS to set DRAM_TIMING.", return result); - - dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - - arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); - arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); - arb_regs->McArbBurstTime = (uint8_t)burstTime; - - return 0; -} - -/** - * Setup parameters for the MC ARB. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - * This function is to be called from the SetPowerState table. - */ -int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - int result = 0; - SMU72_Discrete_MCArbDramTimingTable arb_regs; - uint32_t i, j; - - memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable)); - - for (i = 0; i < data->dpm_table.sclk_table.count; i++) { - for (j = 0; j < data->dpm_table.mclk_table.count; j++) { - result = tonga_populate_memory_timing_parameters - (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, - data->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - - if (0 != result) { - break; - } - } - } - - if (0 == result) { - result = tonga_copy_bytes_to_smc( - hwmgr->smumgr, - data->arb_table_start, - (uint8_t *)&arb_regs, - sizeof(SMU72_Discrete_MCArbDramTimingTable), - data->sram_end - ); - } - - return result; -} - -static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct tonga_dpm_table *dpm_table = &data->dpm_table; - uint32_t i; - - /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
 */ - for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = - (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = - 1; - table->LinkLevel[i].SPC = - (uint8_t)(data->pcie_spc_cap & 0xff); - table->LinkLevel[i].DownThreshold = - PP_HOST_TO_SMC_UL(5); - table->LinkLevel[i].UpThreshold = - PP_HOST_TO_SMC_UL(30); - } - - data->smc_state_table.LinkLevelCount = - (uint8_t)dpm_table->pcie_speed_table.count; - data->dpm_level_enable_mask.pcie_dpm_enable_mask = - tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); - - return 0; -} - -static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->UvdLevelCount = (uint8_t) (mm_table->count); - table->UvdBootLevel = 0; - - for (count = 0; count < table->UvdLevelCount; count++) { - table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; - table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; - table->UvdLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->UvdLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ? - tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->UvdLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->UvdLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].VclkFrequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Vclk clock", return result); - - table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; - - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->UvdLevel[count].DclkFrequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for Dclk clock", return result); - - table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); - //CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage); - } - - return result; - -} - -static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->VceLevelCount = (uint8_t) (mm_table->count); - table->VceBootLevel = 0; - - for (count = 0; count < table->VceLevelCount; count++) { - table->VceLevel[count].Frequency = - mm_table->entries[count].eclk; - table->VceLevel[count].MinVoltage.Vddc = - 
tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->VceLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ? - tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->VceLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->VceLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->VceLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for VCE engine clock", return result); - - table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); - } - - return result; -} - -static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->AcpLevelCount = (uint8_t) (mm_table->count); - table->AcpBootLevel = 0; - - for (count = 0; count < table->AcpLevelCount; count++) { - table->AcpLevel[count].Frequency = - pptable_info->mm_dep_table->entries[count].aclk; - table->AcpLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->AcpLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ? - tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->AcpLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->AcpLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->AcpLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for engine clock", return result); - - table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); - } - - return result; -} - -static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t) (mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].Frequency = - pptable_info->mm_dep_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage.Vddc = - tonga_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->SamuLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->SamuLevel[count].MinVoltage.Vddci = - tonga_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - data->vddc_vddci_delta); - table->SamuLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - } - - return result; -} - -/** - * Populates the SMC MCLK structure using the provided memory clock - * - * @param hwmgr the address of the hardware manager - * @param memory_clock the memory clock to use to populate the structure - * @param mclk the SMC memory level structure to be populated - */ -static int tonga_calculate_mclk_params( - struct pp_hwmgr *hwmgr, - uint32_t memory_clock, - SMU72_Discrete_MemoryLevel *mclk, - bool strobe_mode, - bool dllStateOn - ) -{ - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; - uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; - uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; - uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; - uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; - uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; - uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; - uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; - uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; - - pp_atomctrl_memory_clock_param mpll_param; - int result; - - result = atomctrl_get_memory_pll_dividers_si(hwmgr, - memory_clock, &mpll_param, strobe_mode); - PP_ASSERT_WITH_CODE(0 == result, - "Error retrieving Memory Clock Parameters from VBIOS.", return result); - - /* MPLL_FUNC_CNTL setup*/ - mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); - - /* MPLL_FUNC_CNTL_1 setup*/ - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); - mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, - MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); - - /* MPLL_AD_FUNC_CNTL setup*/ - mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, - MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); - - if (data->is_memory_GDDR5) { - /* MPLL_DQ_FUNC_CNTL setup*/ - mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, - MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); - mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, - MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { - /* - ************************************ - Fref = Reference Frequency - NF = Feedback divider ratio - NR = Reference divider ratio - Fnom = Nominal VCO output frequency = Fref * NF / NR - Fs = Spreading Rate - D = Percentage down-spread / 2 - Fint = Reference input frequency to PFD = Fref / NR - NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) - CLKS = NS - 1 = ISS_STEP_NUM[11:0] - NV = D * Fs / Fnom * 4 * 
((Fnom/Fref * NR) ^ 2) - CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] - ************************************* - */ - pp_atomctrl_internal_ss_info ss_info; - uint32_t freq_nom; - uint32_t tmp; - uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); - - /* for GDDR5 for all modes and DDR3 */ - if (1 == mpll_param.qdr) - freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); - else - freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); - - /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ - tmp = (freq_nom / reference_clock); - tmp = tmp * tmp; - - if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { - /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ - /* ss.Info.speed_spectrum_rate -- in unit of khz */ - /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ - /* = reference_clock * 5 / speed_spectrum_rate */ - uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; - - /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ - /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ - uint32_t clkv = - (uint32_t)((((131 * ss_info.speed_spectrum_percentage * - ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); - - mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); - mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); - } - } - - /* MCLK_PWRMGT_CNTL setup */ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); - - - /* Save the result data to outpupt memory level structure */ - mclk->MclkFrequency = memory_clock; - mclk->MpllFuncCntl = mpll_func_cntl; - mclk->MpllFuncCntl_1 = mpll_func_cntl_1; - mclk->MpllFuncCntl_2 = mpll_func_cntl_2; - mclk->MpllAdFuncCntl = mpll_ad_func_cntl; - mclk->MpllDqFuncCntl = mpll_dq_func_cntl; - mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; - mclk->DllCntl = dll_cntl; - mclk->MpllSs1 = mpll_ss1; - mclk->MpllSs2 = mpll_ss2; - - return 0; -} - -static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock, - bool strobe_mode) -{ - uint8_t mc_para_index; - - if (strobe_mode) { - if (memory_clock < 12500) { - mc_para_index = 0x00; - } else if (memory_clock > 47500) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); - } - } else { - if (memory_clock < 65000) { - mc_para_index = 0x00; - } else if (memory_clock > 135000) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); - } - } - - return mc_para_index; -} - -static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) -{ - uint8_t mc_para_index; - - if (memory_clock < 10000) { - mc_para_index = 0; - } else if (memory_clock >= 80000) { - mc_para_index = 0x0f; - } else { - mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); - } - - return mc_para_index; -} - -static int tonga_populate_single_memory_level( - struct pp_hwmgr *hwmgr, - uint32_t memory_clock, - SMU72_Discrete_MemoryLevel *memory_level - ) -{ - uint32_t minMvdd = 0; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct 
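To make the ratio tables above concrete (memory clocks in this code are in 10 kHz units): in strobe mode a 400 MHz memory clock maps to index (40000 - 10000) / 2500 = 12, anything below 125 MHz clamps to 0, and anything above 475 MHz clamps to 0x0f. A standalone check of the same arithmetic:

#include <assert.h>
#include <stdint.h>

/* Same mapping as the strobe-mode branch of tonga_get_mclk_frequency_ratio(),
 * with memory_clock in 10 kHz units. */
static uint8_t demo_strobe_ratio(uint32_t memory_clock)
{
	if (memory_clock < 12500)
		return 0x00;
	if (memory_clock > 47500)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 2500);
}

int main(void)
{
	assert(demo_strobe_ratio(40000) == 12);    /* 400 MHz */
	assert(demo_strobe_ratio(10000) == 0);     /* clamps low */
	assert(demo_strobe_ratio(60000) == 0x0f);  /* clamps high */
	return 0;
}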
phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - bool dllStateOn; - struct cgs_display_info info = {0}; - - - if (NULL != pptable_info->vdd_dep_on_mclk) { - result = tonga_get_dependecy_volt_by_clk(hwmgr, - pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); - } - - if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) { - memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value; - } else { - memory_level->MinMvdd = minMvdd; - } - memory_level->EnabledForThrottle = 1; - memory_level->EnabledForActivity = 0; - memory_level->UpHyst = 0; - memory_level->DownHyst = 100; - memory_level->VoltageDownHyst = 0; - - /* Indicates maximum activity level for this performance level.*/ - memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; - memory_level->StutterEnable = 0; - memory_level->StrobeEnable = 0; - memory_level->EdcReadEnable = 0; - memory_level->EdcWriteEnable = 0; - memory_level->RttEnable = 0; - - /* default set to low watermark. Highest level will be set to high later.*/ - memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; - - if ((data->mclk_stutter_mode_threshold != 0) && - (memory_clock <= data->mclk_stutter_mode_threshold) && - (!data->is_uvd_enabled) - && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) - && (data->display_timing.num_existing_displays <= 2) - && (data->display_timing.num_existing_displays != 0)) - memory_level->StutterEnable = 1; - - /* decide strobe mode*/ - memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && - (memory_clock <= data->mclk_strobe_mode_threshold); - - /* decide EDC mode and memory clock ratio*/ - if (data->is_memory_GDDR5) { - memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock, - memory_level->StrobeEnable); - - if ((data->mclk_edc_enable_threshold != 0) && - (memory_clock > data->mclk_edc_enable_threshold)) { - memory_level->EdcReadEnable = 1; - } - - if ((data->mclk_edc_wr_enable_threshold != 0) && - (memory_clock > data->mclk_edc_wr_enable_threshold)) { - memory_level->EdcWriteEnable = 1; - } - - if (memory_level->StrobeEnable) { - if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >= - ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; - } else { - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; - } - - } else { - dllStateOn = data->dll_defaule_on; - } - } else { - memory_level->StrobeRatio = - tonga_get_ddr3_mclk_frequency_ratio(memory_clock); - dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; - } - - result = tonga_calculate_mclk_params(hwmgr, - memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); - - if (0 == result) { - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd); - /* MCLK frequency in units of 10KHz*/ - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); - /* Indicates maximum activity level for this performance level.*/ - CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); - CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); - } - - return result; -} - -/** - * Populates the SMC MVDD structure using the provided memory clock. - * - * @param hwmgr the address of the hardware manager - * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. - * @param voltage the SMC VOLTAGE structure to be populated - */ -int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern) -{ - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i = 0; - - if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) { - /* find mvdd value which clock is more than request */ - for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) { - if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) { - /* Always round to higher voltage. 
*/ - smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value; - break; - } - } - - PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count, - "MVDD Voltage is outside the supported range.", return -1); - - } else { - return -1; - } - - return 0; -} - - -static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_clock_dividers_vi dividers; - SMIO_Pattern voltage_level; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; - uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; - uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; - - /* The ACPI state should not do DPM on DC (or ever).*/ - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage; - - /* assign zero for now*/ - table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); - - /* get the engine clock dividers for this clock value*/ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, - table->ACPILevel.SclkFrequency, ÷rs); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", return result); - - /* divider ID for required SCLK*/ - table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; - table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - table->ACPILevel.DeepSleepDivId = 0; - - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); - spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, - CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); - - table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; - table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; - table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - - /* For various features to be enabled/disabled while this level is active.*/ - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); - /* SCLK frequency in units of 10KHz*/ - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); - - /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ - table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage; - - /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ - - if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level)) - table->MemoryACPILevel.MinMvdd = - PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); - else - table->MemoryACPILevel.MinMvdd = 0; 
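[Editor's note] The removed tonga_populate_mvdd_value() above selects the MVDD SMIO value by scanning the MCLK voltage-dependency table and taking the first entry whose clock is at or above the requested MCLK, i.e. it always rounds the voltage up and fails if the clock lies beyond the table. A minimal standalone sketch of that lookup, using hypothetical table types in place of the powerplay pptable structures:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for one pptable clock/voltage dependency entry. */
struct clk_volt_entry {
	uint32_t clk;      /* clock in 10 kHz units */
	uint16_t voltage;  /* voltage for clocks up to and including clk */
};

/*
 * Return the voltage of the first entry whose clock is >= the requested
 * mclk ("always round to higher voltage"), or -1 if the requested MCLK
 * is outside the supported range of the table.
 */
static int lookup_mvdd_for_mclk(const struct clk_volt_entry *table,
				size_t count, uint32_t mclk,
				uint16_t *voltage)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (mclk <= table[i].clk) {
			*voltage = table[i].voltage;
			return 0;
		}
	}

	return -1; /* requested MCLK exceeds every entry in the table */
}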
- - /* Force reset on DLL*/ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); - - /* Disable DLL in ACPIState*/ - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); - mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, - MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); - - /* Enable DLL bypass signal*/ - dll_cntl = PHM_SET_FIELD(dll_cntl, - DLL_CNTL, MRDCK0_BYPASS, 0); - dll_cntl = PHM_SET_FIELD(dll_cntl, - DLL_CNTL, MRDCK1_BYPASS, 0); - - table->MemoryACPILevel.DllCntl = - PP_HOST_TO_SMC_UL(dll_cntl); - table->MemoryACPILevel.MclkPwrmgtCntl = - PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); - table->MemoryACPILevel.MpllAdFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); - table->MemoryACPILevel.MpllDqFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); - table->MemoryACPILevel.MpllFuncCntl = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); - table->MemoryACPILevel.MpllFuncCntl_1 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); - table->MemoryACPILevel.MpllFuncCntl_2 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); - table->MemoryACPILevel.MpllSs1 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); - table->MemoryACPILevel.MpllSs2 = - PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpHyst = 0; - table->MemoryACPILevel.DownHyst = 100; - table->MemoryACPILevel.VoltageDownHyst = 0; - /* Indicates maximum activity level for this performance level.*/ - table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); - - table->MemoryACPILevel.StutterEnable = 0; - table->MemoryACPILevel.StrobeEnable = 0; - table->MemoryACPILevel.EdcReadEnable = 0; - table->MemoryACPILevel.EdcWriteEnable = 0; - table->MemoryACPILevel.RttEnable = 0; - - return result; -} - -static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level) -{ - int result = 0; - uint32_t i; - - for (i = 0; i < table->count; i++) { - if (value == table->dpm_levels[i].value) { - *boot_level = i; - result = 0; - } - } - return result; -} - -static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */ - table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */ - - /* find boot level from dpm table*/ - result = tonga_find_boot_level(&(data->dpm_table.sclk_table), - data->vbios_boot_state.sclk_bootup_value, - (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); - - if (0 != result) { - data->smc_state_table.GraphicsBootLevel = 0; - printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ - in dependency table. Using Graphics DPM level 0!"); - result = 0; - } - - result = tonga_find_boot_level(&(data->dpm_table.mclk_table), - data->vbios_boot_state.mclk_bootup_value, - (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); - - if (0 != result) { - data->smc_state_table.MemoryBootLevel = 0; - printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ - in dependency table. 
Using Memory DPM level 0!"); - result = 0; - } - - table->BootVoltage.Vddc = - tonga_get_voltage_id(&(data->vddc_voltage_table), - data->vbios_boot_state.vddc_bootup_value); - table->BootVoltage.VddGfx = - tonga_get_voltage_id(&(data->vddgfx_voltage_table), - data->vbios_boot_state.vddgfx_bootup_value); - table->BootVoltage.Vddci = - tonga_get_voltage_id(&(data->vddci_voltage_table), - data->vbios_boot_state.vddci_bootup_value); - table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; - - CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); - - return result; -} - - -/** - * Calculates the SCLK dividers using the provided engine clock - * - * @param hwmgr the address of the hardware manager - * @param engine_clock the engine clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr, - uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk) -{ - const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_clock_dividers_vi dividers; - uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; - uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; - uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; - uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; - uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t reference_clock; - uint32_t reference_divider; - uint32_t fbdiv; - int result; - - /* get the engine clock dividers for this clock value*/ - result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, ÷rs); - - PP_ASSERT_WITH_CODE(result == 0, - "Error retrieving Engine Clock dividers from VBIOS.", return result); - - /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ - reference_clock = atomctrl_get_reference_clock(hwmgr); - - reference_divider = 1 + dividers.uc_pll_ref_div; - - /* low 14 bits is fraction and high 12 bits is divider*/ - fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; - - /* SPLL_FUNC_CNTL setup*/ - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); - spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, - CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); - - /* SPLL_FUNC_CNTL_3 setup*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, - CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); - - /* set to use fractional accumulation*/ - spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, - CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { - pp_atomctrl_internal_ss_info ss_info; - - uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; - if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { - /* - * ss_info.speed_spectrum_percentage -- in unit of 0.01% - * ss_info.speed_spectrum_rate -- in unit of khz - */ - /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ - uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); - - /* clkv = 2 * D * fbdiv / NS */ - uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); - - cg_spll_spread_spectrum = - PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); - cg_spll_spread_spectrum = - PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); - cg_spll_spread_spectrum_2 = - 
PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); - } - } - - sclk->SclkFrequency = engine_clock; - sclk->CgSpllFuncCntl3 = spll_func_cntl_3; - sclk->CgSpllFuncCntl4 = spll_func_cntl_4; - sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; - sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; - sclk->SclkDid = (uint8_t)dividers.pll_post_divider; - - return 0; -} - -static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock, - uint32_t min_engine_clock_in_sr) -{ - uint32_t i, temp; - uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK); - - PP_ASSERT_WITH_CODE((engine_clock >= min), - "Engine clock can't satisfy stutter requirement!", return 0); - - for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) { - temp = engine_clock >> i; - - if(temp >= min || i == 0) - break; - } - return (uint8_t)i; -} - -/** - * Populates single SMC SCLK structure using the provided engine clock - * - * @param hwmgr the address of the hardware manager - * @param engine_clock the engine clock to use to populate the structure - * @param sclk the SMC SCLK structure to be populated - */ -static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t engine_clock, uint16_t sclk_activity_level_threshold, SMU72_Discrete_GraphicsLevel *graphic_level) -{ - int result; - uint32_t threshold; - uint32_t mvdd; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); - - - /* populate graphics levels*/ - result = tonga_get_dependecy_volt_by_clk(hwmgr, - pptable_info->vdd_dep_on_sclk, engine_clock, - &graphic_level->MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "can not find VDDC voltage value for VDDC \ - engine clock dependency table", return result); - - /* SCLK frequency in units of 10KHz*/ - graphic_level->SclkFrequency = engine_clock; - - /* Indicates maximum activity level for this performance level. 50% for now*/ - graphic_level->ActivityLevel = sclk_activity_level_threshold; - - graphic_level->CcPwrDynRm = 0; - graphic_level->CcPwrDynRm1 = 0; - /* this level can be used if activity is high enough.*/ - graphic_level->EnabledForActivity = 0; - /* this level can be used for throttling.*/ - graphic_level->EnabledForThrottle = 1; - graphic_level->UpHyst = 0; - graphic_level->DownHyst = 0; - graphic_level->VoltageDownHyst = 0; - graphic_level->PowerThrottle = 0; - - threshold = engine_clock * data->fast_watemark_threshold / 100; -/* - *get the DAL clock. do it in funture. 
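[Editor's note] The removed tonga_get_sleep_divider_id_from_clock() above searches downward from the maximum deep-sleep divider ID for the largest power-of-two divide that still keeps the engine clock at or above the minimum clock allowed in self-refresh. A hedged, self-contained sketch of the same search; the constants here are illustrative placeholders, not the real TONGA_* values:

#include <stdint.h>

#define EXAMPLE_MAX_DEEPSLEEP_DIVIDER_ID	5	/* illustrative only */
#define EXAMPLE_MINIMUM_ENGINE_CLOCK		2500	/* illustrative only, 10 kHz units */

static uint8_t example_sleep_divider_id(uint32_t engine_clock,
					uint32_t min_engine_clock_in_sr)
{
	uint32_t min = (min_engine_clock_in_sr > EXAMPLE_MINIMUM_ENGINE_CLOCK) ?
			min_engine_clock_in_sr : EXAMPLE_MINIMUM_ENGINE_CLOCK;
	uint32_t i;

	if (engine_clock < min)
		return 0;	/* clock cannot satisfy the stutter requirement */

	/*
	 * Walk down from the largest divider ID and keep the first one that
	 * still leaves (engine_clock >> i) at or above the minimum; fall
	 * through to divider 0 if none of the larger dividers qualify.
	 */
	for (i = EXAMPLE_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		if ((engine_clock >> i) >= min)
			break;
	}

	return (uint8_t)i;
}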
- PECI_GetMinClockSettings(hwmgr->peci, &minClocks); - data->display_timing.min_clock_insr = minClocks.engineClockInSR; -*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep)) - graphic_level->DeepSleepDivId = - tonga_get_sleep_divider_id_from_clock(engine_clock, - data->display_timing.min_clock_insr); - - /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ - graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - if (0 == result) { - /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ - /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/ - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); - CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); - CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); - } - - return result; -} - -/** - * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states - * - * @param hwmgr the address of the hardware manager - */ -static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct tonga_dpm_table *dpm_table = &data->dpm_table; - phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; - uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count; - int result = 0; - uint32_t level_array_adress = data->dpm_table_start + - offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); - uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) * - SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/ - SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; - uint32_t i, maxEntry; - uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; - PECI_RegistryValue reg_value; - memset(levels, 0x00, level_array_size); - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - result = tonga_populate_single_graphic_level(hwmgr, - dpm_table->sclk_table.dpm_levels[i].value, - (uint16_t)data->activity_target[i], - &(data->smc_state_table.GraphicsLevel[i])); - - if (0 != result) - return result; - - /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ - if (i > 1) - data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; - - if (0 == i) { - reg_value = 0; - if (reg_value != 0) - data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value; - } - - if (1 == i) { - reg_value = 0; - if (reg_value != 0) - data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value; - } - } - - /* Only enable level 0 for now. 
*/ - data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; - - /* set highest level watermark to high */ - if (dpm_table->sclk_table.count > 1) - data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - data->smc_state_table.GraphicsDpmLevelCount = - (uint8_t)dpm_table->sclk_table.count; - data->dpm_level_enable_mask.sclk_dpm_enable_mask = - tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - if (pcie_table != NULL) { - PP_ASSERT_WITH_CODE((pcie_entry_count >= 1), - "There must be 1 or more PCIE levels defined in PPTable.", return -1); - maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/ - for (i = 0; i < dpm_table->sclk_table.count; i++) { - data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = - (uint8_t) ((i < maxEntry) ? i : maxEntry); - } - } else { - if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask) - printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!"); - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1<<(highest_pcie_level_enabled+1))) != 0)) { - highest_pcie_level_enabled++; - } - - while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1<<lowest_pcie_level_enabled)) == 0)) { - lowest_pcie_level_enabled++; - } - - while ((count < highest_pcie_level_enabled) && - ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & - (1<<(lowest_pcie_level_enabled+1+count))) == 0)) { - count++; - } - mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? - (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled; - - - /* set pcieDpmLevel to highest_pcie_level_enabled*/ - for (i = 2; i < dpm_table->sclk_table.count; i++) { - data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; - } - - /* set pcieDpmLevel to lowest_pcie_level_enabled*/ - data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; - - /* set pcieDpmLevel to mid_pcie_level_enabled*/ - data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; - } - /* level count will send to smc once at init smc table and never change*/ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); - - if (0 != result) - return result; - - return 0; -} - -/** - * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states - * - * @param hwmgr the address of the hardware manager - */ - -static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct tonga_dpm_table *dpm_table = &data->dpm_table; - int result; - /* populate MCLK dpm table to SMU7 */ - uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel); - uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY; - SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel; - uint32_t i; - - memset(levels, 0x00, level_array_size); - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), - "can not populate memory level as memory clock is zero", return -1); - result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, - &(data->smc_state_table.MemoryLevel[i])); - 
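[Editor's note] In the fallback path removed above (no PCIE table in the PPTable), tonga_populate_all_graphic_levels() derives the highest, lowest and "mid" PCIe DPM levels by walking pcie_dpm_enable_mask bit by bit. A small sketch of that mask walk, assuming a plain uint32_t mask where bit N set means level N is enabled and only a handful of levels exist; it mirrors the removed logic rather than defining the driver's API:

#include <stdint.h>

/*
 * Derive the lowest, highest and "mid" enabled levels from an enable
 * mask (bit N set => DPM level N enabled). Mirrors the bit walk in the
 * removed fallback path of tonga_populate_all_graphic_levels().
 */
static void example_pcie_levels_from_mask(uint32_t mask, uint8_t *lowest,
					  uint8_t *highest, uint8_t *mid)
{
	uint8_t lo = 0, hi = 0, count = 0;

	if (mask == 0) {
		*lowest = *highest = *mid = 0;
		return;
	}

	/* highest: keep climbing while the next-higher bit is also set */
	while (hi < 31 && (mask & (1u << (hi + 1))))
		hi++;

	/* lowest: index of the first set bit */
	while ((mask & (1u << lo)) == 0)
		lo++;

	/* mid: first gap above the lowest level, capped at the highest */
	while (count < hi && (mask & (1u << (lo + 1 + count))) == 0)
		count++;

	*lowest = lo;
	*highest = hi;
	*mid = (uint8_t)(((lo + 1 + count) < hi) ? (lo + 1 + count) : hi);
}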
if (0 != result) { - return result; - } - } - - /* Only enable level 0 for now.*/ - data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; - - /* - * in order to prevent MC activity from stutter mode to push DPM up. - * the UVD change complements this by putting the MCLK in a higher state - * by default such that we are not effected by up threshold or and MCLK DPM latency. - */ - data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel); - - data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; - data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - /* set highest level watermark to high*/ - data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; - - /* level count will send to smc once at init smc table and never change*/ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, - level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); - - if (0 != result) { - return result; - } - - return 0; -} - -struct TONGA_DLL_SPEED_SETTING { - uint16_t Min; /* Minimum Data Rate*/ - uint16_t Max; /* Maximum Data Rate*/ - uint32_t dll_speed; /* The desired DLL_SPEED setting*/ -}; - -static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) -{ - return 0; -} - -/* ---------------------------------------- ULV related functions ----------------------------------------------------*/ - - -static int tonga_reset_single_dpm_table( - struct pp_hwmgr *hwmgr, - struct tonga_single_dpm_table *dpm_table, - uint32_t count) -{ - uint32_t i; - if (!(count <= MAX_REGULAR_DPM_NUMBER)) - printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \ - table entries to exceed max number! \n"); - - dpm_table->count = count; - for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { - dpm_table->dpm_levels[i].enabled = false; - } - - return 0; -} - -static void tonga_setup_pcie_table_entry( - struct tonga_single_dpm_table *dpm_table, - uint32_t index, uint32_t pcie_gen, - uint32_t pcie_lanes) -{ - dpm_table->dpm_levels[index].value = pcie_gen; - dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = true; -} - -static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; - uint32_t i, maxEntry; - - if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) { - data->pcie_gen_power_saving = data->pcie_gen_performance; - data->pcie_lane_power_saving = data->pcie_lane_performance; - } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) { - data->pcie_gen_performance = data->pcie_gen_power_saving; - data->pcie_lane_performance = data->pcie_lane_power_saving; - } - - tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK); - - if (pcie_table != NULL) { - /* - * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue). - * If PCIE table from PPTable have ULV entry + 8 entries, then ignore the last entry. - */ - maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ? 
- SMU72_MAX_LEVELS_LINK : pcie_table->count; - for (i = 1; i < maxEntry; i++) { - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1, - get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - } - data->dpm_table.pcie_speed_table.count = maxEntry - 1; - } else { - /* Hardcode Pcie Table */ - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, - get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - data->dpm_table.pcie_speed_table.count = 6; - } - /* Populate last level for boot PCIE level, but do not increment count. */ - tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, - data->dpm_table.pcie_speed_table.count, - get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), - get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); - - return 0; - -} - -/* - * This function is to initalize all DPM state tables for SMU7 based on the dependency table. - * Dynamic state patching function will then trim these state tables to the allowed range based - * on the power policy or external client requests, such as UVD request, etc. - */ -static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table = - pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table = - pptable_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, - "SCLK dependency table has to have is missing. This table is mandatory", return -1); - - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, - "VMCLK dependency table has to have is missing. 
This table is mandatory", return -1); - - /* clear the state table to reset everything to default */ - memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); - tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS); - tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY); - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */ - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/ - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/ - /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/ - - PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, - "SCLK dependency table is missing. This table is mandatory", return -1); - /* Initialize Sclk DPM table based on allow Sclk values*/ - data->dpm_table.sclk_table.count = 0; - - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != - allowed_vdd_sclk_table->entries[i].clk) { - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = - allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */ - data->dpm_table.sclk_table.count++; - } - } - - PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, - "MCLK dependency table is missing. This table is mandatory", return -1); - /* Initialize Mclk DPM table based on allow Mclk values */ - data->dpm_table.mclk_table.count = 0; - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != - allowed_vdd_mclk_table->entries[i].clk) { - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = - allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */ - data->dpm_table.mclk_table.count++; - } - } - - /* setup PCIE gen speed levels*/ - tonga_setup_default_pcie_tables(hwmgr); - - /* save a copy of the default DPM table*/ - memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table)); - - return 0; -} - -int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr, - const struct tonga_power_state *bootState) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint8_t count, level; - - count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count); - for (level = 0; level < count; level++) { - if (pptable_info->vdd_dep_on_sclk->entries[level].clk >= - bootState->performance_levels[0].engine_clock) { - data->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count); - for (level = 0; level < count; level++) { - if (pptable_info->vdd_dep_on_mclk->entries[level].clk >= - bootState->performance_levels[0].memory_clock) { - data->smc_state_table.MemoryBootLevel = level; - break; - } - } - - return 0; -} - -/** - * Initializes the SMC table and uploads it - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @param pInput the pointer to input data (PowerState) - * @return always 0 - */ -int tonga_init_smc_table(struct pp_hwmgr *hwmgr) -{ - int result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - SMU72_Discrete_DpmTable *table = &(data->smc_state_table); - const phw_tonga_ulv_parm *ulv = &(data->ulv); - uint8_t i; - PECI_RegistryValue reg_value; - pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - - result = tonga_setup_default_dpm_tables(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to setup default DPM tables!", return result;); - memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); - if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) { - tonga_populate_smc_voltage_tables(hwmgr, table); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition)) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StepVddc)) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - } - - if (data->is_memory_GDDR5) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - } - - i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN); - - if (i == 1 || i == 0) { - table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL; - } - - if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) { - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ULV state!", return result;); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); - } - - result = tonga_populate_smc_link_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Link Level!", return result;); - - result = tonga_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Graphics Level!", return result;); - - result = tonga_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Memory Level!", return result;); - - result = tonga_populate_smv_acpi_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACPI Level!", return result;); - - result = tonga_populate_smc_vce_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize VCE Level!", return result;); - - result = tonga_populate_smc_acp_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize ACP Level!", return result;); - - result = tonga_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result;); - - /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ - /* need to populate the ARB settings for the initial state. 
*/ - result = tonga_program_memory_timing_parameters(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to Write ARB settings for the initial state.", return result;); - - result = tonga_populate_smc_uvd_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize UVD Level!", return result;); - - result = tonga_populate_smc_boot_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize Boot Level!", return result;); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher)) { - result = tonga_populate_clock_stretcher_data_table(hwmgr); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate Clock Stretcher Data Table!", return result;); - } - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = - pptable_info->cac_dtp_table->usTargetOperatingTemp * - TONGA_Q88_FORMAT_CONVERSION_UNIT; - table->TemperatureLimitLow = - (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) * - TONGA_Q88_FORMAT_CONVERSION_UNIT; - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - - /* - * Cail reads current link status and reports it as cap (we cannot change this due to some previous issues we had) - * SMC drops the link status to lowest level after enabling DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again - * but this time Cail reads current link status which was set to low by SMC and reports it as cap to powerplay - * To avoid it, we set PCIeBootLinkLevel to highest dpm level - */ - PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), - "There must be 1 or more PCIE levels defined in PPTable.", - return -1); - - table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); - - table->PCIeGenInterval = 1; - - result = tonga_populate_vr_config(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to populate VRConfig setting!", return result); - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - reg_value = 0; - if ((0 == reg_value) && - (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, - &gpio_pin_assignment))) { - table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } else { - table->VRHotGpio = TONGA_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot); - } - - /* ACDC Switch GPIO */ - reg_value = 0; - if ((0 == reg_value) && - (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, - &gpio_pin_assignment))) { - table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } else { - table->AcDcGpio = TONGA_UNUSED_GPIO_PIN; - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - } - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition); - - reg_value = 0; - if (1 == reg_value) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_AutomaticDCTransition); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_Falcon_QuickTransition); - } - - reg_value = 0; - if ((0 == reg_value) && 
(atomctrl_get_pp_assign_pin(hwmgr, - THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; - - table->ThermOutPolarity = - (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & - (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0; - - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; - - /* if required, combine VRHot/PCC with thermal out GPIO*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_RegulatorHot) && - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_CombinePCCWithThermalSignal)){ - table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; - } - } else { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ThermalOutGPIO); - - table->ThermOutGpio = 17; - table->ThermOutPolarity = 1; - table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; - } - - for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) { - table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); - } - CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); - CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); - CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); - CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); - CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); - CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); - CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); - - /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + - offsetof(SMU72_Discrete_DpmTable, SystemFlags), - (uint8_t *)&(table->SystemFlags), - sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController), - data->sram_end); - - PP_ASSERT_WITH_CODE(0 == result, - "Failed to upload dpm data to SMC memory!", return result;); - - return result; -} - -/* Look up the voltaged based on DAL's requested level. and then send the requested VDDC voltage to SMC*/ -static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr) -{ - return; -} - -int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) -{ - PPSMC_Result result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Apply minimum voltage based on DAL's request level */ - tonga_apply_dal_minimum_voltage_request(hwmgr); - - if (0 == data->sclk_dpm_key_disabled) { - /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (tonga_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); - - if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Sclk Dpm enable Mask failed", return -1); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ - if (tonga_is_dpm_running(hwmgr)) - printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); - - if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { - result = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == result), - "Set Mclk Dpm enable Mask failed", return -1); - } - } - - return 0; -} - - -int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr) -{ - uint32_t level, tmp; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->pcie_dpm_key_disabled) { - /* PCIE */ - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)), - "force highest pcie dpm state failed!", return -1); - } - } - } - - if (0 == data->sclk_dpm_key_disabled) { - /* SCLK */ - if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)), - "force highest sclk dpm state failed!", return -1); - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ - Curr_Sclk_Index does not match the level \n"); - - } - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* MCLK */ - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { - level = 0; - tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - level++ ; - - if (0 != level) { - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)), - "force highest mclk dpm state failed!", return -1); - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ - Curr_Mclk_Index does not match the level \n"); - } - } - } - - return 0; -} - -/** - * Find the MC microcode version and store it in the HwMgr struct - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr) -{ - cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - - hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); - - return 0; -} - -/** - * Initialize Dynamic State Adjustment Rule Settings - * - * @param hwmgr the address of the powerplay hardware manager. 
- */ -int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) -{ - uint32_t table_size; - struct phm_clock_voltage_dependency_table *table_clk_vlt; - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - hwmgr->dyn_state.mclk_sclk_ratio = 4; - hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */ - hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */ - - /* initialize vddc_dep_on_dal_pwrl table */ - table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = kzalloc(table_size, GFP_KERNEL); - - if (NULL == table_clk_vlt) { - printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); - return -ENOMEM; - } else { - table_clk_vlt->count = 4; - table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; - table_clk_vlt->entries[0].v = 0; - table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; - table_clk_vlt->entries[1].v = 720; - table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; - table_clk_vlt->entries[2].v = 810; - table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; - table_clk_vlt->entries[3].v = 900; - pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; - } - - return 0; -} - -static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = - pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = - pptable_info->vdd_dep_on_mclk; - - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, - "VDD dependency on SCLK table is missing. \ - This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, - "VDD dependency on SCLK table has to have is missing. \ - This table is mandatory", return -1); - - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, - "VDD dependency on MCLK table is missing. \ - This table is mandatory", return -1); - PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, - "VDD dependency on MCLK table has to have is missing. 
\ - This table is mandatory", return -1); - - data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc; - data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - - pptable_info->max_clock_voltage_on_ac.sclk = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; - pptable_info->max_clock_voltage_on_ac.mclk = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; - pptable_info->max_clock_voltage_on_ac.vddc = - allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; - pptable_info->max_clock_voltage_on_ac.vddci = - allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; - - hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = - pptable_info->max_clock_voltage_on_ac.sclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = - pptable_info->max_clock_voltage_on_ac.mclk; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = - pptable_info->max_clock_voltage_on_ac.vddc; - hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = - pptable_info->max_clock_voltage_on_ac.vddci; - - return 0; -} - -int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - int result = 1; - - PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr), - "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", - return result); - - if (0 == data->pcie_dpm_key_disabled) { - PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( - hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_UnForceLevel)), - "unforce pcie level failed!", - return -1); - } - - result = tonga_upload_dpm_level_enable_mask(hwmgr); - - return result; -} - -static uint32_t tonga_get_lowest_enable_level( - struct pp_hwmgr *hwmgr, uint32_t level_mask) -{ - uint32_t level = 0; - - while (0 == (level_mask & (1 << level))) - level++; - - return level; -} - -static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr) -{ - uint32_t level; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->pcie_dpm_key_disabled) { - /* PCIE */ - if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { - level = tonga_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.pcie_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)), - "force lowest pcie dpm state failed!", return -1); - } - } - - if (0 == data->sclk_dpm_key_disabled) { - /* SCLK */ - if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { - level = tonga_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.sclk_dpm_enable_mask); - - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)), - "force sclk dpm state failed!", return -1); - - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. \ - Curr_Sclk_Index does not match the level \n"); - } - } - - if (0 == data->mclk_dpm_key_disabled) { - /* MCLK */ - if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { - level = tonga_get_lowest_enable_level(hwmgr, - data->dpm_level_enable_mask.mclk_dpm_enable_mask); - PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)), - "force lowest mclk dpm state failed!", return -1); - if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level) - printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index. 
\ - Curr_Mclk_Index does not match the level \n"); - } - } - - return 0; -} - -static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - uint8_t voltageId; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddgfx = - pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd; - } - } else { - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - voltageId = sclk_table->entries[entryId].vddInd; - sclk_table->entries[entryId].vddc = - pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - voltageId = mclk_table->entries[entryId].vddInd; - mclk_table->entries[entryId].vddc = - pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - for (entryId = 0; entryId < mm_table->count; ++entryId) { - voltageId = mm_table->entries[entryId].vddcInd; - mm_table->entries[entryId].vddc = - pptable_info->vddc_lookup_table->entries[voltageId].us_vdd; - } - - return 0; - -} - -static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) -{ - uint8_t entryId; - phm_ppt_v1_voltage_lookup_record v_record; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; - phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - for (entryId = 0; entryId < sclk_table->count; ++entryId) { - if (sclk_table->entries[entryId].vdd_offset & (1 << 15)) - v_record.us_vdd = sclk_table->entries[entryId].vddgfx + - sclk_table->entries[entryId].vdd_offset - 0xFFFF; - else - v_record.us_vdd = sclk_table->entries[entryId].vddgfx + - sclk_table->entries[entryId].vdd_offset; - - sclk_table->entries[entryId].vddc = - v_record.us_cac_low = v_record.us_cac_mid = - v_record.us_cac_high = v_record.us_vdd; - - tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); - } - - for (entryId = 0; entryId < mclk_table->count; ++entryId) { - if (mclk_table->entries[entryId].vdd_offset & (1 << 15)) - v_record.us_vdd = mclk_table->entries[entryId].vddc + - mclk_table->entries[entryId].vdd_offset - 0xFFFF; - else - v_record.us_vdd = mclk_table->entries[entryId].vddc + - mclk_table->entries[entryId].vdd_offset; - - mclk_table->entries[entryId].vddgfx = v_record.us_cac_low = - v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; - tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); - } - } - - return 0; - -} - -static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) -{ - uint32_t entryId; - phm_ppt_v1_voltage_lookup_record v_record; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information 
*)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - for (entryId = 0; entryId < mm_table->count; entryId++) { - if (mm_table->entries[entryId].vddgfx_offset & (1 << 15)) - v_record.us_vdd = mm_table->entries[entryId].vddc + - mm_table->entries[entryId].vddgfx_offset - 0xFFFF; - else - v_record.us_vdd = mm_table->entries[entryId].vddc + - mm_table->entries[entryId].vddgfx_offset; - - /* Add the calculated VDDGFX to the VDDGFX lookup table */ - mm_table->entries[entryId].vddgfx = v_record.us_cac_low = - v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; - tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); - } - } - return 0; -} - - -/** - * Change virtual leakage voltage to actual value. - * - * @param hwmgr the address of the powerplay hardware manager. - * @param pointer to changing voltage - * @param pointer to leakage table - */ -static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, - uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable) -{ - uint32_t leakage_index; - - /* search for leakage voltage ID 0xff01 ~ 0xff08 */ - for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) { - /* if this voltage matches a leakage voltage ID */ - /* patch with actual leakage voltage */ - if (pLeakageTable->leakage_id[leakage_index] == *voltage) { - *voltage = pLeakageTable->actual_voltage[leakage_index]; - break; - } - } - - if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n"); -} - -/** - * Patch voltage lookup table by EVV leakages. - * - * @param hwmgr the address of the powerplay hardware manager. 
- * @param pointer to voltage lookup table - * @param pointer to leakage table - * @return always 0 - */ -static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table, - phw_tonga_leakage_voltage *pLeakageTable) -{ - uint32_t i; - - for (i = 0; i < lookup_table->count; i++) { - tonga_patch_with_vdd_leakage(hwmgr, - &lookup_table->entries[i].us_vdd, pLeakageTable); - } - - return 0; -} - -static int tonga_patch_clock_voltage_lomits_with_vddc_leakage(struct pp_hwmgr *hwmgr, - phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc) -{ - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable); - hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = - pptable_info->max_clock_voltage_on_dc.vddc; - - return 0; -} - -static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage( - struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable, - uint16_t *Vddgfx) -{ - tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable); - return 0; -} - -int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr, - phm_ppt_v1_voltage_lookup_table *lookup_table) -{ - uint32_t table_size, i, j; - phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; - table_size = lookup_table->count; - - PP_ASSERT_WITH_CODE(0 != lookup_table->count, - "Lookup table is empty", return -1); - - /* Sorting voltages */ - for (i = 0; i < table_size - 1; i++) { - for (j = i + 1; j > 0; j--) { - if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j-1]; - lookup_table->entries[j-1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; - } - } - } - - return 0; -} - -static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr) -{ - int result = 0; - int tmp_result; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) { - tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr, - pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr, - &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx); - if (tmp_result != 0) - result = tmp_result; - } else { - tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr, - pptable_info->vddc_lookup_table, &(data->vddc_leakage)); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_patch_clock_voltage_lomits_with_vddc_leakage(hwmgr, - &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc); - if (tmp_result != 0) - result = tmp_result; - } - - tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_calc_voltage_dependency_tables(hwmgr); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table); - if (tmp_result != 0) - result = tmp_result; - - tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table); - if (tmp_result != 0) - result = tmp_result; - - return result; -} - -int 
tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - data->low_sclk_interrupt_threshold = 0; - - return 0; -} - -int tonga_setup_asic_task(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = tonga_read_clock_registers(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to read clock registers!", result = tmp_result); - - tmp_result = tonga_get_memory_type(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get memory type!", result = tmp_result); - - tmp_result = tonga_enable_acpi_power_management(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable ACPI power management!", result = tmp_result); - - tmp_result = tonga_init_power_gate_state(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init power gate state!", result = tmp_result); - - tmp_result = tonga_get_mc_microcode_version(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to get MC microcode version!", result = tmp_result); - - tmp_result = tonga_init_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to init sclk threshold!", result = tmp_result); - - return result; -} - -/** - * Enable voltage control - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr) -{ - /* enable voltage control */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); - - return 0; -} - -/** - * Checks if we want to support voltage control - * - * @param hwmgr the address of the powerplay hardware manager. - */ -bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr) -{ - const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - return(TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control); -} - -/*---------------------------MC----------------------------*/ - -uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) -{ - return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); -} - -bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) -{ - bool result = true; - - switch (inReg) { - case mmMC_SEQ_RAS_TIMING: - *outReg = mmMC_SEQ_RAS_TIMING_LP; - break; - - case mmMC_SEQ_DLL_STBY: - *outReg = mmMC_SEQ_DLL_STBY_LP; - break; - - case mmMC_SEQ_G5PDX_CMD0: - *outReg = mmMC_SEQ_G5PDX_CMD0_LP; - break; - - case mmMC_SEQ_G5PDX_CMD1: - *outReg = mmMC_SEQ_G5PDX_CMD1_LP; - break; - - case mmMC_SEQ_G5PDX_CTRL: - *outReg = mmMC_SEQ_G5PDX_CTRL_LP; - break; - - case mmMC_SEQ_CAS_TIMING: - *outReg = mmMC_SEQ_CAS_TIMING_LP; - break; - - case mmMC_SEQ_MISC_TIMING: - *outReg = mmMC_SEQ_MISC_TIMING_LP; - break; - - case mmMC_SEQ_MISC_TIMING2: - *outReg = mmMC_SEQ_MISC_TIMING2_LP; - break; - - case mmMC_SEQ_PMG_DVS_CMD: - *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; - break; - - case mmMC_SEQ_PMG_DVS_CTL: - *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; - break; - - case mmMC_SEQ_RD_CTL_D0: - *outReg = mmMC_SEQ_RD_CTL_D0_LP; - break; - - case mmMC_SEQ_RD_CTL_D1: - *outReg = mmMC_SEQ_RD_CTL_D1_LP; - break; - - case mmMC_SEQ_WR_CTL_D0: - *outReg = mmMC_SEQ_WR_CTL_D0_LP; - break; - - case mmMC_SEQ_WR_CTL_D1: - *outReg = mmMC_SEQ_WR_CTL_D1_LP; - break; - - case mmMC_PMG_CMD_EMRS: - *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; - break; - - case mmMC_PMG_CMD_MRS: - *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; - break; - - case mmMC_PMG_CMD_MRS1: - *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; - break; - - case mmMC_SEQ_PMG_TIMING: - *outReg = mmMC_SEQ_PMG_TIMING_LP; - 
break; - - case mmMC_PMG_CMD_MRS2: - *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; - break; - - case mmMC_SEQ_WR_CTL_2: - *outReg = mmMC_SEQ_WR_CTL_2_LP; - break; - - default: - result = false; - break; - } - - return result; -} - -int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table) -{ - uint32_t i; - uint16_t address; - - for (i = 0; i < table->last; i++) { - table->mc_reg_address[i].s0 = - tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) - ? address : table->mc_reg_address[i].s1; - } - return 0; -} - -int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table) -{ - uint8_t i, j; - - PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), - "Invalid VramInfo table.", return -1); - - for (i = 0; i < table->last; i++) { - ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; - } - ni_table->last = table->last; - - for (i = 0; i < table->num_entries; i++) { - ni_table->mc_reg_table_entry[i].mclk_max = - table->mc_reg_table_entry[i].mclk_max; - for (j = 0; j < table->last; j++) { - ni_table->mc_reg_table_entry[i].mc_data[j] = - table->mc_reg_table_entry[i].mc_data[j]; - } - } - - ni_table->num_entries = table->num_entries; - - return 0; -} - -/** - * VBIOS omits some information to reduce size, we need to recover them here. - * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. - * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] - * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. - * 3. need to set these data for each clock range - * - * @param hwmgr the address of the powerplay hardware manager. - * @param table the address of MCRegTable - * @return always 0 - */ -int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table) -{ - uint8_t i, j, k; - uint32_t temp_reg; - const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - for (i = 0, j = table->last; i < table->last; i++) { - PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - switch (table->mc_reg_address[i].s1) { - /* - * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. 
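The recovery rule described in this comment is pure bit surgery: MC_SEQ_MISC1 carries the EMRS1 command in bits [31:16] and the MRS command in bits [15:0], and each is merged into the low 16 bits of the matching MC_PMG_CMD_* register while that register's upper half is kept. A self-contained illustration (the helper names are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Rebuild MC_PMG_CMD_EMRS from its current contents plus MISC1[31:16]. */
static uint32_t recover_emrs(uint32_t pmg_cmd_emrs, uint32_t seq_misc1)
{
        return (pmg_cmd_emrs & 0xffff0000) | ((seq_misc1 & 0xffff0000) >> 16);
}

/* Rebuild MC_PMG_CMD_MRS from its current contents plus MISC1[15:0]. */
static uint32_t recover_mrs(uint32_t pmg_cmd_mrs, uint32_t seq_misc1)
{
        return (pmg_cmd_mrs & 0xffff0000) | (seq_misc1 & 0x0000ffff);
}

int main(void)
{
        uint32_t misc1 = 0x12345678;  /* EMRS1 = 0x1234, MRS = 0x5678 */
        uint32_t emrs  = 0xabcd0000;  /* current MC_PMG_CMD_EMRS contents */
        uint32_t mrs   = 0xef010000;  /* current MC_PMG_CMD_MRS contents */

        printf("EMRS -> 0x%08x\n", recover_emrs(emrs, misc1)); /* 0xabcd1234 */
        printf("MRS  -> 0x%08x\n", recover_mrs(mrs, misc1));   /* 0xef015678 */
        return 0;
}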
- * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] - */ - case mmMC_SEQ_MISC1: - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - ((temp_reg & 0xffff0000)) | - ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); - } - j++; - PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - - if (!data->is_memory_GDDR5) { - table->mc_reg_table_entry[k].mc_data[j] |= 0x100; - } - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - - if (!data->is_memory_GDDR5) { - table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; - table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - } - - break; - - case mmMC_SEQ_RESERVE_M: - temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - } - j++; - PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), - "Invalid VramInfo table.", return -1); - break; - - default: - break; - } - - } - - table->last = j; - - return 0; -} - -int tonga_set_valid_flag(phw_tonga_mc_reg_table *table) -{ - uint8_t i, j; - for (i = 0; i < table->last; i++) { - for (j = 1; j < table->num_entries; j++) { - if (table->mc_reg_table_entry[j-1].mc_data[i] != - table->mc_reg_table_entry[j].mc_data[i]) { - table->validflag |= (1<<i); - break; - } - } - } - - return 0; -} - -int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - int result; - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - pp_atomctrl_mc_reg_table *table; - phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table; - uint8_t module_index = tonga_get_memory_modile_index(hwmgr); - - table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); - - if (NULL == table) - return -ENOMEM; - - /* Program additional LP registers that are no longer programmed by VBIOS */ - cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); - cgs_write_register(hwmgr->device, 
mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); - cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); - cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); - cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); - - memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); - - result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); - - if (0 == result) - result = tonga_copy_vbios_smc_reg_table(table, ni_table); - - if (0 == result) { - tonga_set_s0_mc_reg_index(ni_table); - result = tonga_set_mc_special_registers(hwmgr, ni_table); - } - - if (0 == result) - tonga_set_valid_flag(ni_table); - - kfree(table); - return result; -} - -/* -* Copy one arb setting to another and then switch the active set. -* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants. 
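Conceptually the copy-and-switch is three steps: read the DRAM timing registers belonging to the source set, write them into the destination set's registers, then point the arbiter at the destination. A toy model with an in-memory register file (the register names and the active-set encoding are invented for the sketch; burst time and the MC_CG_CONFIG unlock are left out):

#include <stdint.h>
#include <stdio.h>

enum {                      /* hypothetical register indices, not real mmMC_* offsets */
        ARB_DRAM_TIMING_F0,
        ARB_DRAM_TIMING2_F0,
        ARB_DRAM_TIMING_F1,
        ARB_DRAM_TIMING2_F1,
        ARB_ACTIVE_SET,     /* stands in for MC_ARB_CG.CG_ARB_REQ */
        REG_COUNT
};

static uint32_t regs[REG_COUNT];

/* Copy the F0 arbiter timings into the F1 set, then make F1 the active set. */
static void switch_arb_f0_to_f1(void)
{
        regs[ARB_DRAM_TIMING_F1]  = regs[ARB_DRAM_TIMING_F0];
        regs[ARB_DRAM_TIMING2_F1] = regs[ARB_DRAM_TIMING2_F0];
        regs[ARB_ACTIVE_SET]      = 1;  /* 1 == F1 in this toy encoding */
}

int main(void)
{
        regs[ARB_DRAM_TIMING_F0]  = 0x11111111;
        regs[ARB_DRAM_TIMING2_F0] = 0x22222222;

        switch_arb_f0_to_f1();
        printf("F1 timing = 0x%08x, active set = %u\n",
               regs[ARB_DRAM_TIMING_F1], regs[ARB_ACTIVE_SET]);
        return 0;
}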
-*/ -int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, - uint32_t arbFreqSrc, uint32_t arbFreqDest) -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint32_t burst_time; - uint32_t mc_cg_config; - - switch (arbFreqSrc) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); - break; - - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); - burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); - break; - - default: - return -1; - } - - switch (arbFreqDest) { - case MC_CG_ARB_FREQ_F0: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); - break; - - case MC_CG_ARB_FREQ_F1: - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); - break; - - default: - return -1; - } - - mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); - mc_cg_config |= 0x0000000F; - cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); - PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest); - - return 0; -} - -/** - * Initial switch from ARB F0->F1 - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - * This function is to be called from the SetPowerState table. - */ -int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr) -{ - return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -/** - * Initialize the ARB DRAM timing table's index field. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) -{ - const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t tmp; - int result; - - /* - * This is a read-modify-write on the first byte of the ARB table. - * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'. - * This solution is ugly, but we never write the whole table only individual fields in it. - * In reality this field should not be in that structure but in a soft register. 
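Since 'current' is the first byte of the table, the patch below boils down to a read-modify-write of one byte inside a 32-bit word. Assuming that byte lands in bits [31:24] of the dword as the SRAM helpers return it (which is what the masking in tonga_init_arb_table_index() implies), the operation in isolation is:

#include <stdint.h>
#include <stdio.h>

/* Overwrite the 'current' byte of the ARB table's first dword. */
static uint32_t set_arb_current_set(uint32_t first_dword, uint8_t arb_set)
{
        first_dword &= 0x00FFFFFF;               /* clear the 'current' byte */
        first_dword |= (uint32_t)arb_set << 24;  /* select the new active set */
        return first_dword;
}

int main(void)
{
        /* 1 is an arbitrary example value, not the real MC_CG_ARB_FREQ_F1. */
        printf("0x%08x -> 0x%08x\n", 0xAA123456u,
               set_arb_current_set(0xAA123456u, 1));
        /* prints 0xaa123456 -> 0x01123456 */
        return 0;
}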
- */ - result = tonga_read_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, &tmp, data->sram_end); - - if (0 != result) - return result; - - tmp &= 0x00FFFFFF; - tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - - return tonga_write_smc_sram_dword(hwmgr->smumgr, - data->arb_table_start, tmp, data->sram_end); -} - -int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) -{ - const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - uint32_t i, j; - - for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) { - if (data->tonga_mc_reg_table.validflag & 1<<j) { - PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE, - "Index of mc_reg_table->address[] array out of boundary", return -1); - mc_reg_table->address[i].s0 = - PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0); - mc_reg_table->address[i].s1 = - PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1); - i++; - } - } - - mc_reg_table->last = (uint8_t)i; - - return 0; -} - -/*convert register values from driver to SMC format */ -void tonga_convert_mc_registers( - const phw_tonga_mc_reg_entry * pEntry, - SMU72_Discrete_MCRegisterSet *pData, - uint32_t numEntries, uint32_t validflag) -{ - uint32_t i, j; - - for (i = 0, j = 0; j < numEntries; j++) { - if (validflag & 1<<j) { - pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]); - i++; - } - } -} - -/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */ -int tonga_convert_mc_reg_table_entry_to_smc( - struct pp_hwmgr *hwmgr, - const uint32_t memory_clock, - SMU72_Discrete_MCRegisterSet *mc_reg_table_data - ) -{ - const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t i = 0; - - for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) { - if (memory_clock <= - data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) { - break; - } - } - - if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0)) - --i; - - tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i], - mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag); - - return 0; -} - -int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, - SMU72_Discrete_MCRegisters *mc_reg_table) -{ - int result = 0; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - int res; - uint32_t i; - - for (i = 0; i < data->dpm_table.mclk_table.count; i++) { - res = tonga_convert_mc_reg_table_entry_to_smc( - hwmgr, - data->dpm_table.mclk_table.dpm_levels[i].value, - &mc_reg_table->data[i] - ); - - if (0 != res) - result = res; - } - - return result; -} - -int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - int result; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters)); - result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize MCRegTable for the MC register addresses!", return result;); - - result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize MCRegTable for driver state!", return result;); - - return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, - (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end); -} - -/** - * Programs static screed detection parameters - * - * @param hwmgr the address of the powerplay hardware 
manager. - * @return always 0 - */ -int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Set static screen threshold unit*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, - data->static_screen_threshold_unit); - /* Set static screen threshold*/ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, - data->static_screen_threshold); - - return 0; -} - -/** - * Setup display gap for glitch free memory clock switching. - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_enable_display_gap(struct pp_hwmgr *hwmgr) -{ - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - - display_gap = PHM_SET_FIELD(display_gap, - CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE); - - display_gap = PHM_SET_FIELD(display_gap, - CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_DISPLAY_GAP_CNTL, display_gap); - - return 0; -} - -/** - * Programs activity state transition voting clients - * - * @param hwmgr the address of the powerplay hardware manager. - * @return always 0 - */ -int tonga_program_voting_clients(struct pp_hwmgr *hwmgr) -{ - tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - - /* Clear reset for voting clients before enabling DPM */ - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); - - return 0; -} - - -int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = tonga_check_for_dpm_stopped(hwmgr); - - if (cf_tonga_voltage_control(hwmgr)) { - tmp_result = tonga_enable_voltage_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable voltage control!", result = tmp_result); - - tmp_result = tonga_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", result = tmp_result); - } - - tmp_result = tonga_initialize_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize MC reg table!", result = tmp_result); - - tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program static screen threshold parameters!", 
result = tmp_result); - - tmp_result = tonga_enable_display_gap(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable display gap!", result = tmp_result); - - tmp_result = tonga_program_voting_clients(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to program voting clients!", result = tmp_result); - - tmp_result = tonga_process_firmware_header(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to process firmware header!", result = tmp_result); - - tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", result = tmp_result); - - tmp_result = tonga_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize SMC table!", result = tmp_result); - - tmp_result = tonga_init_arb_table_index(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize ARB table index!", result = tmp_result); - - tmp_result = tonga_populate_initial_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to populate initialize MC Reg table!", result = tmp_result); - - tmp_result = tonga_notify_smc_display_change(hwmgr, false); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to notify no display!", result = tmp_result); - - /* enable SCLK control */ - tmp_result = tonga_enable_sclk_control(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to enable SCLK control!", result = tmp_result); - - /* enable DPM */ - tmp_result = tonga_start_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to start DPM!", result = tmp_result); - - return result; -} - -int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr) -{ - int tmp_result, result = 0; - - tmp_result = tonga_check_for_dpm_running(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "SMC is still running!", return 0); - - tmp_result = tonga_stop_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to stop DPM!", result = tmp_result); - - tmp_result = tonga_reset_to_default(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to reset to default!", result = tmp_result); - - return result; -} - -int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr) -{ - int result; - - result = tonga_set_boot_state(hwmgr); - if (0 != result) - printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n"); - - return result; -} - -int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - return phm_hwmgr_backend_fini(hwmgr); -} - -/** - * Initializes the Volcanic Islands Hardware Manager - * - * @param hwmgr the address of the powerplay hardware manager. - * @return 1 if success; otherwise appropriate error code. 
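The enable/disable sequences above lean on the PP_ASSERT_WITH_CODE convention: when a condition fails, the macro prints the message and executes whatever statement is passed as the third argument. In tonga_enable_dpm_tasks() that statement is result = tmp_result, so the remaining steps still run and the last failure code is what the caller sees. A stripped-down sketch of that control flow (the step functions are placeholders, not driver calls):

#include <stdio.h>

static int step_ok(void)   { return 0; }
static int step_fail(void) { return -22; }

/* Run every bring-up step, record the last failure, keep going. */
static int enable_all(void)
{
        int result = 0, tmp;

        tmp = step_ok();
        if (tmp) { fprintf(stderr, "step 1 failed\n"); result = tmp; }

        tmp = step_fail();
        if (tmp) { fprintf(stderr, "step 2 failed\n"); result = tmp; }

        tmp = step_ok();
        if (tmp) { fprintf(stderr, "step 3 failed\n"); result = tmp; }

        return result;  /* -22: the sequence completed, last error wins */
}

int main(void)
{
        printf("enable_all() = %d\n", enable_all());
        return 0;
}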
- */ -int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) -{ - int result = 0; - SMU72_Discrete_DpmTable *table = NULL; - tonga_hwmgr *data; - pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - phw_tonga_ulv_parm *ulv; - struct cgs_system_info sys_info = {0}; - - PP_ASSERT_WITH_CODE((NULL != hwmgr), - "Invalid Parameter!", return -1;); - - data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; - - data->dll_defaule_on = false; - data->sram_end = SMC_RAM_END; - - data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT; - data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT; - - data->vddc_vddci_delta = VDDC_VDDCI_DELTA; - data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA; - data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableVoltageIsland); - - data->sclk_dpm_key_disabled = 0; - data->mclk_dpm_key_disabled = 0; - data->pcie_dpm_key_disabled = 0; - data->pcc_monitor_enabled = 0; - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UnTabledHardwareInterface); - - data->gpio_debug = 0; - data->engine_clock_data = 0; - data->memory_clock_data = 0; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DynamicPatchPowerState); - - /* need to set voltage control types before EVV patching*/ - data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE; - data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; - data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; - data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; - data->force_pcie_gen = PP_PCIEGenInvalid; - - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { - data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDGFX)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { - data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; - } - } - - if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDGFX); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) { - data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO; - } - } - - if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableMVDDControl); - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI)) { - if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO; - else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, - VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2; - } - - if 
(TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ControlVDDCI); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TablelessHardwareInterface); - - if (pptable_info->cac_dtp_table->usClockStretchAmount != 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ClockStretcher); - - /* Initializes DPM default values*/ - tonga_initialize_dpm_defaults(hwmgr); - - /* Get leakage voltage based on leakage ID.*/ - PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)), - "Get EVV Voltage Failed. Abort Driver loading!", return -1); - - tonga_complete_dependency_tables(hwmgr); - - /* Parse pptable data read from VBIOS*/ - tonga_set_private_var_based_on_pptale(hwmgr); - - /* ULV Support*/ - ulv = &(data->ulv); - ulv->ulv_supported = false; - - /* Initalize Dynamic State Adjustment Rule Settings*/ - result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - if (result) - printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); - data->uvd_enabled = false; - - table = &(data->smc_state_table); - - /* - * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, - * Peak Current Control feature is enabled and we should program PCC HW register - */ - if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { - uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); - - switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { - case 0: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); - break; - case 1: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); - break; - case 2: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); - break; - case 3: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); - break; - case 4: - temp_reg = PHM_SET_FIELD(temp_reg, - CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); - break; - default: - printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \ - Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! 
\n"); - break; - } - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixCNB_PWRMGT_CNTL, temp_reg); - } - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_EnableSMU7ThermalManagement); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SMU7); - - data->vddc_phase_shed_control = false; - - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (!result) { - if (sys_info.value & AMD_PG_SUPPORT_UVD) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_UVDPowerGating); - if (sys_info.value & AMD_PG_SUPPORT_VCE) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating); - } - - if (0 == result) { - data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = - TONGA_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; - else - data->pcie_gen_cap = (uint32_t)sys_info.value; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - sys_info.size = sizeof(struct cgs_system_info); - sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; - result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result) - data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; - else - data->pcie_lane_cap = (uint32_t)sys_info.value; - } else { - /* Ignore return value in here, we are cleaning up a mess. 
*/ - tonga_hwmgr_backend_fini(hwmgr); - } - - return result; -} - -static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = tonga_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = tonga_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = tonga_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; - } - - hwmgr->dpm_level = level; - return ret; -} - -static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *prequest_ps, - const struct pp_power_state *pcurrent_ps) -{ - struct tonga_power_state *tonga_ps = - cast_phw_tonga_power_state(&prequest_ps->hardware); - - uint32_t sclk; - uint32_t mclk; - struct PP_Clocks minimum_clocks = {0}; - bool disable_mclk_switching; - bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - const struct phm_clock_and_voltage_limits *max_limits; - uint32_t i; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - int32_t count; - int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - - data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - - PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2, - "VI should always have 2 performance levels", - ); - - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? - &(hwmgr->dyn_state.max_clock_voltage_on_ac) : - &(hwmgr->dyn_state.max_clock_voltage_on_dc); - - if (PP_PowerSource_DC == hwmgr->power_source) { - for (i = 0; i < tonga_ps->performance_level_count; i++) { - if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk) - tonga_ps->performance_levels[i].memory_clock = max_limits->mclk; - if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk) - tonga_ps->performance_levels[i].engine_clock = max_limits->sclk; - } - } - - tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; - tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; - - tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk; - - cgs_get_active_displays_info(hwmgr->device, &info); - - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - - /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */ - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { - - max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); - stable_pstate_sclk = (max_limits->sclk * 75) / 100; - - for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { - if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) { - stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk; - break; - } - } - - if (count < 0) - stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk; - - stable_pstate_mclk = max_limits->mclk; - - minimum_clocks.engineClock = stable_pstate_sclk; - minimum_clocks.memoryClock = stable_pstate_mclk; - } - - if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) - minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; - - if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) - minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; - - tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; - - if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { - 
PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock), - "Overdrive sclk exceeds limit", - hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock); - - if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) - tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive; - } - - if (0 != hwmgr->gfx_arbiter.mclk_over_drive) { - PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock), - "Overdrive mclk exceeds limit", - hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock); - - if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) - tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive; - } - - disable_mclk_switching_for_frame_lock = phm_cap_enabled( - hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; - - sclk = tonga_ps->performance_levels[0].engine_clock; - mclk = tonga_ps->performance_levels[0].memory_clock; - - if (disable_mclk_switching) - mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock; - - if (sclk < minimum_clocks.engineClock) - sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock; - - if (mclk < minimum_clocks.memoryClock) - mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock; - - tonga_ps->performance_levels[0].engine_clock = sclk; - tonga_ps->performance_levels[0].memory_clock = mclk; - - tonga_ps->performance_levels[1].engine_clock = - (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ? 
- tonga_ps->performance_levels[1].engine_clock : - tonga_ps->performance_levels[0].engine_clock; - - if (disable_mclk_switching) { - if (mclk < tonga_ps->performance_levels[1].memory_clock) - mclk = tonga_ps->performance_levels[1].memory_clock; - - tonga_ps->performance_levels[0].memory_clock = mclk; - tonga_ps->performance_levels[1].memory_clock = mclk; - } else { - if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock) - tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock; - } - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { - for (i=0; i < tonga_ps->performance_level_count; i++) { - tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk; - tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk; - tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; - tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; - } - } - - return 0; -} - -int tonga_get_power_state_size(struct pp_hwmgr *hwmgr) -{ - return sizeof(struct tonga_power_state); -} - -static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - if (low) - return tonga_ps->performance_levels[0].memory_clock; - else - return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; -} - -static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) -{ - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (hwmgr == NULL) - return -EINVAL; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - if (low) - return tonga_ps->performance_levels[0].engine_clock; - else - return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; -} - -static uint16_t tonga_get_current_pcie_speed( - struct pp_hwmgr *hwmgr) -{ - uint32_t speed_cntl = 0; - - speed_cntl = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__PCIE, - ixPCIE_LC_SPEED_CNTL); - return((uint16_t)PHM_GET_FIELD(speed_cntl, - PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); -} - -static int tonga_get_current_pcie_lane_number( - struct pp_hwmgr *hwmgr) -{ - uint32_t link_width; - - link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, - CGS_IND_REG__PCIE, - PCIE_LC_LINK_WIDTH_CNTL, - LC_LINK_WIDTH_RD); - - PP_ASSERT_WITH_CODE((7 >= link_width), - "Invalid PCIe lane width!", return 0); - - return decode_pcie_lane_width(link_width); -} - -static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps; - ATOM_FIRMWARE_INFO_V2_2 *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - /* First retrieve the Boot clocks and VDDC from the firmware info table. - * We assume here that fw_info is unchanged if this call fails. - */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, - &size, &frev, &crev); - if (!fw_info) - /* During a test, there is no firmware info table. */ - return 0; - - /* Patch the state. 
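tonga_get_current_pcie_lane_number() above hands the raw LC_LINK_WIDTH_RD field to decode_pcie_lane_width(), which is defined elsewhere in the driver. The sketch below only illustrates what such a decoder plausibly looks like; the 0/1/2/4/8/12/16 lane encoding is an assumption for the example, not taken from this file:

#include <stdint.h>
#include <stdio.h>

/* Map the 3-bit encoded link width to a lane count (assumed encoding). */
static int decode_lane_width_sketch(uint32_t encoded)
{
        static const uint8_t lanes[] = { 0, 1, 2, 4, 8, 12, 16 };

        if (encoded >= sizeof(lanes))  /* out-of-range values fall back to 0 lanes */
                return 0;
        return lanes[encoded];
}

int main(void)
{
        printf("encoded 6 -> x%d\n", decode_lane_width_sketch(6));  /* x16 */
        return 0;
}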
*/ - data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); - data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); - data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); - data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); - data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); - data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr); - data->vbios_boot_state.pcie_lane_bootup_value = - (uint16_t)tonga_get_current_pcie_lane_number(hwmgr); - - /* set boot power state */ - ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; - ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; - ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; - ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; - - return 0; -} - -static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, - void *state, struct pp_power_state *power_state, - void *pp_table, uint32_t classification_flag) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - struct tonga_power_state *tonga_ps = - (struct tonga_power_state *)(&(power_state->hardware)); - - struct tonga_performance_level *performance_level; - - ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; - - ATOM_Tonga_POWERPLAYTABLE *powerplay_table = - (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (ATOM_Tonga_SCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); - - ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = - (ATOM_Tonga_MCLK_Dependency_Table *) - (((unsigned long)powerplay_table) + - le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - - /* The following fields are not initialized here: id orderedList allStatesList */ - power_state->classification.ui_label = - (le16_to_cpu(state_entry->usClassification) & - ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> - ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; - power_state->classification.flags = classification_flag; - /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ - - power_state->classification.temporary_state = false; - power_state->classification.to_be_deleted = false; - - power_state->validation.disallowOnDC = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC)); - - power_state->pcie.lanes = 0; - - power_state->display.disableFrameModulation = false; - power_state->display.limitRefreshrate = false; - power_state->display.enableVariBright = - (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT)); - - power_state->validation.supportedPowerLevels = 0; - power_state->uvd_clocks.VCLK = 0; - power_state->uvd_clocks.DCLK = 0; - power_state->temperatures.min = 0; - power_state->temperatures.max = 0; - - performance_level = &(tonga_ps->performance_levels - [tonga_ps->performance_level_count++]); - - PP_ASSERT_WITH_CODE( - (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS), - "Performance levels exceeds SMC limit!", - return -1); - - PP_ASSERT_WITH_CODE( - (tonga_ps->performance_level_count <= - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - "Performance levels exceeds Driver limit!", - return -1); - - /* Performance levels are arranged 
from low to high. */ - performance_level->memory_clock = - le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk); - - performance_level->engine_clock = - le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk); - - performance_level->pcie_gen = get_pcie_gen_support( - data->pcie_gen_cap, - state_entry->ucPCIEGenLow); - - performance_level->pcie_lane = get_pcie_lane_support( - data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - performance_level = - &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]); - - performance_level->memory_clock = - le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk); - - performance_level->engine_clock = - le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk); - - performance_level->pcie_gen = get_pcie_gen_support( - data->pcie_gen_cap, - state_entry->ucPCIEGenHigh); - - performance_level->pcie_lane = get_pcie_lane_support( - data->pcie_lane_cap, - state_entry->ucPCIELaneHigh); - - return 0; -} - -static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr, - unsigned long entry_index, struct pp_power_state *ps) -{ - int result; - struct tonga_power_state *tonga_ps; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = - table_info->vdd_dep_on_mclk; - - ps->hardware.magic = PhwTonga_Magic; - - tonga_ps = cast_phw_tonga_power_state(&(ps->hardware)); - - result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps, - tonga_get_pp_table_entry_callback_func); - - /* This is the earliest time we have all the dependency table and the VBIOS boot state - * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state - * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state - */ - if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { - if (dep_mclk_table->entries[0].clk != - data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " - "does not match VBIOS boot MCLK level"); - if (dep_mclk_table->entries[0].vddci != - data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " - "does not match VBIOS boot VDDCI level"); - } - - /* set DC compatible flag if this state supports DC */ - if (!ps->validation.disallowOnDC) - tonga_ps->dc_compatible = true; - - if (ps->classification.flags & PP_StateClassificationFlag_ACPI) - data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen; - else if (ps->classification.flags & PP_StateClassificationFlag_Boot) { - if (data->bacos.best_match == 0xffff) { - /* For V.I. 
use boot state as base BACO state */ - data->bacos.best_match = PP_StateClassificationFlag_Boot; - data->bacos.performance_level = tonga_ps->performance_levels[0]; - } - } - - tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK; - tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK; - - if (!result) { - uint32_t i; - - switch (ps->classification.ui_label) { - case PP_StateUILabel_Performance: - data->use_pcie_performance_levels = true; - - for (i = 0; i < tonga_ps->performance_level_count; i++) { - if (data->pcie_gen_performance.max < - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.max = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_performance.min > - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_performance.min = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_performance.max < - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.max = - tonga_ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_performance.min > - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_performance.min = - tonga_ps->performance_levels[i].pcie_lane; - } - break; - case PP_StateUILabel_Battery: - data->use_pcie_power_saving_levels = true; - - for (i = 0; i < tonga_ps->performance_level_count; i++) { - if (data->pcie_gen_power_saving.max < - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.max = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_gen_power_saving.min > - tonga_ps->performance_levels[i].pcie_gen) - data->pcie_gen_power_saving.min = - tonga_ps->performance_levels[i].pcie_gen; - - if (data->pcie_lane_power_saving.max < - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.max = - tonga_ps->performance_levels[i].pcie_lane; - - if (data->pcie_lane_power_saving.min > - tonga_ps->performance_levels[i].pcie_lane) - data->pcie_lane_power_saving.min = - tonga_ps->performance_levels[i].pcie_lane; - } - break; - default: - break; - } - } - return 0; -} - -static void -tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) -{ - uint32_t sclk, mclk, activity_percent; - uint32_t offset; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); - - sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); - - mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); - - offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity); - activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); - activity_percent += 0x80; - activity_percent >>= 8; - - seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); - - seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); - - seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); -} - -static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table); - uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; - struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table); - uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_smu7_dpm_table = 0; - - for (i = 0; i < psclk_table->count; i++) { - if (sclk == psclk_table->dpm_levels[i].value) - break; - } - - if (i >= psclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* TODO: Check SCLK in DAL's minimum clocks in case DeepSleep divider update is required.*/ - if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i=0; i < pmclk_table->count; i++) { - if (mclk == pmclk_table->dpm_levels[i].value) - break; - } - - if (i >= pmclk_table->count) - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) - data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; - - return 0; -} - -static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps) -{ - uint32_t i; - uint32_t sclk, max_sclk = 0; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_dpm_table *pdpm_table = &data->dpm_table; - - for (i = 0; i < hw_ps->performance_level_count; i++) { - sclk = hw_ps->performance_levels[i].engine_clock; - if (max_sclk < sclk) - max_sclk = sclk; - } - - for (i = 0; i < pdpm_table->sclk_table.count; i++) { - if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk) - return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ? 
- pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value : - pdpm_table->pcie_speed_table.dpm_levels[i].value); - } - - return 0; -} - -static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state); - const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state); - - uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps); - uint16_t current_link_speed; - - if (data->force_pcie_gen == PP_PCIEGenInvalid) - current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps); - else - current_link_speed = data->force_pcie_gen; - - data->force_pcie_gen = PP_PCIEGenInvalid; - data->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch(target_link_speed) { - case PP_PCIEGen3: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) - break; - data->force_pcie_gen = PP_PCIEGen2; - if (current_link_speed == PP_PCIEGen2) - break; - case PP_PCIEGen2: - if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) - break; - default: - data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr); - break; - } - } else { - if (target_link_speed < current_link_speed) - data->pspp_notify_required = true; - } - - return 0; -} - -static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_FreezeLevel), - "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_FreezeLevel), - "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", - return -1); - } - - return 0; -} - -static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) -{ - int result = 0; - - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock; - uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock; - struct tonga_dpm_table *pdpm_table = &data->dpm_table; - - struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { - 
pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on the default values - */ - PP_ASSERT_WITH_CODE( - (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; - for (i = dpm_count; i > 1; i--) { - if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { - clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; - - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value + - (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; - - } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { - clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; - - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value - - (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; - } else - pdpm_table->sclk_table.dpm_levels[i].value = - pgolden_dpm_table->sclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { - pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { - - PP_ASSERT_WITH_CODE( - (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), - "Divide by 0!", - return -1); - dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; - for (i = dpm_count; i > 1; i--) { - if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { - clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; - - pdpm_table->mclk_table.dpm_levels[i].value = - pgolden_dpm_table->mclk_table.dpm_levels[i].value + - (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; - - } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { - clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; - - pdpm_table->mclk_table.dpm_levels[i].value = - pgolden_dpm_table->mclk_table.dpm_levels[i].value - - (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; - } else - pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { - result = tonga_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - /*populate MCLK dpm table to SMU7 */ - result = tonga_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE((0 == result), - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - return result; -} - -static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr, - struct tonga_single_dpm_table * pdpm_table, - uint32_t low_limit, uint32_t high_limit) -{ - uint32_t i; - - for (i = 0; i < pdpm_table->count; i++) { - if ((pdpm_table->dpm_levels[i].value < low_limit) || - (pdpm_table->dpm_levels[i].value > high_limit)) - pdpm_table->dpm_levels[i].enabled = false; - else - pdpm_table->dpm_levels[i].enabled = true; - } - return 0; -} - -static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t high_limit_count; - - PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), - "power state did not have any performance level", - return -1); - - high_limit_count = (1 == hw_state->performance_level_count) ? 
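/*
 * Editor's note (worked example, not part of the original patch; numbers are
 * hypothetical): the OD6Plus branch above rescales only the intermediate DPM
 * levels (the top level is set directly to the requested clock, and levels 0
 * and 1 are left alone).  With a golden top SCLK of 1000 (10 kHz units) and a
 * requested overdrive top of 1100:
 *
 *   clock_percent = ((1100 - 1000) * 100) / 1000 = 10
 *   level[i]      = golden[i] + (golden[i] * 10) / 100
 *
 * so a mid level of 800 becomes 880.  Underclocking below the golden top uses
 * the mirrored formula and shrinks every intermediate level by the same
 * percentage; the MCLK table is handled identically.
 */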
0: 1; - - tonga_trim_single_dpm_states(hwmgr, - &(data->dpm_table.sclk_table), - hw_state->performance_levels[0].engine_clock, - hw_state->performance_levels[high_limit_count].engine_clock); - - tonga_trim_single_dpm_states(hwmgr, - &(data->dpm_table.mclk_table), - hw_state->performance_levels[0].memory_clock, - hw_state->performance_levels[high_limit_count].memory_clock); - - return 0; -} - -static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) -{ - int result; - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - - result = tonga_trim_dpm_states(hwmgr, tonga_ps); - if (0 != result) - return result; - - data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); - data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); - data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; - if (data->uvd_enabled) - data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - - data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); - - return 0; -} - -int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable : - (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable); -} - -int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr->smumgr, enable ? - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable : - (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable); -} - -int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (!bgate) { - data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1); - mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0x00FFFFFF; - mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) || - phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.UvdBootLevel)); - } - - return tonga_enable_disable_uvd_dpm(hwmgr, !bgate); -} - -int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state); - const struct tonga_power_state *tonga_cps = 
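/*
 * Editor's note (illustrative, not part of the original patch): UvdBootLevel
 * is a single byte inside SMU72_Discrete_DpmTable, but the SMC indirect
 * interface above reads and writes whole dwords.  The "offset /= 4;
 * offset *= 4;" pair rounds the byte offset down to its dword, and the mask
 * preserves the neighbouring bytes:
 *
 *   value &= 0x00FFFFFF;            // clear bits 31:24
 *   value |= boot_level << 24;      // place UvdBootLevel in bits 31:24
 *
 * The VCE variant below does the same read-modify-write with mask 0xFF00FFFF
 * and shift 16.
 */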
cast_const_phw_tonga_power_state(states->pcurrent_state); - - uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - - if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) { - data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1); - - mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel); - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFF00FFFF; - mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (uint32_t)(1 << data->smc_state_table.VceBootLevel)); - - tonga_enable_disable_vce_dpm(hwmgr, true); - } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0) - tonga_enable_disable_vce_dpm(hwmgr, false); - - return 0; -} - -static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - uint32_t address; - int32_t result; - - if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) - return 0; - - - memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters)); - - result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); - - if(result != 0) - return result; - - - address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); - - return tonga_copy_bytes_to_smc(hwmgr->smumgr, address, - (uint8_t *)&data->mc_reg_table.data[0], - sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, - data->sram_end); -} - -static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) - return tonga_program_memory_timing_parameters(hwmgr); - - return 0; -} - -static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (0 == data->need_update_smu7_dpm_table) - return 0; - - if ((0 == data->sclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - if ((0 == data->mclk_dpm_key_disabled) && - (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - - PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", - ); - PP_ASSERT_WITH_CODE( - 0 == smum_send_msg_to_smc(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), - "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", - return -1); - } - - data->need_update_smu7_dpm_table = 0; - - return 0; -} - -static int 
tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) -{ - const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state); - uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps); - uint8_t request; - - if (data->pspp_notify_required || - data->pcie_performance_request) { - if (target_link_speed == PP_PCIEGen3) - request = PCIE_PERF_REQ_GEN3; - else if (target_link_speed == PP_PCIEGen2) - request = PCIE_PERF_REQ_GEN2; - else - request = PCIE_PERF_REQ_GEN1; - - if(request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) { - data->pcie_performance_request = false; - return 0; - } - - if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { - if (PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); - else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); - } - } - - data->pcie_performance_request = false; - return 0; -} - -static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) -{ - int tmp_result, result = 0; - - tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); - } - - tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); - - tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); - - tmp_result = tonga_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); - - tmp_result = tonga_update_sclk_threshold(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); - - tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); - - tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); - - tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); - - tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { - tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change 
after state change!", result = tmp_result); - } - - return result; -} - -/** -* Set maximum target operating fan output PWM -* -* @param pHwMgr: the address of the powerplay hardware manager. -* @param usMaxFanPwm: max operating fan PWM in percents -* @return The response that came from the SMC. -*/ -static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); -} - -int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) -{ - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ - tonga_notify_smc_display_change(hwmgr, false); - else - tonga_notify_smc_display_change(hwmgr, true); - - return 0; -} - -/** -* Programs the display gap -* -* @param hwmgr the address of the powerplay hardware manager. -* @return always OK -*/ -int tonga_program_display_gap(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; - uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); - uint32_t display_gap2; - uint32_t pre_vbi_time_in_us; - uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info; - - info.mode_info = &mode_info; - - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); - - ref_clock = mode_info.ref_clock; - refresh_rate = mode_info.refresh_rate; - - if(0 == refresh_rate) - refresh_rate = 60; - - frame_time_in_us = 1000000 / refresh_rate; - - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; - display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64); - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - - if (num_active_displays == 1) - tonga_notify_smc_display_change(hwmgr, true); - - return 0; -} - -int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr) -{ - - tonga_program_display_gap(hwmgr); - - /* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */ - return 0; -} - -/** -* Set maximum target operating fan output RPM -* -* @param pHwMgr: the address of the powerplay hardware manager. -* @param usMaxFanRpm: max operating fan RPM value. -* @return The response that came from the SMC. 
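/*
 * Editor's note (worked example, not part of the original patch; numbers are
 * hypothetical): tonga_program_display_gap() above converts the refresh rate
 * into the SMC counter values.  With refresh_rate = 60 and
 * vblank_time_us = 500:
 *
 *   frame_time_in_us   = 1000000 / 60       = 16666
 *   pre_vbi_time_in_us = 16666 - 200 - 500  = 15966
 *   display_gap2       = 15966 * (ref_clock / 100)
 *   VBlankTimeout      = 16666 - 15966      = 700
 *   PreVBlankGap       = 0x64
 *
 * i.e. the SMC is told how long it may run before the vertical blanking
 * interval and how long the blanking window itself lasts.
 */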
-*/ -static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) -{ - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; - - if (phm_is_hw_access_blocked(hwmgr)) - return 0; - - return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 0 : -1); -} - -uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr) -{ - uint32_t reference_clock; - uint32_t tc; - uint32_t divide; - - ATOM_FIRMWARE_INFO *fw_info; - uint16_t size; - uint8_t frev, crev; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - - tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); - - if (tc) - return TCLK; - - fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, - &size, &frev, &crev); - - if (!fw_info) - return 0; - - reference_clock = le16_to_cpu(fw_info->usReferenceClock); - - divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); - - if (0 != divide) - return reference_clock / 4; - - return reference_clock; -} - -int tonga_dpm_set_interrupt_state(void *private_data, - unsigned src_id, unsigned type, - int enabled) -{ - uint32_t cg_thermal_int; - struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; - - if (hwmgr == NULL) - return -EINVAL; - - switch (type) { - case AMD_THERMAL_IRQ_LOW_TO_HIGH: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - - case AMD_THERMAL_IRQ_HIGH_TO_LOW: - if (enabled) { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } else { - cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); - } - break; - default: - break; - } - return 0; -} - -int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, - const void *thermal_interrupt_info) -{ - int result; - const struct pp_interrupt_registration_info *info = - (const struct pp_interrupt_registration_info *)thermal_interrupt_info; - - if (info == NULL) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, - tonga_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, - tonga_dpm_set_interrupt_state, - info->call_back, info->context); - - if (result) - return -EINVAL; - - return 0; -} - -bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - bool is_update_required = false; - struct cgs_display_info info = {0,0,NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - - 
if (data->display_timing.num_existing_displays != info.display_count) - is_update_required = true; -/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL - if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - cgs_get_min_clock_settings(hwmgr->device, &min_clocks); - if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) - is_update_required = true; -*/ - return is_update_required; -} - -static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1, - const struct tonga_performance_level *pl2) -{ - return ((pl1->memory_clock == pl2->memory_clock) && - (pl1->engine_clock == pl2->engine_clock) && - (pl1->pcie_gen == pl2->pcie_gen) && - (pl1->pcie_lane == pl2->pcie_lane)); -} - -int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) -{ - const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1); - const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2); - int i; - - if (equal == NULL || psa == NULL || psb == NULL) - return -EINVAL; - - /* If the two states don't even have the same number of performance levels they cannot be the same state. */ - if (psa->performance_level_count != psb->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < psa->performance_level_count; i++) { - if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { - /* If we have found even one performance level pair that is different the states are different. */ - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); - *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); - *equal &= (psa->sclk_threshold == psb->sclk_threshold); - *equal &= (psa->acp_clk == psb->acp_clk); - - return 0; -} - -static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - if (mode) { - /* stop auto-manage */ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl)) - tonga_fan_ctrl_stop_smc_fan_control(hwmgr); - tonga_fan_ctrl_set_static_mode(hwmgr, mode); - } else - /* restart auto-manage */ - tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr); - - return 0; -} - -static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) -{ - if (hwmgr->fan_ctrl_is_in_default_mode) - return hwmgr->fan_ctrl_default_mode; - else - return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_FDO_CTRL2, FDO_PWM_MODE); -} - -static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!data->sclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - case PP_MCLK: - if (!data->mclk_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - case PP_PCIE: - { - uint32_t tmp = mask & 
data->dpm_level_enable_mask.pcie_dpm_enable_mask; - uint32_t level = 0; - - while (tmp >>= 1) - level++; - - if (!data->pcie_dpm_key_disabled) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_PCIeDPM_ForceLevel, - level); - break; - } - default: - break; - } - - return 0; -} - -static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, char *buf) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); - clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_PCIE: - pcie_speed = tonga_get_current_pcie_speed(hwmgr); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", - (i == now) ? 
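/*
 * Editor's note (illustrative, not part of the original patch): for PP_PCIE
 * the user mask is collapsed to the index of its highest set bit before being
 * sent to the SMC.  With pcie_dpm_enable_mask = 0x7 and mask = 0x4:
 *
 *   tmp = 0x4;  while (tmp >>= 1) level++;   // level = 2
 *
 * The SCLK/MCLK printout above emits one "<index>: <freq>Mhz" line per DPM
 * level (values are stored in 10 kHz units, hence the /100) and marks the
 * first level at or above the current clock with '*'.
 */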
"*" : ""); - break; - default: - break; - } - return size; -} - -static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); - struct tonga_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *golden_sclk_table = - &(data->golden_dpm_table.sclk_table); - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); - struct tonga_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - struct tonga_single_dpm_table *golden_mclk_table = - &(data->golden_dpm_table.mclk_table); - struct pp_power_state *ps; - struct tonga_power_state *tonga_ps; - - if (value > 20) - value = 20; - - ps = hwmgr->request_ps; - - if (ps == NULL) - return -EINVAL; - - tonga_ps = cast_phw_tonga_power_state(&ps->hardware); - - tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} - -static const struct pp_hwmgr_func tonga_hwmgr_funcs = { - .backend_init = &tonga_hwmgr_backend_init, - .backend_fini = &tonga_hwmgr_backend_fini, - .asic_setup = &tonga_setup_asic_task, - .dynamic_state_management_enable = &tonga_enable_dpm_tasks, - .dynamic_state_management_disable = &tonga_disable_dpm_tasks, - .apply_state_adjust_rules = tonga_apply_state_adjust_rules, - .force_dpm_level = &tonga_force_dpm_level, - .power_state_set = tonga_set_power_state_tasks, - .get_power_state_size = tonga_get_power_state_size, - .get_mclk = tonga_dpm_get_mclk, - .get_sclk = tonga_dpm_get_sclk, - .patch_boot_state = tonga_dpm_patch_boot_state, - .get_pp_table_entry = tonga_get_pp_table_entry, - .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries, - .print_current_perforce_level = tonga_print_current_perforce_level, - .powerdown_uvd = tonga_phm_powerdown_uvd, - .powergate_uvd = tonga_phm_powergate_uvd, - .powergate_vce = 
tonga_phm_powergate_vce, - .disable_clock_power_gating = tonga_phm_disable_clock_power_gating, - .update_clock_gatings = tonga_phm_update_clock_gatings, - .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment, - .display_config_changed = tonga_display_configuration_changed_task, - .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output, - .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output, - .get_temperature = tonga_thermal_get_temperature, - .stop_thermal_controller = tonga_thermal_stop_thermal_controller, - .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info, - .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent, - .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent, - .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default, - .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm, - .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm, - .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller, - .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt, - .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration, - .check_states_equal = tonga_check_states_equal, - .set_fan_control_mode = tonga_set_fan_control_mode, - .get_fan_control_mode = tonga_get_fan_control_mode, - .force_clock_level = tonga_force_clock_level, - .print_clock_levels = tonga_print_clock_levels, - .get_sclk_od = tonga_get_sclk_od, - .set_sclk_od = tonga_set_sclk_od, - .get_mclk_od = tonga_get_mclk_od, - .set_mclk_od = tonga_set_mclk_od, -}; - -int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) -{ - hwmgr->hwmgr_func = &tonga_hwmgr_funcs; - hwmgr->pptable_func = &tonga_pptable_funcs; - pp_tonga_thermal_initialize(hwmgr); - return 0; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h deleted file mode 100644 index 3961884bfa9b..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef TONGA_HWMGR_H -#define TONGA_HWMGR_H - -#include "hwmgr.h" -#include "smu72_discrete.h" -#include "ppatomctrl.h" -#include "ppinterrupt.h" -#include "tonga_powertune.h" -#include "pp_endian.h" - -#define TONGA_MAX_HARDWARE_POWERLEVELS 2 -#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 - -struct tonga_performance_level { - uint32_t memory_clock; - uint32_t engine_clock; - uint16_t pcie_gen; - uint16_t pcie_lane; -}; - -struct _phw_tonga_bacos { - uint32_t best_match; - uint32_t baco_flags; - struct tonga_performance_level performance_level; -}; -typedef struct _phw_tonga_bacos phw_tonga_bacos; - -struct _phw_tonga_uvd_clocks { - uint32_t VCLK; - uint32_t DCLK; -}; - -typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks; - -struct _phw_tonga_vce_clocks { - uint32_t EVCLK; - uint32_t ECCLK; -}; - -typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks; - -struct tonga_power_state { - uint32_t magic; - phw_tonga_uvd_clocks uvd_clocks; - phw_tonga_vce_clocks vce_clocks; - uint32_t sam_clk; - uint32_t acp_clk; - uint16_t performance_level_count; - bool dc_compatible; - uint32_t sclk_threshold; - struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS]; -}; - -struct _phw_tonga_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; -typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level; - -#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define TONGA_MINIMUM_ENGINE_CLOCK 2500 - -struct tonga_single_dpm_table { - uint32_t count; - phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct tonga_dpm_table { - struct tonga_single_dpm_table sclk_table; - struct tonga_single_dpm_table mclk_table; - struct tonga_single_dpm_table pcie_speed_table; - struct tonga_single_dpm_table vddc_table; - struct tonga_single_dpm_table vdd_gfx_table; - struct tonga_single_dpm_table vdd_ci_table; - struct tonga_single_dpm_table mvdd_table; -}; -typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table; - - -struct _phw_tonga_clock_regisiters { - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t vDLL_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_SS1; - uint32_t vMPLL_SS2; -}; -typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers; - -struct _phw_tonga_voltage_smio_registers { - uint32_t vs0_vid_lower_smio_cntl; -}; -typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers; - - -struct _phw_tonga_mc_reg_entry { - uint32_t mclk_max; - uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; -typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry; - -struct _phw_tonga_mc_reg_table { - uint8_t last; /* number of registers*/ - uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ - uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. 
bit0->address[0], bit1->address[1], etc.*/ - phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; -typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table; - -#define DISABLE_MC_LOADMICROCODE 1 -#define DISABLE_MC_CFGPROGRAMMING 2 - -/*Ultra Low Voltage parameter structure */ -struct _phw_tonga_ulv_parm{ - bool ulv_supported; - uint32_t ch_ulv_parameter; - uint32_t ulv_volt_change_delay; - struct tonga_performance_level ulv_power_level; -}; -typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm; - -#define TONGA_MAX_LEAKAGE_COUNT 8 - -struct _phw_tonga_leakage_voltage { - uint16_t count; - uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT]; - uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT]; -}; -typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage; - -struct _phw_tonga_display_timing { - uint32_t min_clock_insr; - uint32_t num_existing_displays; -}; -typedef struct _phw_tonga_display_timing phw_tonga_display_timing; - -struct _phw_tonga_dpmlevel_enable_mask { - uint32_t uvd_dpm_enable_mask; - uint32_t vce_dpm_enable_mask; - uint32_t acp_dpm_enable_mask; - uint32_t samu_dpm_enable_mask; - uint32_t sclk_dpm_enable_mask; - uint32_t mclk_dpm_enable_mask; - uint32_t pcie_dpm_enable_mask; -}; -typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask; - -struct _phw_tonga_pcie_perf_range { - uint16_t max; - uint16_t min; -}; -typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range; - -struct _phw_tonga_vbios_boot_state { - uint16_t mvdd_bootup_value; - uint16_t vddc_bootup_value; - uint16_t vddci_bootup_value; - uint16_t vddgfx_bootup_value; - uint32_t sclk_bootup_value; - uint32_t mclk_bootup_value; - uint16_t pcie_gen_bootup_value; - uint16_t pcie_lane_bootup_value; -}; -typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state; - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -/* We need to review which fields are needed. */ -/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
*/ -struct tonga_hwmgr { - struct tonga_dpm_table dpm_table; - struct tonga_dpm_table golden_dpm_table; - - uint32_t voting_rights_clients0; - uint32_t voting_rights_clients1; - uint32_t voting_rights_clients2; - uint32_t voting_rights_clients3; - uint32_t voting_rights_clients4; - uint32_t voting_rights_clients5; - uint32_t voting_rights_clients6; - uint32_t voting_rights_clients7; - uint32_t static_screen_threshold_unit; - uint32_t static_screen_threshold; - uint32_t voltage_control; - uint32_t vdd_gfx_control; - - uint32_t vddc_vddci_delta; - uint32_t vddc_vddgfx_delta; - - struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; - struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; - struct pp_interrupt_registration_info smc_to_host_interrupt_info; - uint32_t active_auto_throttle_sources; - - struct pp_interrupt_registration_info external_throttle_interrupt; - irq_handler_func_t external_throttle_callback; - void *external_throttle_context; - - struct pp_interrupt_registration_info ctf_interrupt_info; - irq_handler_func_t ctf_callback; - void *ctf_context; - - phw_tonga_clock_registers clock_registers; - phw_tonga_voltage_smio_registers voltage_smio_registers; - - bool is_memory_GDDR5; - uint16_t acpi_vddc; - bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ - uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ - uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ - uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ - uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ - uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ - phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ - phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ - phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ - - uint32_t mvdd_control; - uint32_t vddc_mask_low; - uint32_t mvdd_mask_low; - uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ - uint16_t min_vddc_in_pp_table; - uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ - uint16_t min_vddci_in_pp_table; - uint32_t mclk_strobe_mode_threshold; - uint32_t mclk_stutter_mode_threshold; - uint32_t mclk_edc_enable_threshold; - uint32_t mclk_edc_wr_enable_threshold; - bool is_uvd_enabled; - bool is_xdma_enabled; - phw_tonga_vbios_boot_state vbios_boot_state; - - bool battery_state; - bool is_tlu_enabled; - bool pcie_performance_request; - - /* -------------- SMC SRAM Address of firmware header tables ----------------*/ - uint32_t sram_end; /* The first address after the SMC SRAM. */ - uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ - uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ - uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ - uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ - uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ - SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ - SMU72_Discrete_MCRegisters mc_reg_table; - SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ - /* -------------- Stuff originally coming from Evergreen --------------------*/ - phw_tonga_mc_reg_table tonga_mc_reg_table; - uint32_t vdd_ci_control; - pp_atomctrl_voltage_table vddc_voltage_table; - pp_atomctrl_voltage_table vddci_voltage_table; - pp_atomctrl_voltage_table vddgfx_voltage_table; - pp_atomctrl_voltage_table mvdd_voltage_table; - - uint32_t mgcg_cgtt_local2; - uint32_t mgcg_cgtt_local3; - uint32_t gpio_debug; - uint32_t mc_micro_code_feature; - uint32_t highest_mclk; - uint16_t acpi_vdd_ci; - uint8_t mvdd_high_index; - uint8_t mvdd_low_index; - bool dll_defaule_on; - bool performance_request_registered; - - /* ----------------- Low Power Features ---------------------*/ - phw_tonga_bacos bacos; - phw_tonga_ulv_parm ulv; - /* ----------------- CAC Stuff ---------------------*/ - uint32_t cac_table_start; - bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ - bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ - bool cac_enabled; - /* ----------------- DPM2 Parameters ---------------------*/ - uint32_t power_containment_features; - bool enable_bapm_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool disable_uvd_power_tune_feature; - phw_tonga_pt_defaults *power_tune_defaults; - SMU72_Discrete_PmFuses power_tune_table; - uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ - uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ - - /* ----------------- Phase Shedding ---------------------*/ - bool vddc_phase_shed_control; - /* --------------------- DI/DT --------------------------*/ - phw_tonga_display_timing display_timing; - /* --------- ReadRegistry data for memory and engine clock margins ---- */ - uint32_t engine_clock_data; - uint32_t memory_clock_data; - /* -------- Thermal Temperature Setting --------------*/ - phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask; - uint32_t need_update_smu7_dpm_table; - uint32_t sclk_dpm_key_disabled; - uint32_t mclk_dpm_key_disabled; - uint32_t pcie_dpm_key_disabled; - uint32_t min_engine_clocks; /* used to store the previous dal min sclock */ - phw_tonga_pcie_perf_range pcie_gen_performance; - phw_tonga_pcie_perf_range pcie_lane_performance; - phw_tonga_pcie_perf_range pcie_gen_power_saving; - phw_tonga_pcie_perf_range pcie_lane_power_saving; - bool use_pcie_performance_levels; - bool use_pcie_power_saving_levels; - uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */ - uint32_t mclk_activity_target; - uint32_t low_sclk_interrupt_threshold; - uint32_t last_mclk_dpm_enable_mask; - bool uvd_enabled; - uint32_t pcc_monitor_enabled; - - /* --------- Power Gating States ------------*/ - bool uvd_power_gated; /* 1: gated, 0:not gated */ - bool vce_power_gated; /* 1: gated, 0:not gated */ - bool samu_power_gated; /* 1: gated, 0:not gated */ - bool acp_power_gated; /* 1: gated, 0:not gated */ - bool pg_acp_init; -}; - -typedef struct tonga_hwmgr tonga_hwmgr; - -#define TONGA_DPM2_NEAR_TDP_DEC 10 -#define TONGA_DPM2_ABOVE_SAFE_INC 5 -#define TONGA_DPM2_BELOW_SAFE_INC 20 - -#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. 
*/ - -#define TONGA_DPM2_LTS_TRUNCATE 0 - -#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */ - -#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */ -#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */ - -#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50 - -#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF -#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12 -#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 -#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E -#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF - -#define TONGA_VOLTAGE_CONTROL_NONE 0x0 -#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2 -#define TONGA_VOLTAGE_CONTROL_MERGED 0x3 - -#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */ - -#define TONGA_UNUSED_GPIO_PIN 0x7F - -int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); -int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); -int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); -int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable); -int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); -uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c deleted file mode 100644 index 47ef1ca2d78b..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c +++ /dev/null @@ -1,590 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include <asm/div64.h> -#include "tonga_thermal.h" -#include "tonga_hwmgr.h" -#include "tonga_smumgr.h" -#include "tonga_ppsmc.h" -#include "smu/smu_7_1_2_d.h" -#include "smu/smu_7_1_2_sh_mask.h" - -/** -* Get Fan Speed Control Parameters. -* @param hwmgr the address of the powerplay hardware manager. -* @param pSpeed is the address of the structure where the result is to be placed. -* @exception Always succeeds except if we cannot zero out the output structure. 
-*/ -int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) -{ - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - fan_speed_info->supports_percent_read = true; - fan_speed_info->supports_percent_write = true; - fan_speed_info->min_percent = 0; - fan_speed_info->max_percent = 100; - - if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - fan_speed_info->supports_rpm_read = true; - fan_speed_info->supports_rpm_write = true; - fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; - fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; - } else { - fan_speed_info->min_rpm = 0; - fan_speed_info->max_rpm = 0; - } - - return 0; -} - -/** -* Get Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param pSpeed is the address of the structure where the result is to be placed. -* @exception Fails is the 100% setting appears to be 0. -*/ -int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); - - if (0 == duty100) - return -EINVAL; - - - tmp64 = (uint64_t)duty * 100; - do_div(tmp64, duty100); - *speed = (uint32_t)tmp64; - - if (*speed > 100) - *speed = 100; - - return 0; -} - -/** -* Get Fan Speed in RPM. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the address of the structure where the result is to be placed. -* @exception Returns not supported if no fan is found or if pulses per revolution are not set -*/ -int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) -{ - return 0; -} - -/** -* Set Fan Speed Control to static mode, so that the user can decide what speed to use. -* @param hwmgr the address of the powerplay hardware manager. -* mode the fan control mode, 0 default, 1 by percent, 5, by RPM -* @exception Should always succeed. -*/ -int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) -{ - - if (hwmgr->fan_ctrl_is_in_default_mode) { - hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); - hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); - hwmgr->fan_ctrl_is_in_default_mode = false; - } - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); - - return 0; -} - -/** -* Reset Fan Speed Control to default mode. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Should always succeed. 
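/*
 * Editor's note (worked example, not part of the original patch; numbers are
 * hypothetical): tonga_fan_ctrl_get_fan_speed_percent() above scales the
 * current PWM duty against the 100 % reference read from CG_FDO_CTRL1.
 * With duty100 = 255 and duty = 128:
 *
 *   speed = 128 * 100 / 255 = 50     (64-bit intermediate via do_div)
 *
 * The setter inverts this: duty = speed * duty100 / 100, programs
 * FDO_STATIC_DUTY and then switches the fan to FDO_PWM_MODE_STATIC.
 */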
-*/ -int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->fan_ctrl_is_in_default_mode) { - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); - hwmgr->fan_ctrl_is_in_default_mode = true; - } - - return 0; -} - -int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - int result; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY); - result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; -/* - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM)) - hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM); - else - hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM); -*/ - } else { - cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE); - result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; - } -/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. - if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0) - result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \ - hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL); -*/ - return result; -} - - -int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) -{ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; -} - -/** -* Set Fan Speed in percent. -* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (0% - 100%) to be set. -* @exception Fails is the 100% setting appears to be 0. -*/ -int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - uint32_t duty100; - uint32_t duty; - uint64_t tmp64; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return -EINVAL; - - if (speed > 100) - speed = 100; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - tonga_fan_ctrl_stop_smc_fan_control(hwmgr); - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - - if (0 == duty100) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); - - return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); -} - -/** -* Reset Fan Speed to default. -* @param hwmgr the address of the powerplay hardware manager. -* @exception Always succeeds. -*/ -int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) -{ - int result; - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - if (0 == result) - result = tonga_fan_ctrl_start_smc_fan_control(hwmgr); - } else - result = tonga_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set Fan Speed in RPM. 
-* @param hwmgr the address of the powerplay hardware manager. -* @param speed is the percentage value (min - max) to be set. -* @exception Fails is the speed not lie between min and max. -*/ -int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) -{ - return 0; -} - -/** -* Reads the remote temperature from the SIslands thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr) -{ - int temp; - - temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); - -/* Bit 9 means the reading is lower than the lowest usable value. */ - if (0 != (0x200 & temp)) - temp = TONGA_THERMAL_MAXIMUM_TEMP_READING; - else - temp = (temp & 0x1ff); - - temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - return temp; -} - -/** -* Set the requested temperature range for high and low alert signals -* -* @param hwmgr The address of the hardware manager. -* @param range Temperature range to be programmed for high and low alert signals -* @exception PP_Result_BadInput if the input data is not valid. -*/ -static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) -{ - uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - - if (low < low_temp) - low = low_temp; - if (high > high_temp) - high = high_temp; - - if (low > high) - return -EINVAL; - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - - return 0; -} - -/** -* Programs thermal controller one-time setting registers -* -* @param hwmgr The address of the hardware manager. -*/ -static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_CTRL, EDGE_PER_REV, - hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); - - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); - - return 0; -} - -/** -* Enable thermal alerts on the RV770 thermal controller. -* -* @param hwmgr The address of the hardware manager. -*/ -static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to enable internal thermal interrupts */ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; -} - -/** -* Disable thermal alerts on the RV770 thermal controller. -* @param hwmgr The address of the hardware manager. 
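/*
 * Editor's note (illustrative, not part of the original patch): the CTF_TEMP
 * field decoded above is a 9-bit reading, with bit 9 flagging an out-of-range
 * value.  Example, raw field = 0x05A:
 *
 *   temp = (0x05A & 0x1ff) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES
 *        = 90 degrees, scaled to the powerplay temperature units
 *
 * tonga_thermal_set_temperature_range() programs the intersection of the
 * requested [low, high] range with the TONGA_THERMAL_MINIMUM/MAXIMUM_ALERT_TEMP
 * window into DIG_THERM_INTL/INTH, and rejects an inverted result.
 */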
-*/ -static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr) -{ - uint32_t alert; - - alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); - alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK); - PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); - - /* send message to SMU to disable internal thermal interrupts */ - return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; -} - -/** -* Uninitialize the thermal controller. -* Currently just disables alerts. -* @param hwmgr The address of the hardware manager. -*/ -int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) -{ - int result = tonga_thermal_disable_alert(hwmgr); - - if (hwmgr->thermal_controller.fanInfo.bNoFan) - tonga_fan_ctrl_set_default_mode(hwmgr); - - return result; -} - -/** -* Set up the fan table to control the fan using the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - uint32_t duty100; - uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; - uint16_t fdo_min, slope1, slope2; - uint32_t reference_clock; - int res; - uint64_t tmp64; - - if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) - return 0; - - if (0 == data->fan_table_start) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); - - if (0 == duty100) { - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - return 0; - } - - tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; - do_div(tmp64, 10000); - fdo_min = (uint16_t)tmp64; - - t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; - t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; - - pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; - pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; - - slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); - fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); - fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = 
cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = tonga_get_xclk(hwmgr); - - fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); - - fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - - fan_table.FanControl_GL_Flag = 1; - - res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); -/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. - if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ - hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); - - if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) - res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ - hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); - - if (0 != res) - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); -*/ - return 0; -} - -/** -* Start the fan control on the SMC. -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ -/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. - * Make sure that we still think controlling the fan is OK. -*/ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { - tonga_fan_ctrl_start_smc_fan_control(hwmgr); - tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); - } - - return 0; -} - -/** -* Set temperature range for high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from set temperature range routine -*/ -int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; - - if (range == NULL) - return -EINVAL; - - return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max); -} - -/** -* Programs one-time setting registers -* @param hwmgr the address of the powerplay hardware manager. 
-* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from initialize thermal controller routine -*/ -int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return tonga_thermal_initialize(hwmgr); -} - -/** -* Enable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from enable alert routine -*/ -int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return tonga_thermal_enable_alert(hwmgr); -} - -/** -* Disable high and low alerts -* @param hwmgr the address of the powerplay hardware manager. -* @param pInput the pointer to input data -* @param pOutput the pointer to output data -* @param pStorage the pointer to temporary storage -* @param Result the last failure code -* @return result from disable alert routine -*/ -static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) -{ - return tonga_thermal_disable_alert(hwmgr); -} - -static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = { - { NULL, tf_tonga_thermal_initialize }, - { NULL, tf_tonga_thermal_set_temperature_range }, - { NULL, tf_tonga_thermal_enable_alert }, -/* We should restrict performance levels to low before we halt the SMC. - * On the other hand we are still in boot state when we do this so it would be pointless. - * If this assumption changes we have to revisit this table. - */ - { NULL, tf_tonga_thermal_setup_fan_table}, - { NULL, tf_tonga_thermal_start_smc_fan_control}, - { NULL, NULL } -}; - -static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = { - 0, - PHM_MasterTableFlag_None, - tonga_thermal_start_thermal_controller_master_list -}; - -static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = { - { NULL, tf_tonga_thermal_disable_alert}, - { NULL, tf_tonga_thermal_set_temperature_range}, - { NULL, tf_tonga_thermal_enable_alert}, - { NULL, NULL } -}; - -static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = { - 0, - PHM_MasterTableFlag_None, - tonga_thermal_set_temperature_range_master_list -}; - -int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) -{ - if (!hwmgr->thermal_controller.fanInfo.bNoFan) - tonga_fan_ctrl_set_default_mode(hwmgr); - return 0; -} - -/** -* Initializes the thermal controller related functions in the Hardware Manager structure. -* @param hwmgr The address of the hardware manager. -* @exception Any error code from the low-level communication. 
-*/ -int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr) -{ - int result; - - result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); - - if (0 == result) { - result = phm_construct_table(hwmgr, - &tonga_thermal_start_thermal_controller_master, - &(hwmgr->start_thermal_controller)); - if (0 != result) - phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); - } - - if (0 == result) - hwmgr->fan_ctrl_is_in_default_mode = true; - return result; -} - diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h deleted file mode 100644 index aa335f267e25..000000000000 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef TONGA_THERMAL_H -#define TONGA_THERMAL_H - -#include "hwmgr.h" - -#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1 -#define TONGA_THERMAL_LOW_ALERT_MASK 0x2 - -#define TONGA_THERMAL_MINIMUM_TEMP_READING -256 -#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255 - -#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0 -#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255 - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - - -extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); -extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result); - -extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); -extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); -extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); -extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr); -extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); -extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); -extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); -extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); - -#endif - diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index b764c8c05ec8..3fb5e57a378b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -29,6 +29,19 @@ #include "amd_shared.h" #include "cgs_common.h" +enum amd_pp_sensors { + AMDGPU_PP_SENSOR_GFX_SCLK = 0, + AMDGPU_PP_SENSOR_VDDNB, + AMDGPU_PP_SENSOR_VDDGFX, + AMDGPU_PP_SENSOR_UVD_VCLK, + AMDGPU_PP_SENSOR_UVD_DCLK, + AMDGPU_PP_SENSOR_VCE_ECCLK, + AMDGPU_PP_SENSOR_GPU_LOAD, + AMDGPU_PP_SENSOR_GFX_MCLK, + AMDGPU_PP_SENSOR_GPU_TEMP, + AMDGPU_PP_SENSOR_VCE_POWER, + AMDGPU_PP_SENSOR_UVD_POWER, +}; enum amd_pp_event { AMD_PP_EVENT_INITIALIZE = 0, @@ -131,9 +144,8 @@ struct amd_pp_init { struct cgs_device *device; uint32_t chip_family; uint32_t chip_id; - uint32_t rev_id; - bool powercontainment_enabled; }; + enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_None = 0, AMD_PP_DisplayConfigType_DP54 , @@ -261,6 +273,7 @@ enum amd_pp_clock_type { struct amd_pp_clocks { uint32_t count; uint32_t clock[MAX_NUM_CLOCKS]; + uint32_t latency[MAX_NUM_CLOCKS]; }; @@ -332,8 +345,6 @@ struct amd_powerplay_funcs { int (*powergate_uvd)(void *handle, bool gate); int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, void *input, void *output); - void (*print_current_performance_level)(void *handle, - struct seq_file *m); int (*set_fan_control_mode)(void *handle, uint32_t mode); int (*get_fan_control_mode)(void *handle); int (*set_fan_speed_percent)(void *handle, uint32_t percent); @@ -347,6 +358,7 @@ struct amd_powerplay_funcs { int (*set_sclk_od)(void *handle, 
uint32_t value); int (*get_mclk_od)(void *handle); int (*set_mclk_od)(void *handle, uint32_t value); + int (*read_sensor)(void *handle, int idx, int32_t *value); }; struct amd_powerplay { @@ -378,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle, int amd_powerplay_get_display_mode_validation_clocks(void *handle, struct amd_pp_simple_clock_info *output); +int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id); + #endif /* _AMD_POWERPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 962cb5385951..d4495839c64c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -341,7 +341,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); -extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr); extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); extern int phm_set_power_state(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index bf0d2accf7bf..4f0fedd1e9d3 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -31,15 +31,20 @@ #include "hwmgr_ppt.h" #include "ppatomctrl.h" #include "hwmgr_ppt.h" +#include "power_state.h" struct pp_instance; struct pp_hwmgr; -struct pp_hw_power_state; -struct pp_power_state; -struct PP_VCEState; struct phm_fan_speed_info; struct pp_atomctrl_voltage_table; +extern int amdgpu_powercontainment; +extern int amdgpu_sclk_deep_sleep_en; +extern unsigned amdgpu_pp_feature_mask; + +#define VOLTAGE_SCALE 4 + +uint8_t convert_to_vid(uint16_t vddc); enum DISPLAY_GAP { DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. 
*/ @@ -49,7 +54,6 @@ enum DISPLAY_GAP { }; typedef enum DISPLAY_GAP DISPLAY_GAP; - struct vi_dpm_level { bool enabled; uint32_t value; @@ -71,6 +75,19 @@ enum PP_Result { #define PCIE_PERF_REQ_GEN2 3 #define PCIE_PERF_REQ_GEN3 4 +enum PP_FEATURE_MASK { + PP_SCLK_DPM_MASK = 0x1, + PP_MCLK_DPM_MASK = 0x2, + PP_PCIE_DPM_MASK = 0x4, + PP_SCLK_DEEP_SLEEP_MASK = 0x8, + PP_POWER_CONTAINMENT_MASK = 0x10, + PP_UVD_HANDSHAKE_MASK = 0x20, + PP_SMC_VOLTAGE_CONTROL_MASK = 0x40, + PP_VBI_TIME_SUPPORT_MASK = 0x80, + PP_ULV_MASK = 0x100, + PP_ENABLE_GFX_CG_THRU_SMU = 0x200 +}; + enum PHM_BackEnd_Magic { PHM_Dummy_Magic = 0xAA5555AA, PHM_RV770_Magic = 0xDCBAABCD, @@ -294,8 +311,6 @@ struct pp_hwmgr_func { int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*power_state_set)(struct pp_hwmgr *hwmgr, const void *state); - void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr, - struct seq_file *m); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); int (*display_config_changed)(struct pp_hwmgr *hwmgr); @@ -342,6 +357,7 @@ struct pp_hwmgr_func { int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*get_mclk_od)(struct pp_hwmgr *hwmgr); int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value); }; struct pp_table_func { @@ -351,7 +367,7 @@ struct pp_table_func { int (*pptable_get_vce_state_table_entry)( struct pp_hwmgr *hwmgr, unsigned long i, - struct PP_VCEState *vce_state, + struct pp_vce_state *vce_state, void **clock_info, unsigned long *flag); }; @@ -570,22 +586,43 @@ struct phm_microcode_version_info { uint32_t NB; }; +#define PP_MAX_VCE_LEVELS 6 + +enum PP_VCE_LEVEL { + PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ + PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ + PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ + PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ + PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ + PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ +}; + + +enum PP_TABLE_VERSION { + PP_TABLE_V0 = 0, + PP_TABLE_V1, + PP_TABLE_V2, + PP_TABLE_MAX +}; + /** * The main hardware manager structure. 
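 *
 * A hedged sketch of the assumed usage of the new feature_mask member added
 * to this structure (names below are illustrative, not taken from this
 * patch): it is presumably seeded from the amdgpu_pp_feature_mask module
 * parameter declared earlier and then tested against the PP_FEATURE_MASK
 * bits, roughly:
 *
 *	hwmgr->feature_mask = amdgpu_pp_feature_mask;
 *	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
 *		smu7_enable_sclk_deep_sleep(hwmgr);	(hypothetical helper)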
*/ struct pp_hwmgr { uint32_t chip_family; uint32_t chip_id; - uint32_t hw_revision; - uint32_t sub_sys_id; - uint32_t sub_vendor_id; + uint32_t pp_table_version; void *device; struct pp_smumgr *smumgr; const void *soft_pp_table; uint32_t soft_pp_table_size; void *hardcode_pp_table; bool need_pp_table_upload; + + struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS]; + uint32_t num_vce_state_tables; + enum amd_dpm_forced_level dpm_level; bool block_hw_access; struct phm_gfx_arbiter gfx_arbiter; @@ -614,7 +651,6 @@ struct pp_hwmgr { uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; - bool powercontainment_enabled; uint32_t fan_ctrl_default_mode; uint32_t tmin; struct phm_microcode_version_info microcode_version_info; @@ -624,6 +660,7 @@ struct pp_hwmgr { struct pp_power_state *boot_ps; struct pp_power_state *uvd_ps; struct amd_pp_display_configuration display_config; + uint32_t feature_mask; }; @@ -637,16 +674,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask); -extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, uint32_t index); - -extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value); extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, @@ -654,12 +682,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t value, uint32_t mask); -extern void phm_wait_for_indirect_register_unequal( - struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask); + extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr); @@ -673,6 +696,8 @@ extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, st extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max); extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes); extern int32_t phm_get_dpm_level_enable_mask_value(void *table); +extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table, + uint32_t voltage); extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage); extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci); extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level); @@ -683,6 +708,10 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); +extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, + uint32_t sclk, uint16_t id, uint16_t *voltage); + #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT @@ -697,44 +726,6 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_FIELD_SHIFT(reg, field)) -#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_on_register(hwmgr, index, value, mask) - -#define 
PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_for_register_unequal(hwmgr, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask) - -/* Operations on named registers. */ - -#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - /* Operations on named fields. 
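 *
 * For illustration only, a usage sketch taken from the Tonga thermal code
 * removed above: these helpers wrap the cgs register accessors so a caller
 * can read or write a single named bitfield, e.g.
 *
 *	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 *			CG_FDO_CTRL1, FMAX_DUTY100);
 *	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 *			CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);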
*/ #define PHM_READ_FIELD(device, reg, field) \ @@ -762,60 +753,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) -#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) -#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ +#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -/* Operations on arrays of registers & fields. 
*/ - -#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \ - cgs_read_register(device, mm##reg + (offset)) - -#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \ - cgs_write_register(device, mm##reg + (offset), value) - -#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \ - PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field) - -#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \ - PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \ - reg, field, fieldvalue)) - -#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h index f497e7d98e6d..0de443612312 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h @@ -23,8 +23,7 @@ #ifndef _POLARIS10_PWRVIRUS_H #define _POLARIS10_PWRVIRUS_H -#define mmSMC_IND_INDEX_11 0x01AC -#define mmSMC_IND_DATA_11 0x01AD + #define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a #define mmCP_HYP_MEC1_UCODE_DATA 0xf81b #define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h index a3f0ce4d5835..9ceaed9ac52a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h +++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h @@ -158,7 +158,7 @@ struct pp_power_state { /*Structure to hold a VCE state entry*/ -struct PP_VCEState { +struct pp_vce_state { uint32_t evclk; uint32_t ecclk; uint32_t sclk; @@ -171,30 +171,28 @@ enum PP_MMProfilingState { PP_MMProfilingState_Stopped }; -struct PP_Clock_Engine_Request { - unsigned long clientType; - unsigned long ctxid; +struct pp_clock_engine_request { + unsigned long client_type; + unsigned long ctx_id; uint64_t context_handle; unsigned long sclk; - unsigned long sclkHardMin; + unsigned long sclk_hard_min; unsigned long mclk; unsigned long iclk; unsigned long evclk; unsigned long ecclk; - unsigned long ecclkHardMin; + unsigned long ecclk_hard_min; unsigned long vclk; unsigned long dclk; - unsigned long samclk; - unsigned long acpclk; - unsigned long sclkOverdrive; - unsigned long mclkOverdrive; + unsigned long sclk_over_drive; + unsigned long mclk_over_drive; unsigned long sclk_threshold; unsigned long flag; unsigned long vclk_ceiling; unsigned long dclk_ceiling; unsigned long num_cus; - unsigned long pmflag; - enum PP_MMProfilingState MMProfilingState; + unsigned long pm_flag; + enum PP_MMProfilingState mm_profiling_state; }; #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h index d7d83b7c7f95..bfdbec10cdd5 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h +++ 
b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -43,5 +43,8 @@ } while (0) +#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \ + (type *)((char *)&(ptr)->member + (sizeof(type) * (n))) + #endif /* PP_DEBUG_H */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h new file mode 100644 index 000000000000..71c9b2d28640 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h @@ -0,0 +1,510 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU71_H +#define SMU71_H + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define SMU__NUM_PCIE_DPM_LEVELS 8 +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__VARIANT__ICELAND 1 +#define SMU__DGPU_ONLY 1 +#define SMU__DYNAMIC_MCARB_SETTINGS 1 + +enum SID_OPTION { + SID_OPTION_HI, + SID_OPTION_LO, + SID_OPTION_COUNT +}; + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU71_MAX_LEVELS_VDDC 8 +#define SMU71_MAX_LEVELS_VDDCI 4 +#define SMU71_MAX_LEVELS_MVDD 4 +#define SMU71_MAX_LEVELS_VDDNB 8 + +#define SMU71_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE +#define SMU71_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS +#define SMU71_MAX_ENTRIES_SMIO 32 + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT) +#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3 +#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT) +#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6 +#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT) +#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9 +#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT) +#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12 +#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT) +#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15 +#define 
SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT) +#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18 +#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT) +#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21 +#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT) +#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24 +#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT) +#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27 +#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT) + + +#if defined SMU__DGPU_ONLY +#define SMU71_DTE_ITERATIONS 5 +#define SMU71_DTE_SOURCES 3 +#define SMU71_DTE_SINKS 1 +#define SMU71_NUM_CPU_TES 0 +#define SMU71_NUM_GPU_TES 1 +#define SMU71_NUM_NON_TES 2 + +#endif + +#if defined SMU__FUSION_ONLY +#define SMU7_DTE_ITERATIONS 5 +#define SMU7_DTE_SOURCES 5 +#define SMU7_DTE_SINKS 3 +#define SMU7_NUM_CPU_TES 2 +#define SMU7_NUM_GPU_TES 1 +#define SMU7_NUM_NON_TES 2 + +#endif + +struct SMU71_PIDController +{ + uint32_t Ki; + int32_t LFWindupUpperLim; + int32_t LFWindupLowerLim; + uint32_t StatePrecision; + uint32_t LfPrecision; + uint32_t LfOffset; + uint32_t MaxState; + uint32_t MaxLfFraction; + uint32_t StateShift; +}; + +typedef struct SMU71_PIDController SMU71_PIDController; + +struct SMU7_LocalDpmScoreboard +{ + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfSclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t GfxClkSlow; + uint8_t GpioClampMode; + + uint8_t FpsFilterWeight; + uint8_t EnabledLevelsChange; + uint8_t DteClampMode; + uint8_t FpsClampMode; + + uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint8_t FpsEnabled; + uint8_t MaxPerfLevel; + uint8_t AllowLowClkInterruptToHost; + uint8_t FpsRunning; + + uint32_t MaxAllowedFrequency; +}; + +typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard; + +#define SMU7_MAX_VOLTAGE_CLIENTS 12 + +struct SMU7_VoltageScoreboard +{ + uint16_t CurrentVoltage; + uint16_t HighestVoltage; + uint16_t MaxVid; + uint8_t HighestVidOffset; + uint8_t CurrentVidOffset; +#if defined (SMU__DGPU_ONLY) + uint8_t CurrentPhases; + uint8_t HighestPhases; +#else + uint8_t AvsOffset; + uint8_t AvsOffsetApplied; +#endif + uint8_t ControllerBusy; + uint8_t CurrentVid; + uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS]; +#if defined (SMU__DGPU_ONLY) + uint8_t RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS]; +#endif + uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS]; + uint8_t TargetIndex; + uint8_t Delay; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint16_t CurrentStdVoltageHiSidd; + uint16_t CurrentStdVoltageLoSidd; +#if defined (SMU__DGPU_ONLY) + uint16_t RequestedVddci; + uint16_t CurrentVddci; + uint16_t HighestVddci; + uint8_t CurrentVddciVid; + uint8_t 
TargetVddciIndex; +#endif +}; + +typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard; + +// ------------------------------------------------------------------------------------------------------------------------- +#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */ + +struct SMU7_PCIeLinkSpeedScoreboard +{ + uint8_t DpmEnable; + uint8_t DpmRunning; + uint8_t DpmForce; + uint8_t DpmForceLevel; + + uint8_t CurrentLinkSpeed; + uint8_t EnabledLevelsChange; + uint16_t AutoDpmInterval; + + uint16_t AutoDpmRange; + uint16_t AutoDpmCount; + + uint8_t DpmMode; + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t CurrentLinkLevel; + +}; + +typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard; + +// -------------------------------------------------------- CAC table ------------------------------------------------------ +#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 +#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16 + +#define SMU7_SCALE_I 7 +#define SMU7_SCALE_R 12 + +struct SMU7_PowerScoreboard +{ + uint16_t MinVoltage; + uint16_t MaxVoltage; + + uint32_t AvgGpuPower; + + uint16_t VddcLeakagePower[SID_OPTION_COUNT]; + uint16_t VddcSclkConstantPower[SID_OPTION_COUNT]; + uint16_t VddcSclkDynamicPower[SID_OPTION_COUNT]; + uint16_t VddcNonSclkDynamicPower[SID_OPTION_COUNT]; + uint16_t VddcTotalPower[SID_OPTION_COUNT]; + uint16_t VddcTotalCurrent[SID_OPTION_COUNT]; + uint16_t VddcLoadVoltage[SID_OPTION_COUNT]; + uint16_t VddcNoLoadVoltage[SID_OPTION_COUNT]; + + uint16_t DisplayPhyPower; + uint16_t PciePhyPower; + + uint16_t VddciTotalPower; + uint16_t Vddr1TotalPower; + + uint32_t RocPower; + + uint32_t last_power; + uint32_t enableWinAvg; + + uint32_t lkg_acc; + uint16_t VoltLkgeScaler; + uint16_t TempLkgeScaler; + + uint32_t uvd_cac_dclk; + uint32_t uvd_cac_vclk; + uint32_t vce_cac_eclk; + uint32_t samu_cac_samclk; + uint32_t display_cac_dispclk; + uint32_t acp_cac_aclk; + uint32_t unb_cac; + + uint32_t WinTime; + + uint16_t GpuPwr_MAWt; + uint16_t FilteredVddcTotalPower; + + uint8_t CalculationRepeats; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; +}; + +typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard; + +// -------------------------------------------------------------------------------------------------- + +struct SMU7_ThermalScoreboard +{ + int16_t GpuLimit; + int16_t GpuHyst; + uint16_t CurrGnbTemp; + uint16_t FilteredGnbTemp; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; + uint8_t padding[3]; +}; + +typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard; + +// For FeatureEnables: +#define SMU7_SCLK_DPM_CONFIG_MASK 0x01 +#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02 +#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04 +#define SMU7_MCLK_DPM_CONFIG_MASK 0x08 +#define SMU7_UVD_DPM_CONFIG_MASK 0x10 +#define SMU7_VCE_DPM_CONFIG_MASK 0x20 +#define SMU7_ACP_DPM_CONFIG_MASK 0x40 +#define SMU7_SAMU_DPM_CONFIG_MASK 0x80 +#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100 + +#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001 +#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002 +#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100 +#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200 +#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000 +#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 + +// All 'soft registers' should be uint32_t. 
+struct SMU71_SoftRegisters +{ + uint32_t RefClockFrequency; + uint32_t PmTimerPeriod; + uint32_t FeatureEnables; +#if defined (SMU__DGPU_ONLY) + uint32_t PreVBlankGap; + uint32_t VBlankTimeout; + uint32_t TrainTimeGap; + uint32_t MvddSwitchTime; + uint32_t LongestAcpiTrainTime; + uint32_t AcpiDelay; + uint32_t G5TrainTime; + uint32_t DelayMpllPwron; + uint32_t VoltageChangeTimeout; +#endif + uint32_t HandshakeDisables; + + uint8_t DisplayPhy1Config; + uint8_t DisplayPhy2Config; + uint8_t DisplayPhy3Config; + uint8_t DisplayPhy4Config; + + uint8_t DisplayPhy5Config; + uint8_t DisplayPhy6Config; + uint8_t DisplayPhy7Config; + uint8_t DisplayPhy8Config; + + uint32_t AverageGraphicsActivity; + uint32_t AverageMemoryActivity; + uint32_t AverageGioActivity; + + uint8_t SClkDpmEnabledLevels; + uint8_t MClkDpmEnabledLevels; + uint8_t LClkDpmEnabledLevels; + uint8_t PCIeDpmEnabledLevels; + + uint32_t DRAM_LOG_ADDR_H; + uint32_t DRAM_LOG_ADDR_L; + uint32_t DRAM_LOG_PHY_ADDR_H; + uint32_t DRAM_LOG_PHY_ADDR_L; + uint32_t DRAM_LOG_BUFF_SIZE; + uint32_t UlvEnterCount; + uint32_t UlvTime; + uint32_t UcodeLoadStatus; + uint8_t DPMFreezeAndForced; + uint8_t Activity_Weight; + uint8_t Reserved8[2]; + uint32_t Reserved; +}; + +typedef struct SMU71_SoftRegisters SMU71_SoftRegisters; + +struct SMU71_Firmware_Header +{ + uint32_t Digest[5]; + uint32_t Version; + uint32_t HeaderSize; + uint32_t Flags; + uint32_t EntryPoint; + uint32_t CodeSize; + uint32_t ImageSize; + + uint32_t Rtos; + uint32_t SoftRegisters; + uint32_t DpmTable; + uint32_t FanTable; + uint32_t CacConfigTable; + uint32_t CacStatusTable; + + uint32_t mcRegisterTable; + + uint32_t mcArbDramTimingTable; + + uint32_t PmFuseTable; + uint32_t Globals; + uint32_t UvdDpmTable; + uint32_t AcpDpmTable; + uint32_t VceDpmTable; + uint32_t SamuDpmTable; + uint32_t UlvSettings; + uint32_t Reserved[37]; + uint32_t Signature; +}; + +typedef struct SMU71_Firmware_Header SMU71_Firmware_Header; + +struct SMU7_HystController_Data +{ + uint8_t waterfall_up; + uint8_t waterfall_down; + uint8_t pstate; + uint8_t clamp_mode; +}; + +typedef struct SMU7_HystController_Data SMU7_HystController_Data; + +#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000 + +enum DisplayConfig { + PowerDown = 1, + DP54x4, + DP54x2, + DP54x1, + DP27x4, + DP27x2, + DP27x1, + HDMI297, + HDMI162, + LVDS, + DP324x4, + DP324x2, + DP324x1 +}; + +//#define SX_BLOCK_COUNT 8 +//#define MC_BLOCK_COUNT 1 +//#define CPL_BLOCK_COUNT 27 + +#if defined SMU__VARIANT__ICELAND + #define SX_BLOCK_COUNT 8 + #define MC_BLOCK_COUNT 1 + #define CPL_BLOCK_COUNT 29 +#endif + +struct SMU7_Local_Cac { + uint8_t BlockId; + uint8_t SignalId; + uint8_t Threshold; + uint8_t Padding; +}; + +typedef struct SMU7_Local_Cac SMU7_Local_Cac; + +struct SMU7_Local_Cac_Table { + SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT]; + SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT]; + SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT]; +}; + +typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table; + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) +#endif + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h new file mode 100644 index 000000000000..c0e3936d5c2e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h @@ -0,0 +1,631 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU71_DISCRETE_H +#define SMU71_DISCRETE_H + +#include "smu71.h" + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define VDDC_ON_SVI2 0x1 +#define VDDCI_ON_SVI2 0x2 +#define MVDD_ON_SVI2 0x4 + +struct SMU71_Discrete_VoltageLevel +{ + uint16_t Voltage; + uint16_t StdVoltageHiSidd; + uint16_t StdVoltageLoSidd; + uint8_t Smio; + uint8_t padding; +}; + +typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel; + +struct SMU71_Discrete_GraphicsLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + + uint32_t SclkFrequency; + + uint8_t pcieDpmLevel; + uint8_t DeepSleepDivId; + uint16_t ActivityLevel; + + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t EnabledForActivity; + uint8_t EnabledForThrottle; + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t PowerThrottle; +}; + +typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel; + +struct SMU71_Discrete_ACPILevel +{ + uint32_t Flags; + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t SclkFrequency; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t DeepSleepDivId; + uint8_t padding; + uint32_t CgSpllFuncCntl; + uint32_t CgSpllFuncCntl2; + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; +}; + +typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel; + +struct SMU71_Discrete_Ulv +{ + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint16_t VddcOffset; + uint8_t VddcOffsetVid; + uint8_t VddcPhase; + uint32_t Reserved; +}; + +typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv; + +struct SMU71_Discrete_MemoryLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t MinVddci; + uint32_t MinMvdd; + + uint32_t MclkFrequency; + + uint8_t EdcReadEnable; + uint8_t EdcWriteEnable; + uint8_t RttEnable; + uint8_t StutterEnable; + + uint8_t StrobeEnable; + uint8_t StrobeRatio; + uint8_t EnabledForThrottle; + uint8_t EnabledForActivity; + + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t padding; + + uint16_t ActivityLevel; + uint8_t DisplayWatermark; + uint8_t padding1; + + uint32_t MpllFuncCntl; + uint32_t MpllFuncCntl_1; + uint32_t MpllFuncCntl_2; + 
uint32_t MpllAdFuncCntl; + uint32_t MpllDqFuncCntl; + uint32_t MclkPwrmgtCntl; + uint32_t DllCntl; + uint32_t MpllSs1; + uint32_t MpllSs2; +}; + +typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel; + +struct SMU71_Discrete_LinkLevel +{ + uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 + uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint8_t EnabledForActivity; + uint8_t SPC; + uint32_t DownThreshold; + uint32_t UpThreshold; + uint32_t Reserved; +}; + +typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel; + + +#ifdef SMU__DYNAMIC_MCARB_SETTINGS +// MC ARB DRAM Timing registers. +struct SMU71_Discrete_MCArbDramTimingTableEntry +{ + uint32_t McArbDramTiming; + uint32_t McArbDramTiming2; + uint8_t McArbBurstTime; + uint8_t padding[3]; +}; + +typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry; + +struct SMU71_Discrete_MCArbDramTimingTable +{ + SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; +}; + +typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable; +#endif + +// UVD VCLK/DCLK state (level) definition. +struct SMU71_Discrete_UvdLevel +{ + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint16_t MinVddc; + uint8_t MinVddcPhases; + uint8_t VclkDivider; + uint8_t DclkDivider; + uint8_t padding[3]; +}; + +typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel; + +// Clocks for other external blocks (VCE, ACP, SAMU). +struct SMU71_Discrete_ExtClkLevel +{ + uint32_t Frequency; + uint16_t MinVoltage; + uint8_t MinPhases; + uint8_t Divider; +}; + +typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel; + +// Everything that we need to keep track of about the current state. +// Use this instead of copies of the GraphicsLevel and MemoryLevel structures to keep track of state parameters +// that need to be checked later. +// We don't need to cache everything about a state, just a few parameters. 
+struct SMU71_Discrete_StateInfo +{ + uint32_t SclkFrequency; + uint32_t MclkFrequency; + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint32_t SamclkFrequency; + uint32_t AclkFrequency; + uint32_t EclkFrequency; + uint16_t MvddVoltage; + uint16_t padding16; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + uint8_t McRegIndex; + uint8_t SeqIndex; + uint8_t SclkDid; + int8_t SclkIndex; + int8_t MclkIndex; + uint8_t PCIeGen; + +}; + +typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo; + + +struct SMU71_Discrete_DpmTable +{ + // Multi-DPM controller settings + SMU71_PIDController GraphicsPIDController; + SMU71_PIDController MemoryPIDController; + SMU71_PIDController LinkPIDController; + + uint32_t SystemFlags; + + // SMIO masks for voltage and phase controls + uint32_t SmioMaskVddcVid; + uint32_t SmioMaskVddcPhase; + uint32_t SmioMaskVddciVid; + uint32_t SmioMaskMvddVid; + + uint32_t VddcLevelCount; + uint32_t VddciLevelCount; + uint32_t MvddLevelCount; + + SMU71_Discrete_VoltageLevel VddcLevel [SMU71_MAX_LEVELS_VDDC]; + SMU71_Discrete_VoltageLevel VddciLevel [SMU71_MAX_LEVELS_VDDCI]; + SMU71_Discrete_VoltageLevel MvddLevel [SMU71_MAX_LEVELS_MVDD]; + + uint8_t GraphicsDpmLevelCount; + uint8_t MemoryDpmLevelCount; + uint8_t LinkLevelCount; + uint8_t MasterDeepSleepControl; + + uint32_t Reserved[5]; + + // State table entries for each DPM state + SMU71_Discrete_GraphicsLevel GraphicsLevel [SMU71_MAX_LEVELS_GRAPHICS]; + SMU71_Discrete_MemoryLevel MemoryACPILevel; + SMU71_Discrete_MemoryLevel MemoryLevel [SMU71_MAX_LEVELS_MEMORY]; + SMU71_Discrete_LinkLevel LinkLevel [SMU71_MAX_LEVELS_LINK]; + SMU71_Discrete_ACPILevel ACPILevel; + + uint32_t SclkStepSize; + uint32_t Smio [SMU71_MAX_ENTRIES_SMIO]; + + uint8_t GraphicsBootLevel; + uint8_t GraphicsVoltageChangeEnable; + uint8_t GraphicsThermThrottleEnable; + uint8_t GraphicsInterval; + + uint8_t VoltageInterval; + uint8_t ThermalInterval; + uint16_t TemperatureLimitHigh; + + uint16_t TemperatureLimitLow; + uint8_t MemoryBootLevel; + uint8_t MemoryVoltageChangeEnable; + + uint8_t MemoryInterval; + uint8_t MemoryThermThrottleEnable; + uint8_t MergedVddci; + uint8_t padding2; + + uint16_t VoltageResponseTime; + uint16_t PhaseResponseTime; + + uint8_t PCIeBootLinkLevel; + uint8_t PCIeGenInterval; + uint8_t DTEInterval; + uint8_t DTEMode; + + uint8_t SVI2Enable; + uint8_t VRHotGpio; + uint8_t AcDcGpio; + uint8_t ThermGpio; + + uint32_t DisplayCac; + + uint16_t MaxPwr; + uint16_t NomPwr; + + uint16_t FpsHighThreshold; + uint16_t FpsLowThreshold; + + uint16_t BAPMTI_R [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS]; + uint16_t BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS]; + + uint8_t DTEAmbientTempBase; + uint8_t DTETjOffset; + uint8_t GpuTjMax; + uint8_t GpuTjHyst; + + uint16_t BootVddc; + uint16_t BootVddci; + + uint16_t BootMVdd; + uint16_t padding; + + uint32_t BAPM_TEMP_GRADIENT; + + uint32_t LowSclkInterruptThreshold; + uint32_t VddGfxReChkWait; + + uint16_t PPM_PkgPwrLimit; + uint16_t PPM_TemperatureLimit; + + uint16_t DefaultTdp; + uint16_t TargetTdp; +}; + +typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable; + +// --------------------------------------------------- AC Timing Parameters ------------------------------------------------ +#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16 +#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY + +struct SMU71_Discrete_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct 
SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress; + +struct SMU71_Discrete_MCRegisterSet +{ + uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet; + +struct SMU71_Discrete_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMU71_Discrete_MCRegisterAddress address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; + SMU71_Discrete_MCRegisterSet data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters; + + +// --------------------------------------------------- Fan Table ----------------------------------------------------------- +struct SMU71_Discrete_FanTable +{ + uint16_t FdoMode; + int16_t TempMin; + int16_t TempMed; + int16_t TempMax; + int16_t Slope1; + int16_t Slope2; + int16_t FdoMin; + int16_t HystUp; + int16_t HystDown; + int16_t HystSlope; + int16_t TempRespLim; + int16_t TempCurr; + int16_t SlopeCurr; + int16_t PwmCurr; + uint32_t RefreshPeriod; + int16_t FdoMax; + uint8_t TempSrc; + int8_t Padding; +}; + +typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable; + +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4 +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG) + +struct SMU71_MclkDpmScoreboard +{ + + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfMclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t MclkSwitchInProgress; + uint8_t MclkSwitchCritical; + + uint8_t TargetMclkIndex; + uint8_t TargetMvddIndex; + uint8_t MclkSwitchResult; + + uint8_t EnabledLevelsChange; + + uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY]; + uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint16_t MclkSwitchingTime; + uint8_t padding[2]; +}; + +typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard; + +struct SMU71_UlvScoreboard +{ + uint8_t EnterUlv; + uint8_t ExitUlv; + uint8_t UlvActive; + uint8_t WaitingForUlv; + uint8_t UlvEnable; + uint8_t UlvRunning; + uint8_t UlvMasterEnable; + uint8_t padding; + uint32_t UlvAbortedCount; + uint32_t UlvTimeStamp; +}; + +typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard; + +struct SMU71_VddGfxScoreboard +{ + uint8_t VddGfxEnable; + uint8_t VddGfxActive; + uint8_t padding[2]; + + uint32_t VddGfxEnteredCount; + uint32_t VddGfxAbortedCount; +}; + +typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard; + +struct SMU71_AcpiScoreboard { + uint32_t SavedInterruptMask[2]; + uint8_t LastACPIRequest; + uint8_t CgBifResp; + uint8_t RequestType; + uint8_t Padding; + SMU71_Discrete_ACPILevel D0Level; +}; + +typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard; + + +struct SMU71_Discrete_PmFuses { + // dw0-dw1 + uint8_t BapmVddCVidHiSidd[8]; + + // dw2-dw3 + uint8_t BapmVddCVidLoSidd[8]; + + // dw4-dw5 + uint8_t VddCVid[8]; + + // dw6 
+ uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t SviLoadLineTrimVddC; + uint8_t SviLoadLineOffsetVddC; + + // dw7 + uint16_t TDC_VDDC_PkgLimit; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + + // dw8 + uint8_t TdcWaterfallCtl; + uint8_t LPMLTemperatureMin; + uint8_t LPMLTemperatureMax; + uint8_t Reserved; + + // dw9-dw12 + uint8_t LPMLTemperatureScaler[16]; + + // dw13-dw14 + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t Reserved6; + + // dw15 + uint8_t GnbLPML[16]; + + // dw15 + uint8_t GnbLPMLMaxVid; + uint8_t GnbLPMLMinVid; + uint8_t Reserved1[2]; + + // dw16 + uint16_t BapmVddCBaseLeakageHiSidd; + uint16_t BapmVddCBaseLeakageLoSidd; +}; + +typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses; + +struct SMU71_Discrete_Log_Header_Table { + uint32_t version; + uint32_t asic_id; + uint16_t flags; + uint16_t entry_size; + uint32_t total_size; + uint32_t num_of_entries; + uint8_t type; + uint8_t mode; + uint8_t filler_0[2]; + uint32_t filler_1[2]; +}; + +typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table; + +struct SMU71_Discrete_Log_Cntl { + uint8_t Enabled; + uint8_t Type; + uint8_t padding[2]; + uint32_t BufferSize; + uint32_t SamplesLogged; + uint32_t SampleSize; + uint32_t AddrL; + uint32_t AddrH; +}; + +typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl; + +#if defined SMU__DGPU_ONLY + #define CAC_ACC_NW_NUM_OF_SIGNALS 83 +#endif + + +struct SMU71_Discrete_Cac_Collection_Table { + uint32_t temperature; + uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS]; + uint32_t filler[4]; +}; + +typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table; + +struct SMU71_Discrete_Cac_Verification_Table { + uint32_t VddcTotalPower; + uint32_t VddcLeakagePower; + uint32_t VddcConstantPower; + uint32_t VddcGfxDynamicPower; + uint32_t VddcUvdDynamicPower; + uint32_t VddcVceDynamicPower; + uint32_t VddcAcpDynamicPower; + uint32_t VddcPcieDynamicPower; + uint32_t VddcDceDynamicPower; + uint32_t VddcCurrent; + uint32_t VddcVoltage; + uint32_t VddciTotalPower; + uint32_t VddciLeakagePower; + uint32_t VddciConstantPower; + uint32_t VddciDynamicPower; + uint32_t Vddr1TotalPower; + uint32_t Vddr1LeakagePower; + uint32_t Vddr1ConstantPower; + uint32_t Vddr1DynamicPower; + uint32_t spare[8]; + uint32_t temperature; +}; + +typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table; + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) +#endif + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h index 33af5f511ab8..65eb630bfea3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h @@ -1,5 +1,5 @@ /* - * Copyright 2015 Advanced Micro Devices, Inc. + * Copyright 2014 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,15 +21,38 @@ * */ -#ifndef _FIJI_CLOCK_POWER_GATING_H_ -#define _FIJI_CLOCK_POWER_GATING_H_ +#ifndef _PP_COMMON_H +#define _PP_COMMON_H -#include "fiji_hwmgr.h" -#include "pp_asicblocks.h" +#include "smu7_ppsmc.h" +#include "cgs_common.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + + +#include "smu74.h" +#include "smu74_discrete.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" + + +#endif -extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); -extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h new file mode 100644 index 000000000000..bce00096d80d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -0,0 +1,412 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef DGPU_VI_PP_SMC_H +#define DGPU_VI_PP_SMC_H + + +#pragma pack(push, 1) + +#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + + +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + + +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + + +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) + +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define 
PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) + +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) + +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) + +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) + +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) + +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) +#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) +#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +#define PPSMC_MSG_Test ((uint16_t) 0x100) +#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) +#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) +#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) +#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) +#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) +#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) +#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) +#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) +#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) +#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) +#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) +#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) +#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) +#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) +#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) +#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) +#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) +#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) +#define 
PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) +#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) +#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) +#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) +#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) +#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125) +#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126) +#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127) +#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) + +#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) +#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) +#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) +#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) +#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) +#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) +#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) + +#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) +#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152) +#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c) +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define 
PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) +#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) +#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) +#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) +#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) +#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c) +#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d) +#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e) +#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f) +#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) +#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) +#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) +#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) +#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) +#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) +#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) +#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) +#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) +#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) +#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) +#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) +#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) +#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) +#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) +#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) +#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) +#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) +#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) +#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) +#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) +#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) +#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) +#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) +#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define 
PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) +#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) +#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) +#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) +#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) +#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) +#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) + +#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) +#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) +#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) +#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) +#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) +#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) +#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C) +#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) +#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) +#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) +#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) +#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) +#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) +#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) +#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) +#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) +#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) +#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) +#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A) +#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B) + +#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C) +#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275) +#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277) +#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400) +#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401) +#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402) +#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) +#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) + +#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) +#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) +#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) + +#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300) +#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301) + +#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306) + +#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600) +#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601) +#define PPSMC_MSG_SetAddress ((uint16_t) 0x800) +#define PPSMC_MSG_GetData ((uint16_t) 0x801) +#define PPSMC_MSG_SetData ((uint16_t) 0x802) + +typedef uint16_t PPSMC_Msg; + +#define 
PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 + +#pragma pack(pop) + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 3c235f0177cd..2139072065cc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -28,6 +28,7 @@ struct pp_smumgr; struct pp_instance; +struct pp_hwmgr; #define smu_lower_32_bits(n) ((uint32_t)(n)) #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) @@ -53,6 +54,45 @@ enum AVFS_BTC_STATUS { AVFS_BTC_SMUMSG_ERROR }; +enum SMU_TABLE { + SMU_UVD_TABLE = 0, + SMU_VCE_TABLE, + SMU_SAMU_TABLE, + SMU_BIF_TABLE, +}; + +enum SMU_TYPE { + SMU_SoftRegisters = 0, + SMU_Discrete_DpmTable, +}; + +enum SMU_MEMBER { + HandshakeDisables = 0, + VoltageChangeTimeout, + AverageGraphicsActivity, + PreVBlankGap, + VBlankTimeout, + UcodeLoadStatus, + UvdBootLevel, + VceBootLevel, + SamuBootLevel, + LowSclkInterruptThreshold, +}; + + +enum SMU_MAC_DEFINITION { + SMU_MAX_LEVELS_GRAPHICS = 0, + SMU_MAX_LEVELS_MEMORY, + SMU_MAX_LEVELS_LINK, + SMU_MAX_ENTRIES_SMIO, + SMU_MAX_LEVELS_VDDC, + SMU_MAX_LEVELS_VDDGFX, + SMU_MAX_LEVELS_VDDCI, + SMU_MAX_LEVELS_MVDD, + SMU_UVD_MCLK_HANDSHAKE_DISABLE, +}; + + struct pp_smumgr_func { int (*smu_init)(struct pp_smumgr *smumgr); int (*smu_fini)(struct pp_smumgr *smumgr); @@ -69,12 +109,23 @@ struct pp_smumgr_func { int (*download_pptable_settings)(struct pp_smumgr *smumgr, void **table); int (*upload_pptable_settings)(struct pp_smumgr *smumgr); + int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); + int (*process_firmware_header)(struct pp_hwmgr *hwmgr); + int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); + int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); + int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); + int (*init_smc_table)(struct pp_hwmgr *hwmgr); + int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); + int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); + int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); + uint32_t (*get_offsetof)(uint32_t type, uint32_t member); + uint32_t (*get_mac_definition)(uint32_t value); + bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); }; struct pp_smumgr { uint32_t chip_family; uint32_t chip_id; - uint32_t hw_revision; void *device; void *backend; uint32_t usec_timeout; @@ -122,6 +173,30 @@ extern int smu_allocate_memory(void *device, uint32_t size, extern int smu_free_memory(void *device, void *handle); +extern int cz_smum_init(struct pp_smumgr *smumgr); +extern int iceland_smum_init(struct pp_smumgr *smumgr); +extern int tonga_smum_init(struct pp_smumgr *smumgr); +extern int fiji_smum_init(struct pp_smumgr *smumgr); +extern int polaris10_smum_init(struct pp_smumgr *smumgr); + +extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); + +extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr); +extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); +extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); +extern int smum_init_smc_table(struct pp_hwmgr *hwmgr); +extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); 
+extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, + uint32_t type, uint32_t member); +extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value); + +extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); + #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index f10fb64ef981..51ff08301651 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -2,7 +2,9 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ + polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ + smu7_smumgr.o iceland_smc.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 87c023e518ab..5a44485526d2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -89,13 +89,8 @@ static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) if (result != 0) return result; - result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, + return SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); - - if (result != 0) - return result; - - return 0; } static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, @@ -106,12 +101,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, if (0 != (3 & smc_address)) { printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n"); - return -1; + return -EINVAL; } if (limit <= (smc_address + 3)) { printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n"); - return -1; + return -EINVAL; } cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0, @@ -129,9 +124,10 @@ static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr, return -EINVAL; result = cz_set_smc_sram_address(smumgr, smc_address, limit); - cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value); + if (!result) + cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value); - return 0; + return result; } static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, @@ -148,7 +144,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) { struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); - int result = 0; uint32_t smc_address; if (!smumgr->reload_fw) { @@ -177,11 +172,9 @@ static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_power_profiling_index); - result = cz_send_msg_to_smc_with_parameter(smumgr, + return cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_initialize_index); - - return result; } static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, @@ -195,9 +188,6 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; - return cgs_read_register(smumgr->device, - mmSMU_MP1_SRBM2P_ARG_0); - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index); for (i = 0; i < smumgr->usec_timeout; i++) { @@ -275,7 
+265,10 @@ static int cz_start_smu(struct pp_smumgr *smumgr) if (smumgr->chip_id == CHIP_STONEY) fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - cz_request_smu_load_fw(smumgr); + ret = cz_request_smu_load_fw(smumgr); + if (ret) + printk(KERN_ERR "[ powerplay] SMU firmware load failed\n"); + cz_check_fw_load_finish(smumgr, fw_to_check); ret = cz_load_mec_firmware(smumgr); @@ -566,10 +559,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - if (smumgr->chip_id == CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - else + if (smumgr->chip_id != CHIP_STONEY) cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); cz_smu_populate_single_ucode_load_task(smumgr, @@ -580,10 +570,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id == CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - else + if (smumgr->chip_id != CHIP_STONEY) cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); cz_smu_populate_single_ucode_load_task(smumgr, @@ -610,19 +597,12 @@ static int cz_smu_construct_toc(struct pp_smumgr *smumgr) struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; cz_smu->toc_entry_used_count = 0; - cz_smu_initialize_toc_empty_job_list(smumgr); - cz_smu_construct_toc_for_rlc_aram_save(smumgr); - cz_smu_construct_toc_for_vddgfx_enter(smumgr); - cz_smu_construct_toc_for_vddgfx_exit(smumgr); - cz_smu_construct_toc_for_power_profiling(smumgr); - cz_smu_construct_toc_for_bootup(smumgr); - cz_smu_construct_toc_for_clock_table(smumgr); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c new file mode 100644 index 000000000000..76310ac7ef0d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -0,0 +1,2374 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "fiji_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "fiji_smumgr.h" +#include "pppcielanes.h" +#include "smu7_ppsmc.h" +#include "smu73.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "smu7_smumgr.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define VDDC_VDDCI_DELTA 300 +#define MC_CG_ARB_FREQ_F1 0x0b + +/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs + * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] + */ +static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = { + {600, 1050, 3, 0}, {600, 1050, 6, 1} }; + +/* [FF, SS] type, [] 4 voltage ranges, and + * [Floor Freq, Boundary Freq, VID min , VID max] + */ +static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = { + { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, + { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; + +/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] + * (coming from PWR_CKS_CNTL.stretch_amount reg spec) + */ +static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = { + {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; + +static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ + {1, 0xF, 0xFD, + /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ + 0x19, 5, 45} +}; + +/* PPGen has the gain setting generated in x * 100 unit + * This function is to convert the unit to x * 4096(0x1000) unit. 
+ * This is the unit expected by SMC firmware + */ +static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, + uint32_t clock, uint32_t *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + *voltage = *mvdd = 0; + + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if (dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i-1].vddci) { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} + + +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) +{ + switch (line) { + case SMU7_I2CLineID_DDC1: + *scl = SMU7_I2C_DDC1CLK; + *sda = SMU7_I2C_DDC1DATA; + break; + case SMU7_I2CLineID_DDC2: + *scl = SMU7_I2C_DDC2CLK; + *sda = SMU7_I2C_DDC2DATA; + break; + case SMU7_I2CLineID_DDC3: + *scl = SMU7_I2C_DDC3CLK; + *sda = SMU7_I2C_DDC3DATA; + break; + case SMU7_I2CLineID_DDC4: + *scl = SMU7_I2C_DDC4CLK; + *sda = SMU7_I2C_DDC4DATA; + break; + case SMU7_I2CLineID_DDC5: + *scl = SMU7_I2C_DDC5CLK; + *sda = SMU7_I2C_DDC5DATA; + break; + case SMU7_I2CLineID_DDC6: + *scl = SMU7_I2C_DDC6CLK; + *sda = SMU7_I2C_DDC6DATA; + break; + case SMU7_I2CLineID_SCLSDA: + *scl = SMU7_I2C_SCL; + *sda = SMU7_I2C_SDA; + break; + case SMU7_I2CLineID_DDCVGA: + *scl = SMU7_I2C_DDCVGACLK; + *sda = SMU7_I2C_DDCVGADATA; + break; + default: + *scl = 0; + *sda = 0; + break; + } +} + +static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= 
POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &fiji_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0]; + +} + +static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + + SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table = + &hwmgr->thermal_controller.advanceFanControlParameters; + uint8_t uc_scl, uc_sda; + + /* TDP number of fraction bits are changed from 8 to 7 for Fiji + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 128)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; + + /* The following are for new Fiji Multi-input fan/thermal control */ + dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitLiquid1 * 256); + dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitLiquid2 * 256); + dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitVrVddc * 256); + dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitVrMvdd * 256); + dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitPlx * 256); + + dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainEdge)); + dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainLiquid)); + dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainVrVddc)); + dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); + dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainPlx)); + dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHbm)); + + dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; + dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; + dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; + dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; + + get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Liquid_I2C_LineSCL = uc_scl; + dpm_table->Liquid_I2C_LineSDA = uc_sda; + + get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Vr_I2C_LineSCL = uc_scl; + 
dpm_table->Vr_I2C_LineSDA = uc_sda; + + get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); + dpm_table->Plx_I2C_LineSCL = uc_scl; + dpm_table->Plx_I2C_LineSDA = uc_sda; + + return 0; +} + + +static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + + +static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; + + return 0; +} + +static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else { + smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; + smu_data->power_tune_table.LPMLTemperatureMin = + (uint8_t)((temp >> 16) & 0xff); + smu_data->power_tune_table.LPMLTemperatureMax = + (uint8_t)((temp >> 8) & 0xff); + smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); + } + return 0; +} + +static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + 0 == hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + uint32_t pm_fuse_table_offset; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW6 */ + if (fiji_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (fiji_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 */ + if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != fiji_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW14 */ + if (fiji_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + /* DW15-DW18 */ + if (fiji_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW19 */ + if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW20 */ + if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +/** +* Preparation of vddc and vddgfx CAC tables for SMC. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + /* tables is already swapped, so in order to use the value from it, + * we need to swap it back. + * We are populating vddc CAC data to BapmVddc table + * in split and merged mode + */ + + for (count = 0; count < lookup_table->count; count++) { + index = phm_get_voltage_index(lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_high); + } + + return 0; +} + +/** +* Preparation of voltage tables for SMC. +* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ + +static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result; + + result = fiji_populate_cac_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate CAC voltage tables to SMC", + return -EINVAL); + + return 0; +} + +static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_Ulv *state) +{ + int result = 0; + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = 1; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + } + return result; +} + +static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + return fiji_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + + +/** +* Calculates the SCLK dividers using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ +static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t ref_clock; + uint32_t ref_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.
*/ + ref_clock = atomctrl_get_reference_clock(hwmgr); + ref_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider */ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup */ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + struct pp_atomctrl_internal_ss_info ssInfo; + + uint32_t vco_freq = clock * dividers.uc_pll_post_div; + if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, + vco_freq, &ssInfo)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + * + * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 + */ + uint32_t clk_s = ref_clock * 5 / + (ref_divider * ssInfo.speed_spectrum_rate); + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * + fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, + CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); + } + } + + sclk->SclkFrequency = clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +/** +* Populates single SMC SCLK structure using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ + +static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU73_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t threshold, mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + result = fiji_calculate_sclk_params(hwmgr, clock, level); + + /* populate graphics levels */ + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + (uint32_t *)(&level->MinVoltage), &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + + level->SclkFrequency = clock; + level->ActivityLevel = sclk_al_threshold; + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->UpHyst = 10; + level->DownHyst = 0; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + + threshold = clock * data->fast_watermark_threshold / 100; + + data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + + if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, + hwmgr->display_config.min_core_set_clock_in_sr); + + + /* Default to slow, highest DPM level will be + * set to PPSMC_DISPLAY_WATERMARK_LOW later. + */ + level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + + return 0; +} +/** +* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states +* +* @param hwmgr the address of the hardware manager +*/ +int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * + SMU73_MAX_LEVELS_GRAPHICS; + struct SMU73_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t hightest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = fiji_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &levels[i]); + if (result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now.*/ + levels[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + levels[dpm_table->sclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? 
i : max_entry); + } else { + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (hightest_pcie_level_enabled + 1))) != 0)) + hightest_pcie_level_enabled++; + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0)) + lowest_pcie_level_enabled++; + + while ((count < hightest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < + hightest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : + hightest_pcie_level_enabled; + + /* set pcieDpmLevel to hightest_pcie_level_enabled */ + for (i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = hightest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + + +/** + * MCLK Frequency Ratio + * SEQ_CG_RESP Bit[31:24] - 0x0 + * Bit[27:24] - DDR3 Frequency ratio + * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz + * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz + * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz + * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz + * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz + * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz + * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz + * 400 < 0x7 <= 450MHz, 800 < 0xF + */ +static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) +{ + if (mem_clock <= 10000) + return 0x0; + if (mem_clock <= 15000) + return 0x1; + if (mem_clock <= 20000) + return 0x2; + if (mem_clock <= 25000) + return 0x3; + if (mem_clock <= 30000) + return 0x4; + if (mem_clock <= 35000) + return 0x5; + if (mem_clock <= 40000) + return 0x6; + if (mem_clock <= 45000) + return 0x7; + if (mem_clock <= 50000) + return 0x8; + if (mem_clock <= 55000) + return 0x9; + if (mem_clock <= 60000) + return 0xa; + if (mem_clock <= 65000) + return 0xb; + if (mem_clock <= 70000) + return 0xc; + if (mem_clock <= 75000) + return 0xd; + if (mem_clock <= 80000) + return 0xe; + /* mem_clock > 800MHz */ + return 0xf; +} + +/** +* Populates the SMC MCLK structure using the provided memory clock +* +* @param hwmgr the address of the hardware manager +* @param clock the memory clock to use to populate the structure +* @param mclk the SMC MCLK structure to be populated +*/ +static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) +{ + struct pp_atomctrl_memory_clock_param mem_param; + int result; + + result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to get Memory PLL Dividers.", + ); + + /* Save the result data to output memory level structure */ + mclk->MclkFrequency = clock; + mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; + mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); + + return result; +} + +static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) +{ + struct smu7_hwmgr *data = (struct
smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + uint32_t mclk_stutter_mode_threshold = 60000; + + if (table_info->vdd_dep_on_mclk) { + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory " + "VDDC voltage dependency table", return result); + } + + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->UpHyst = 0; + mem_level->DownHyst = 100; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + mem_level->StutterEnable = false; + + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + /* enable stutter mode if all the follow condition applied + * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, + * &(data->DisplayTiming.numExistingDisplays)); + */ + data->display_timing.num_existing_displays = 1; + + if (mclk_stutter_mode_threshold && + (clock <= mclk_stutter_mode_threshold) && + (!data->is_uvd_enabled) && + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + return result; +} + +/** +* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states +* +* @param hwmgr the address of the hardware manager +*/ +int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * + SMU73_MAX_LEVELS_MEMORY; + struct SMU73_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = fiji_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + if (result) + return result; + } + + /* Only enable level 0 for now. */ + levels[0].EnabledForActivity = 1; + + /* in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in + * a higher state by default such that we are not effected by + * up threshold or and MCLK DPM latency. 
+ */ + levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; + CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high */ + levels[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + + +/** +* Populates the SMC MVDD structure using the provided memory clock. +* +* @param hwmgr the address of the hardware manager +* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. +* @param voltage the SMC VOLTAGE structure to be populated +*/ +static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} + +static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct pp_atomctrl_clock_dividers_vi dividers; + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (!data->sclk_dpm_key_disabled) { + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + table->ACPILevel.SclkFrequency = + data->dpm_table.sclk_table.dpm_levels[0].value; + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + table->ACPILevel.SclkFrequency, + (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " \ + "in Clock Dependency Table", + ); + } else { + table->ACPILevel.SclkFrequency = + data->vbios_boot_state.sclk_bootup_value; + table->ACPILevel.MinVoltage = + data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; + } + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, &dividers); + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, +
SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2, + SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + if (!data->mclk_dpm_key_disabled) { + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = + data->dpm_table.mclk_table.dpm_levels[0].value; + result = fiji_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value in Clock Dependency Table", + ); + } else { + table->MemoryACPILevel.MclkFrequency = + data->vbios_boot_state.mclk_bootup_value; + table->MemoryACPILevel.MinVoltage = + data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE; + } + + us_mvdd = 0; + if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + (data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!fiji_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = false; + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} + +static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; 
count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + table->VceLevel[count].MinVoltage |= + ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) * + VOLTAGE_SCALE) << VDDCI_SHIFT; + table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /*retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->AcpLevelCount = (uint8_t)(mm_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; + table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t)(mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].MinVoltage = 0; + table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + +
CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burstTime; + ULONG state, trrds, trrdl; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); + + state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0); + trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0); + trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + arb_regs->TRRDS = (uint8_t)trrds; + arb_regs->TRRDL = (uint8_t)trrdl; + + return 0; +} + +static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct SMU73_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + int result = 0; + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = fiji_populate_memory_timing_parameters(hwmgr, + data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result) + break; + } + } + + if (!result) + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU73_Discrete_MCArbDramTimingTable), + SMC_RAM_END); + return result; +} + +static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - + VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, +
table->UvdLevel[count].DclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + + } + return result; +} + +static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_sclk->entries[level].clk >= + data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_mclk->entries[level].clk >= + data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, + volt_with_cks, value; + uint16_t clock_freq_u16; + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, + volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+ */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (146 * 4)); + efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (148 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + efuse2 &= 0xF; + + if (efuse2 == 1) + ro = (2300 - 1350) * efuse / 255 + 1350; + else + ro = (2500 - 1000) * efuse / 255 + 1000; + + if (ro >= 1660) + type = 0; + else + type = 1; + + /* Populate Stretch amount */ + smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + volt_without_cks = (uint32_t)((14041 * + (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / + (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); + volt_with_cks = (uint32_t)((13946 * + (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / + (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + STRETCH_ENABLE, 0x0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + staticEnable, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x0); + + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFC2FF87; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = + fiji_clock_stretcher_lookup_table[stretch_amount2][0]; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = + fiji_clock_stretcher_lookup_table[stretch_amount2][1]; + clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table. + GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. + SclkFrequency) / 100); + if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < + clock_freq_u16 && + fiji_clock_stretcher_lookup_table[stretch_amount2][1] > + clock_freq_u16) { + /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ + value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; + /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ + value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; + /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ + value |= (fiji_clock_stretch_amount_conversion + [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] + [stretch_amount]) << 3; + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].minFreq); + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. 
+ CKS_LOOKUPTableEntry[0].maxFreq); + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = + fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= + (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + /* Populate DDT Lookup Table */ + for (i = 0; i < 4; i++) { + /* Assign the minimum and maximum VID stored + * in the last row of Clock Stretcher Voltage Table. + */ + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].minVID = + (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].maxVID = + (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; + /* Loop through each SCLK and check the frequency + * to see if it lies within the frequency for clock stretcher. + */ + for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) { + cks_setting = 0; + clock_freq = PP_SMC_TO_HOST_UL( + smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency); + /* Check the allowed frequency against the sclk level[j]. + * Sclk's endianness has already been converted, + * and it's in 10Khz unit, + * as opposed to Data table, which is in Mhz unit. + */ + if (clock_freq >= + (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { + cks_setting |= 0x2; + if (clock_freq < + (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) + cks_setting |= 0x1; + } + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table. + ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +/** +* Populates the SMC VRConfig field in DPM table. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU73_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!", + ); + } + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + +static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + uint32_t tmp; + int result; + + /* This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. + */ + result = smu7_read_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); + + if (result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return smu7_write_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data (PowerState) +* @return always 0 +*/ +int fiji_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + + fiji_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + fiji_populate_smc_voltage_tables(hwmgr, table); + + table->SystemFlags = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = fiji_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = fiji_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = fiji_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = fiji_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result); + + result = fiji_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = fiji_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = fiji_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result); + + result = fiji_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. 
+ */ + result = fiji_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = fiji_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + result = fiji_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = fiji_populate_smc_initailial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot State!", return result); + + result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = fiji_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ + table->PCIeGenInterval = 1; + table->VRConfig = 0; + + result = fiji_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, + &gpio_pin)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* For porlarity read GPIOPAD_A with assigned Gpio pin + * since VBIOS will program this register to set 'inactive state', + * driver can then determine 'active state' from this and + * program SMU with correct polarity + */ + table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 
1:0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), + SMC_RAM_END); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + result = fiji_init_arb_table_index(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload arb data to SMC memory!", return result); + + result = fiji_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + return 0; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (smu_data->smu7_data.fan_table_start == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 
+ usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr-> + thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> + thermal_controller.advanceFanControlParameters.ulCycleDelay * + reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), + SMC_RAM_END); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanMinPwm, + hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanSclkTarget, + hwmgr->thermal_controller. 
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + + if (res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + return 0; +} + +int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return fiji_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + result = fiji_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + return result; +} + +uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU73_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU73_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU73_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU73_SoftRegisters, UcodeLoadStatus); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU73_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("cant't get the offset of type %x member %x \n", type, member); + return 0; +} + +uint32_t fiji_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU73_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU73_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU73_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU73_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU73_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU73_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU73_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU73_MAX_LEVELS_MVDD; + } + + printk("cant't get the mac of %x \n", value); + return 0; +} + + +static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, 
mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, + UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + else + smu_data->smc_state_table.VceBootLevel = 0; + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + + smu_data->smc_state_table.SamuBootLevel = 0; + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + +int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case 
SMU_UVD_TABLE: + fiji_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + fiji_update_vce_smc_table(hwmgr); + break; + case SMU_SAMU_TABLE: + fiji_update_samu_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + + +/** +* Get the location of various tables inside the FW image. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->smu7_data.dpm_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) { + data->soft_regs_start = tmp; + smu_data->smu7_data.soft_regs_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.mc_reg_table_start = tmp; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.fan_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.arb_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU73_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + + /* Program additional LP registers + * that are no longer programmed by VBIOS + */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + + return 0; +} + +bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? 
true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h new file mode 100644 index 000000000000..d30d150f9ca6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h @@ -0,0 +1,51 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef FIJI_SMC_H +#define FIJI_SMC_H + +#include "smumgr.h" +#include "smu73.h" + +struct fiji_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; +}; + +int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int fiji_init_smc_table(struct pp_hwmgr *hwmgr); +int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t fiji_get_offsetof(uint32_t type, uint32_t member); +uint32_t fiji_get_mac_definition(uint32_t value); +int fiji_process_firmware_header(struct pp_hwmgr *hwmgr); +int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 8e52a2e82db5..02fe1df855a9 100644..100755 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -38,6 +38,7 @@ #include "bif/bif_5_0_sh_mask.h" #include "pp_debug.h" #include "fiji_pwrvirus.h" +#include "fiji_smc.h" #define AVFS_EN_MSB 1568 #define AVFS_EN_LSB 1568 @@ -57,509 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } }; -static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - 
result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} -/** -* Set the address for reading/writing the SMC SRAM space. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -*/ -static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr, - uint32_t smc_addr, uint32_t limit) -{ - PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), - "SMC address must be 4 byte aligned.", return -EINVAL;); - PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), - "SMC address is beyond the SMC RAM area.", return -EINVAL;); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - - return 0; -} - -/** -* Copy bytes from an array into the SMC RAM space. -* -* @param smumgr the address of the powerplay SMU manager. -* @param smcStartAddress the start address in the SMC RAM to copy bytes to. -* @param src the byte array to copy the bytes from. -* @param byteCount the number of bytes to copy. -*/ -int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit) -{ - int result; - uint32_t data, originalData; - uint32_t addr, extraShift; - - PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), - "SMC address must be 4 byte aligned.", return -EINVAL;); - PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), - "SMC address is beyond the SMC RAM area.", return -EINVAL;); - - addr = smcStartAddress; - - while (byteCount >= 4) { - /* Bytes are written into the SMC addres space with the MSB first. */ - data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - - result = fiji_set_smc_sram_address(smumgr, addr, limit); - if (result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - - src += 4; - byteCount -= 4; - addr += 4; - } - - if (byteCount) { - /* Now write the odd bytes left. - * Do a read modify write cycle. - */ - data = 0; - - result = fiji_set_smc_sram_address(smumgr, addr, limit); - if (result) - return result; - - originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - extraShift = 8 * (4 - byteCount); - - while (byteCount > 0) { - /* Bytes are written into the SMC addres - * space with the MSB first. - */ - data = (0x100 * data) + *src++; - byteCount--; - } - data <<= extraShift; - data |= (originalData & ~((~0UL) << extraShift)); - - result = fiji_set_smc_sram_address(smumgr, addr, limit); - if (!result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - } - return 0; -} - -int fiji_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 }; - - fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1); - - return 0; -} - -/** -* Return if the SMC is currently running. -* -* @param smumgr the address of the powerplay hardware manager. 
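As a side note on the copy path being removed above (it moves into the shared smu7 helpers): the packing it performs — bytes written MSB-first into 32-bit words, with a read-modify-write for a trailing partial word — is easy to check in isolation. The sketch below is illustrative only; it models SMC SRAM as a flat array and uses made-up sram_read()/sram_write() helpers in place of the indirect cgs register accesses.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for SMC SRAM: the real driver goes through mmSMC_IND_INDEX_0 /
 * mmSMC_IND_DATA_0; a flat word array keeps the sketch self-contained. */
static uint32_t fake_sram[64];

static uint32_t sram_read(uint32_t addr)              { return fake_sram[addr / 4]; }
static void     sram_write(uint32_t addr, uint32_t v) { fake_sram[addr / 4] = v; }

/* Pack bytes MSB-first into 32-bit words; read-modify-write the tail. */
static int copy_bytes_msb_first(uint32_t start, const uint8_t *src, uint32_t count)
{
	uint32_t addr = start;

	if (start & 3)			/* 4-byte alignment required, as in the driver */
		return -1;

	while (count >= 4) {
		uint32_t data = ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
				((uint32_t)src[2] << 8) | src[3];
		sram_write(addr, data);
		src += 4;
		addr += 4;
		count -= 4;
	}

	if (count) {			/* partial word: keep the untouched low bytes */
		uint32_t shift = 8 * (4 - count);
		uint32_t data = 0;

		while (count--)
			data = (data << 8) | *src++;
		data <<= shift;
		data |= sram_read(addr) & ~(~0u << shift);
		sram_write(addr, data);
	}
	return 0;
}

int main(void)
{
	const uint8_t payload[] = { 0xE0, 0x00, 0x80, 0x40, 0xAA, 0xBB };

	copy_bytes_msb_first(0x0, payload, sizeof(payload));
	/* Expected: word0=0xE0008040, word1=0xAABB0000 (SRAM starts zeroed). */
	printf("word0=0x%08X word1=0x%08X\n",
	       (unsigned)fake_sram[0], (unsigned)fake_sram[1]);
	return 0;
}
```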
-*/ -bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, - CGS_IND_REG__SMC, - SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, - CGS_IND_REG__SMC, ixSMC_PC_C))); -} - -/** -* Send a message to the SMC, and wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - if (!fiji_is_smc_ram_running(smumgr)) - return -1; - - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) { - printk(KERN_ERR "Failed to send Previous Message."); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - } - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - return 0; -} - -/** - * Send a message to the SMC with parameter - * @param smumgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return The response that came from the SMC. - */ -int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (!fiji_is_smc_ram_running(smumgr)) - return -1; - - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) { - printk(KERN_ERR "Failed to send Previous Message."); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - } - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - return 0; -} - - -/** -* Send a message to the SMC with parameter, do not wait for response -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -int fiji_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) -{ - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) { - printk(KERN_ERR "Failed to send Previous Message."); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - } - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - -/** -* Uploads the SMU firmware from .hex file -* -* @param smumgr the address of the powerplay SMU manager. -* @return 0 or -1. 
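All of the send_msg_to_smc variants deleted in this hunk share one handshake: confirm the previous response slot reads back 1, write the argument and then the message register, and poll the response until it is non-zero again. A minimal model of that flow, with a pretend firmware and stand-in registers (msg_reg/arg_reg/resp_reg are not the real mmSMC_* registers), might look like this:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy register file for the message mailbox; names are placeholders. */
static uint32_t msg_reg, arg_reg, resp_reg = 1;

/* Pretend firmware: acknowledge whatever was last written. */
static void fake_firmware_step(void)
{
	resp_reg = 1;
}

/* Shape of the handshake used by the send_msg_to_smc* helpers:
 * 1) previous response must be non-zero (SMC idle),
 * 2) write argument, then message (the message write kicks the SMC),
 * 3) poll the response register until it goes non-zero again. */
static int send_msg_with_parameter(uint16_t msg, uint32_t parameter)
{
	if (resp_reg != 1)
		fprintf(stderr, "previous message still pending\n");

	arg_reg = parameter;
	msg_reg = msg;
	resp_reg = 0;		/* model the SMC taking the message (slot busy) */

	fake_firmware_step();	/* real code: SMUM_WAIT_FIELD_UNEQUAL(..., SMC_RESP, 0) */
	return resp_reg ? 0 : -1;
}

int main(void)
{
	/* 0x1A and the parameter are made-up demo values, not PPSMC message ids. */
	printf("result: %d\n", send_msg_with_parameter(0x1A, 1u << 3));
	return 0;
}
```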
-*/ - -static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr) -{ - const uint8_t *src; - uint32_t byte_count; - uint32_t *data; - struct cgs_firmware_info info = {0}; - - cgs_get_firmware_info(smumgr->device, - fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); - - if (info.image_size & 3) { - printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (info.image_size > FIJI_SMC_SIZE) { - printk(KERN_ERR "SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - - byte_count = info.image_size; - src = (const uint8_t *)info.kptr; - - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - return 0; -} - -/** -* Read a 32bit value from the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -* @param value and output parameter for the data read from the SMC SRAM. -*/ -int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, - uint32_t *value, uint32_t limit) -{ - int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - return 0; -} - -/** -* Write a 32bit value to the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -* @param value to write to the SMC SRAM. 
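The SRAM dword accessors above all funnel through the same index/data register pair, guarded by an alignment check and a limit check. Assuming a toy register file instead of mmSMC_IND_INDEX_0/mmSMC_IND_DATA_0, the pattern can be sketched as:

```c
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Toy model of the SMC indirect-access pair: an index register selects the
 * SRAM word, a data register reads/writes it.  Illustrative only. */
static uint32_t sram[0x100];
static uint32_t index_reg;

static void     write_index(uint32_t addr) { index_reg = addr; }
static uint32_t read_data(void)            { return sram[index_reg / 4]; }
static void     write_data(uint32_t v)     { sram[index_reg / 4] = v; }

/* Same validation as the set_smc_sram_address helpers in this file. */
static int set_sram_address(uint32_t addr, uint32_t limit)
{
	if (addr & 3)			/* dword aligned */
		return -EINVAL;
	if (addr + 3 >= limit)		/* stay inside SMC RAM */
		return -EINVAL;
	write_index(addr);
	return 0;
}

static int read_sram_dword(uint32_t addr, uint32_t *value, uint32_t limit)
{
	int ret = set_sram_address(addr, limit);

	if (ret)
		return ret;
	*value = read_data();
	return 0;
}

int main(void)
{
	uint32_t v = 0;

	set_sram_address(0x10, sizeof(sram));
	write_data(0xCAFEF00D);
	read_sram_dword(0x10, &v, sizeof(sram));
	printf("0x%08X\n", (unsigned)v);	/* 0xCAFEF00D */
	return 0;
}
```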
-*/ -int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, - uint32_t value, uint32_t limit) -{ - int result; - - result = fiji_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); - return 0; -} - -static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type) -{ - uint32_t result = 0; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC_JT1: - result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; - break; - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - printk(KERN_ERR "UCode type is out of range!"); - result = 0; - } - - return result; -} - -/* Populate one firmware image to the data structure */ -static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint32_t fw_type, struct SMU_Entry *entry) -{ - int result; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info( - smumgr->device, - fiji_convert_fw_type_to_cgs(fw_type), - &info); - - if (!result) { - entry->version = 0; - entry->id = (uint16_t)fw_type; - entry->image_addr_high = smu_upper_32_bits(info.mc_addr); - entry->image_addr_low = smu_lower_32_bits(info.mc_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = info.image_size; - entry->num_register_entries = 0; - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - } - - return result; -} - -static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - uint32_t fw_to_load; - struct SMU_DRAMData_TOC *toc; - - if (priv->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - priv->soft_regs_start + - offsetof(SMU73_SoftRegisters, UcodeLoadStatus), - 0x0); - - toc = (struct SMU_DRAMData_TOC *)priv->header; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_MEC_JT2, 
&toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - PP_ASSERT_WITH_CODE( - 0 == fiji_populate_single_firmware_entry(smumgr, - UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n" , return -1 ); - - fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, - priv->header_buffer.mc_addr_high); - fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO, - priv->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK - + UCODE_ID_CP_MEC_JT1_MASK - + UCODE_ID_CP_MEC_JT2_MASK; - - if (fiji_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_LoadUcodes, fw_to_load)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); - - return 0; -} - - -/* Check if the FW has been loaded, SMU will not return - * if loading has not finished. - */ -static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr, - uint32_t fw_type) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - uint32_t mask = fiji_get_mask_for_firmware_type(fw_type); - - /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */ - if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX, - priv->soft_regs_start + - offsetof(SMU73_SoftRegisters, UcodeLoadStatus), - mask, mask)) { - printk(KERN_ERR "check firmware loading failed\n"); - return -EINVAL; - } - return 0; -} - - -static int fiji_reload_firmware(struct pp_smumgr *smumgr) -{ - return smumgr->smumgr_funcs->start_smu(smumgr); -} - -static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr) -{ - uint32_t value; - - value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER); - if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) { - /* driver reads on SR-IOV enabled PF: 0x80000000 - * driver reads on SR-IOV enabled VF: 0x80000001 - * driver reads on SR-IOV disabled: 0x00000000 - */ - return true; - } - return false; -} - -static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type) -{ - if (fiji_is_hw_virtualization_enabled(smumgr)) { - uint32_t masks = fiji_get_mask_for_firmware_type(fw_type); - if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr, - PPSMC_MSG_LoadUcodes, masks)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); - } - /* For non-virtualization cases, - * SMU loads all FWs at once in fiji_request_smu_load_fw. 
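The load-firmware path being removed builds a table of contents, asks the SMC to load a bitmask of firmwares, and later polls SoftRegisters.UcodeLoadStatus until every requested bit is reported back. The bit positions below are invented for the example (the real UCODE_ID_*_MASK values come from the SMU headers), but the mask arithmetic is the same idea:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical per-firmware bit positions, one bit per microcode image. */
enum fw_bit {
	FW_RLC_G  = 1u << 0,
	FW_SDMA0  = 1u << 1,
	FW_SDMA1  = 1u << 2,
	FW_CP_CE  = 1u << 3,
	FW_CP_PFP = 1u << 4,
	FW_CP_ME  = 1u << 5,
	FW_CP_MEC = 1u << 6,
};

/* The load request is just the OR of every firmware the TOC describes ... */
static uint32_t build_load_mask(void)
{
	return FW_RLC_G | FW_SDMA0 | FW_SDMA1 | FW_CP_CE | FW_CP_PFP |
	       FW_CP_ME | FW_CP_MEC;
}

/* ... and "finished" means every requested bit is set in the status word,
 * which is what the wait in the check-finish helper polls for. */
static bool load_finished(uint32_t status, uint32_t requested)
{
	return (status & requested) == requested;
}

int main(void)
{
	uint32_t want = build_load_mask();

	printf("partial: %d, done: %d\n",
	       load_finished(FW_RLC_G | FW_SDMA0, want),	/* 0 */
	       load_finished(want | 1u << 31, want));		/* 1 */
	return 0;
}
```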
- */ - return 0; -} - static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) { int result = 0; @@ -571,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = fiji_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result) return result; @@ -610,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, - SMU_STATUS, SMU_PASS)) { + if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMU_STATUS, SMU_PASS) != 1) { PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); } @@ -639,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = fiji_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result) return result; /* Set smc instruct start point at 0x0 */ - fiji_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); /* Enable clock */ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, @@ -698,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; if (priv->avfs.AvfsBtcParam) { - if (!fiji_send_msg_to_smc_with_parameter(smumgr, + if (!smum_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { - if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { + if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; result = 0; } else { printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt" " to Enable AVFS Failed!"); - fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); + smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); result = -1; } } else { @@ -736,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ - PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, PmFuseTable), &table_start, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate " @@ -748,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) inversion_voltage_addr = table_start + offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); - result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr, + result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr, (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " "be populated.", return -1;); - result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr, + result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr, (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " "charz_freq could not be populated.", return -1;); @@ -769,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) uint32_t level_addr, vr_config_addr; uint32_t level_size = 
sizeof(avfs_graphics_level); - PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &table_start, 0x40000), @@ -784,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_addr = table_start + offsetof(SMU73_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr, (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " "vr_config value over to SMC", @@ -792,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr, (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", return -1;); @@ -839,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) break; case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, - PPSMC_MSG_VftTableIsValid), + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, + 0x666), "[AVFS][fiji_avfs_event_mgr] SMU did not respond " "correctly to VftTableIsValid Msg", return -1;); priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs), "[AVFS][fiji_avfs_event_mgr] SMU did not respond " "correctly to EnableAvfs Message Msg", @@ -898,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr) struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); /* Only start SMC if SMC RAM is not running */ - if (!fiji_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(smumgr)) { fiji_avfs_event_mgr(smumgr, false); /* Check if SMU is running in protected mode */ @@ -929,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr) /* Setup SoftRegsStart here for register lookup in case * DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - fiji_read_smc_sram_dword(smumgr, + smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), - &(priv->soft_regs_start), 0x40000); + &(priv->smu7_data.soft_regs_start), 0x40000); - result = fiji_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(smumgr); return result; } @@ -963,28 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) static int fiji_smu_init(struct pp_smumgr *smumgr) { struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - uint64_t mc_addr; - - priv->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - smu_allocate_memory(smumgr->device, - priv->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &priv->header_buffer.kaddr, - &priv->header_buffer.handle); - - priv->header = priv->header_buffer.kaddr; - priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != priv->header), - "Out of memory.", - kfree(smumgr->backend); - 
cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)priv->header_buffer.handle); - return -1); + int i; + + if (smu7_init(smumgr)) + return -EINVAL; priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; if (fiji_is_hw_avfs_present(smumgr)) @@ -999,37 +479,35 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) else priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; - priv->acpi_optimization = 1; + for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) + priv->activity_target[i] = 30; return 0; } -static int fiji_smu_fini(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - - smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); - - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} static const struct pp_smumgr_func fiji_smu_funcs = { .smu_init = &fiji_smu_init, - .smu_fini = &fiji_smu_fini, + .smu_fini = &smu7_smu_fini, .start_smu = &fiji_start_smu, - .check_fw_load_finish = &fiji_check_fw_load_finish, - .request_smu_load_fw = &fiji_reload_firmware, - .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load, - .send_msg_to_smc = &fiji_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter, + .check_fw_load_finish = &smu7_check_fw_load_finish, + .request_smu_load_fw = &smu7_reload_firmware, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .update_smc_table = fiji_update_smc_table, + .get_offsetof = fiji_get_offsetof, + .process_firmware_header = fiji_process_firmware_header, + .init_smc_table = fiji_init_smc_table, + .update_sclk_threshold = fiji_update_sclk_threshold, + .thermal_setup_fan_table = fiji_thermal_setup_fan_table, + .populate_all_graphic_levels = fiji_populate_all_graphic_levels, + .populate_all_memory_levels = fiji_populate_all_memory_levels, + .get_mac_definition = fiji_get_mac_definition, + .initialize_mc_reg_table = fiji_initialize_mc_reg_table, + .is_dpm_running = fiji_is_dpm_running, }; int fiji_smum_init(struct pp_smumgr *smumgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h index b4eb483215b1..adcbdfb209be 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h @@ -23,37 +23,31 @@ #ifndef _FIJI_SMUMANAGER_H_ #define _FIJI_SMUMANAGER_H_ +#include "smu73_discrete.h" +#include <pp_endian.h> +#include "smu7_smumgr.h" + + struct fiji_smu_avfs { enum AVFS_BTC_STATUS AvfsBtcStatus; uint32_t AvfsBtcParam; }; -struct fiji_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; -}; struct fiji_smumgr { - uint8_t *header; - uint8_t *mec_image; - uint32_t soft_regs_start; + struct smu7_smumgr smu7_data; + struct fiji_smu_avfs avfs; - uint32_t acpi_optimization; + struct SMU73_Discrete_DpmTable smc_state_table; + struct SMU73_Discrete_Ulv ulv_setting; + struct SMU73_Discrete_PmFuses power_tune_table; + const struct fiji_pt_defaults *power_tune_defaults; + uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS]; - struct fiji_buffer_entry header_buffer; }; -int fiji_smum_init(struct pp_smumgr *smumgr); -int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, - uint32_t *value, uint32_t limit); -int 
fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, - uint32_t value, uint32_t limit); -int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress, - const uint8_t *src, uint32_t byteCount, uint32_t limit); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c new file mode 100644 index 000000000000..eda802bc63c8 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -0,0 +1,2576 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * + */ + +#include "iceland_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "pppcielanes.h" +#include "pp_endian.h" +#include "smu7_ppsmc.h" + +#include "smu71_discrete.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "processpptables.h" + +#include "iceland_smumgr.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define MC_CG_ARB_FREQ_F1 0x0b +#define VDDC_VDDCI_DELTA 200 + +#define DEVICE_ID_VI_ICELAND_M_6900 0x6900 +#define DEVICE_ID_VI_ICELAND_M_6901 0x6901 +#define DEVICE_ID_VI_ICELAND_M_6902 0x6902 +#define DEVICE_ID_VI_ICELAND_M_6903 0x6903 + +static const struct iceland_pt_defaults defaults_iceland = { + /* + * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, + * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +/* 35W - XT, XTL */ +static const struct iceland_pt_defaults defaults_icelandxt = { + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 
0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +/* 25W - PRO, LE */ +static const struct iceland_pt_defaults defaults_icelandpro = { + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + switch (dev_id) { + case DEVICE_ID_VI_ICELAND_M_6900: + case DEVICE_ID_VI_ICELAND_M_6903: + smu_data->power_tune_defaults = &defaults_icelandxt; + break; + + case DEVICE_ID_VI_ICELAND_M_6901: + case DEVICE_ID_VI_ICELAND_M_6902: + smu_data->power_tune_defaults = &defaults_icelandpro; + break; + default: + smu_data->power_tune_defaults = &defaults_iceland; + pr_warning("Unknown V.I. Device ID.\n"); + break; + } + return; +} + +static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 8; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + } + } else { + PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL); + } + + return 0; +} + +static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint8_t *vid = smu_data->power_tune_table.VddCVid; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) { + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + } + + return 0; +} + + + +static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW0 - DW3 */ + if (iceland_populate_bapm_vddc_vid_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate bapm vddc vid Failed!", + return -EINVAL); + + /* DW4 - DW5 */ + if (iceland_populate_vddc_vid(hwmgr)) + 
PP_ASSERT_WITH_CODE(false, + "Attempt to populate vddc vid Failed!", + return -EINVAL); + + /* DW6 */ + if (iceland_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (iceland_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 */ + if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != iceland_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW16 */ + if (iceland_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW17 */ + if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW18 */ + if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, uint32_t *vol) +{ + uint32_t i = 0; + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *vol = allowed_clock_voltage_table->entries[i - 1].v; + + return 0; +} + +static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + /* SCLK/VDDC Dependency Table has to exist. */ + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warning("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + /* + * Since voltage in the sclk/vddc dependency table is not + * necessarily in ascending order because of ELB voltage + * patching, loop through entire list to find exact voltage. 
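The clock/voltage dependency lookup used throughout this new file picks the first table entry whose clock covers the request and falls back to the last entry when the request is above the table maximum. A standalone sketch of that policy, with made-up sample data:

```c
#include <stdint.h>
#include <stdio.h>

struct clk_volt { uint32_t clk; uint32_t v; };

/* Same policy as the dependency lookup above: pick the first entry that can
 * carry the requested clock, else the highest one available. */
static int volt_for_clock(const struct clk_volt *tab, uint32_t count,
			  uint32_t clock, uint32_t *vol)
{
	uint32_t i;

	if (count == 0)
		return -1;		/* empty dependency table */

	for (i = 0; i < count; i++) {
		if (tab[i].clk >= clock) {
			*vol = tab[i].v;
			return 0;
		}
	}
	*vol = tab[count - 1].v;	/* clock above the table maximum */
	return 0;
}

int main(void)
{
	/* Clock in 10 kHz units, voltage in mV -- made-up sample values. */
	static const struct clk_volt tab[] = {
		{ 30000,  900 }, { 60000, 1000 }, { 90000, 1100 },
	};
	uint32_t v = 0;

	volt_for_clock(tab, 3, 45000, &v);
	printf("45000 -> %u\n", (unsigned)v);	/* 1000 */
	volt_for_clock(tab, 3, 120000, &v);
	printf("120000 -> %u\n", (unsigned)v);	/* 1100 */
	return 0;
}
```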
+ */ + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + /* + * If voltage is not found in the first pass, loop again to + * find the best match, equal or higher value. + */ + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU71_Discrete_VoltageLevel *smc_voltage_tab) +{ + int result; + + result = iceland_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, + &smc_voltage_tab->StdVoltageLoSidd); + if (0 != result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + + return 0; +} + +static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &(data->vddc_voltage_table.entries[count]), + &(table->VddcLevel[count])); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + 
table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->VddciLevelCount = data->vddci_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &(data->vddci_voltage_table.entries[count]), + &(table->VddciLevel[count])); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->MvddLevelCount = data->mvdd_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &(data->mvdd_voltage_table.entries[count]), + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + + +static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + + result = iceland_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -EINVAL); + + result = iceland_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -EINVAL); + + result = iceland_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -EINVAL); + + return 0; +} + +static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU71_Discrete_Ulv *state) +{ + uint32_t voltage_response_time, ulv_voltage; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); + + if (ulv_voltage == 0) { + data->ulv_supported = false; + return 0; + } + + if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffset = 0; + else + /* used in SMIO Mode. not implemented for now. this is backup only for CI. 
*/ + state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffsetVid = 0; + else /* used in SVI2 Mode */ + state->VddcOffsetVid = (uint8_t)( + (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) + * VOLTAGE_VID_OFFSET_SCALE2 + / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, + SMU71_Discrete_Ulv *ulv_level) +{ + return iceland_populate_ulv_level(hwmgr, ulv_level); +} + +static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +/** + * Calculates the SCLK dividers using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t reference_clock; + uint32_t reference_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ + reference_clock = atomctrl_get_reference_clock(hwmgr); + + reference_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider*/ + fbdiv = 
dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup*/ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + + uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; + if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + */ + /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ + uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); + + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); + + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = + PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, + const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (sclk < pl->entries[i].Sclk) { + *p_shed = i; + break; + } + } + return 0; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint16_t sclk_activity_level_threshold, + SMU71_Discrete_GraphicsLevel *graphic_level) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + /* populate graphics levels*/ + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, + &graphic_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for VDDC \ + engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + graphic_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) + iceland_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + engine_clock, + &graphic_level->MinVddcPhases); + + /* Indicates maximum activity 
level for this performance level. 50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 100; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + data->display_timing.min_clock_in_sr = + hwmgr->display_config.min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + graphic_level->DeepSleepDivId = + smu7_get_sleep_divider_id_from_clock(engine_clock, + data->display_timing.min_clock_in_sr); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * + SMU71_MAX_LEVELS_GRAPHICS; + + SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; + + uint32_t i; + uint8_t highest_pcie_level_enabled = 0; + uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0; + uint8_t count = 0; + int result = 0; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = iceland_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result != 0) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now. 
*/ + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + if (dpm_table->sclk_table.count > 1) + smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (highest_pcie_level_enabled + 1))) != 0) { + highest_pcie_level_enabled++; + } + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0) { + lowest_pcie_level_enabled++; + } + + while ((count < highest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) { + count++; + } + + mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? + (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled; + + + /* set pcieDpmLevel to highest_pcie_level_enabled*/ + for (i = 2; i < dpm_table->sclk_table.count; i++) { + smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; + } + + /* set pcieDpmLevel to lowest_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; + + /* level count will be sent to smc once at init smc table and never change*/ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, + (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param mclk the SMC MCLK structure to be populated + */ +static int iceland_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + 
MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss_info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * speed_spectrum_rate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + /* Save the result data to output memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t 
iceland_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) { + mc_para_index = 0x00; + } else if (memory_clock > 47500) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } + } else { + if (memory_clock < 65000) { + mc_para_index = 0x00; + } else if (memory_clock > 135000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + } + + return mc_para_index; +} + +static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) { + mc_para_index = 0; + } else if (memory_clock >= 80000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + } + + return mc_para_index; +} + +static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + *p_shed = i; + break; + } + } + + return 0; +} + +static int iceland_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *memory_level + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) { + memory_level->MinVddci = memory_level->MinVddc; + } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 0; + memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. 
Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + /* stutter mode is not supported on iceland */ + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + else + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } else + dll_state_on = data->dll_default_on; + } else { + memory_level->StrobeRatio = + iceland_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } + + result = iceland_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +/** + * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states + * + * @param hwmgr the address of the hardware manager + */ + +int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + + /* populate MCLK dpm table to SMU7 */ + uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY; + SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + 
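+ /* Each trimmed MCLK DPM level is converted to an SMU71_Discrete_MemoryLevel by the loop below; the populated array is then uploaded to SMC RAM in a single smu7_copy_bytes_to_smc() transfer. */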
+ for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", return -EINVAL); + result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (0 != result) { + return result; + } + } + + /* Only enable level 0 for now.*/ + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * in order to prevent MC activity in stutter mode from pushing DPM up, + * the UVD change complements this by putting the MCLK in a higher state + * by default such that we are not affected by the up threshold or MCLK DPM latency. + */ + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will be sent to smc once at init smc table and never change*/ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, + SMU71_Discrete_VoltageLevel *voltage) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find the first mvdd entry whose clock is not less than the requested clock */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -EINVAL); + + } else { + return -EINVAL; + } + + return 0; +} + +static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t vddc_phase_shed_control = 0; + + SMU71_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 
0 : 1; + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, + CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc; + else { + if (data->acpi_vddci != 0) + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE); + } + + if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + 
PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. 
+ */ +static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + int result = 0; + SMU71_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = iceland_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) { + break; + } + } + } + + if (0 == result) { + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU71_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + in dependency table. Using Graphics DPM level 0!"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.MemoryBootLevel = 0; + printk(KERN_ERR "[ powerplay ] VBIOS did not find boot memory clock value \ + in dependency table. 
Using Memory DPM level 0!"); + result = 0; + } + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + table->BootVddci = table->BootVddc; + else + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + return result; +} + +static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr, + SMU71_Discrete_MCRegisters *mc_reg_table) +{ + const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend; + + uint32_t i, j; + + for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { + if (smu_data->mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE, + "Index of mc_reg_table->address[] array out of boundary", return -EINVAL); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/*convert register values from driver to SMC format */ +static void iceland_convert_mc_registers( + const struct iceland_mc_reg_entry *entry, + SMU71_Discrete_MCRegisterSet *data, + uint32_t num_entries, uint32_t valid_flag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & 1<<j) { + data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); + i++; + } + } +} + +static int iceland_convert_mc_reg_table_entry_to_smc( + struct pp_smumgr *smumgr, + const uint32_t memory_clock, + SMU71_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + uint32_t i = 0; + + for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { + if (memory_clock <= + smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) + --i; + + iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, smu_data->mc_reg_table.last, + smu_data->mc_reg_table.validflag); + + return 0; +} + +static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU71_Discrete_MCRegisters *mc_regs) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = iceland_convert_mc_reg_table_entry_to_smc( + hwmgr->smumgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_regs->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters)); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); + + if (result != 0) + return result; + + + address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); + + return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, + (uint8_t *)&smu_data->mc_regs.data[0], + sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + 
SMC_RAM_END); +} + +static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + + memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters)); + result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END); +} + +static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + const uint16_t *def1, *def2; + int i, j, k; + + + /* + * TDP number of fraction bits are changed from 8 to 7 for Iceland + * as requested by SMC team + */ + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + + dpm_table->DTETjOffset = 0; + + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + /* The following are for new Iceland Multi-input fan/thermal control */ + if (NULL != ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU71_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU71_DTE_SOURCES; j++) { + for (k = 0; 
k < SMU71_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *tab) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + tab->SVI2Enable |= VDDC_ON_SVI2; + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + tab->SVI2Enable |= VDDCI_ON_SVI2; + else + tab->MergedVddci = 1; + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) + tab->SVI2Enable |= MVDD_ON_SVI2; + + PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) && + (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL); + + return 0; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int iceland_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table); + + + iceland_initialize_power_tune_defaults(hwmgr); + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) { + iceland_populate_smc_voltage_tables(hwmgr, table); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + + if (data->ulv_supported) { + result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = iceland_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result;); + + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result;); + + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result;); + + result = iceland_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result;); + + result = iceland_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result;); + + result = iceland_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result;); + + result = iceland_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result;); + + /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ + /* need to populate the ARB settings for the initial 
state. */ + result = iceland_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result;); + + result = iceland_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result;); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + result = iceland_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result;); + + result = iceland_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + + result = iceland_populate_smc_svi2_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate SVI2 setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result;); + + /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.ulv_setting_starts, + (uint8_t *)&(smu_data->ulv_setting), + sizeof(SMU71_Discrete_Ulv), + SMC_RAM_END); + + + result = iceland_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize MC Reg table!", return result); + + result = iceland_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return 
result); + + return 0; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == smu7_data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + /* fan_table.FanControl_GL_Flag = 1; */ + + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + + return 0; +} + + +static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if 
(data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) + return iceland_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = iceland_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result); + + result = iceland_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU71_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU71_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU71_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU71_SoftRegisters, UcodeLoadStatus); + } + case SMU_Discrete_DpmTable: + switch (member) { + case LowSclkInterruptThreshold: + return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("can't get the offset of type %x member %x \n", type, member); + return 0; +} + +uint32_t iceland_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU71_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU71_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU71_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU71_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU71_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDCI: + return SMU71_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU71_MAX_LEVELS_MVDD; + } + + printk("can't get the mac of %x \n", value); + return 0; +} + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @return always 0 + */ +int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->dpm_table_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (0 == result) { + data->soft_regs_start = tmp; + smu7_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->mc_reg_table_start = tmp; + } + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->fan_table_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->arb_table_start = tmp; + } + + error |= (0 != result); + + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (0 == result) { + hwmgr->microcode_version_info.SMC = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, UlvSettings), + &tmp, SMC_RAM_END); + + if (0 == result) { + smu7_data->ulv_setting_starts = tmp; + } + + error |= (0 != result); + + return error ? 
1 : 0; +} + +/*---------------------------MC----------------------------*/ + +static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? address : table->mc_reg_address[i].s1; + } + return 0; +} + +static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct iceland_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) { + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + } + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. + * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. 
need to set these data for each clock range + * + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, + struct iceland_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + switch (table->mc_reg_address[i].s1) { + + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_gddr5) { + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + if (!data->is_memory_gddr5) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static int iceland_set_valid_flag(struct iceland_mc_reg_table *table) +{ + uint8_t i, j; + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1<<i); + break; + } + } + } + + return 0; +} + +int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + pp_atomctrl_mc_reg_table *table; + struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table; + uint8_t module_index = iceland_get_memory_modile_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS 
*/ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = iceland_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + iceland_set_s0_mc_reg_index(ni_table); + result = iceland_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + iceland_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h index 8bc38cb17b7f..13c8dbbccaf2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h @@ -20,17 +20,21 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#ifndef _ICELAND_SMC_H +#define _ICELAND_SMC_H -#ifndef _TONGA_CLOCK_POWER_GATING_H_ -#define _TONGA_CLOCK_POWER_GATING_H_ +#include "smumgr.h" -#include "tonga_hwmgr.h" -#include "pp_asicblocks.h" -extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); -extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); -extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); -extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); -extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); -extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); -#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ +int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int iceland_init_smc_table(struct pp_hwmgr *hwmgr); +int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t iceland_get_offsetof(uint32_t type, uint32_t member); +uint32_t iceland_get_mac_definition(uint32_t value); +int iceland_process_firmware_header(struct pp_hwmgr *hwmgr); +int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr); +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c new file mode 100644 index 000000000000..eeafefc4acba --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -0,0 +1,250 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/gfp.h> + +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "ppsmc.h" +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" +#include "cgs_common.h" +#include "iceland_smc.h" + +#define ICELAND_SMC_SIZE 0x20000 + +static int iceland_start_smc(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + return 0; +} + +static void iceland_reset_smc(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, + rst_reg, 1); +} + + +static void iceland_stop_smc_clock(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 1); +} + +static void iceland_start_smc_clock(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 0); +} + +static int iceland_smu_start_smc(struct pp_smumgr *smumgr) +{ + /* set smc instruct start point at 0x0 */ + smu7_program_jump_on_start(smumgr); + + /* enable smc clock */ + iceland_start_smc_clock(smumgr); + + /* de-assert reset */ + iceland_start_smc(smumgr); + + SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + INTERRUPTS_ENABLED, 1); + + return 0; +} + + +static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, + uint32_t length, const uint8_t *src, + uint32_t limit, uint32_t start_addr) +{ + uint32_t byte_count = length; + uint32_t data; + + PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + while (byte_count >= 4) { + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + src += 4; + byte_count -= 4; + } + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); + + return 0; +} + + +static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +{ + uint32_t val; + struct cgs_firmware_info info = {0}; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + /* load SMC firmware */ + cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + + if (info.image_size & 3) { + pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (info.image_size > ICELAND_SMC_SIZE) { + pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + /* wait for smc boot up */ + SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); + + /* clear firmware interrupt enable flag */ + val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL); + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL, val | 1); + + /* stop smc clock */ + iceland_stop_smc_clock(smumgr); + + /* reset smc */ + iceland_reset_smc(smumgr); + iceland_upload_smc_firmware_data(smumgr, info.image_size, + (uint8_t *)info.kptr, ICELAND_SMC_SIZE, + info.ucode_start_address); + + return 0; +} + 
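The upload helper above streams the SMC firmware image into SMC RAM through the auto-incrementing SMC_IND_DATA_0 port, assembling each group of four image bytes into a big-endian 32-bit word with the src[0] * 0x1000000 + ... expression. A minimal stand-alone sketch of that packing (the pack_be32() name is hypothetical and not part of the driver):

#include <stdint.h>

/* Assemble one big-endian 32-bit word from four consecutive firmware bytes,
 * equivalent to src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]
 * as used in iceland_upload_smc_firmware_data() above.
 */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) |
	       ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |
	       (uint32_t)src[3];
}

Because the word is assembled byte-by-byte rather than by casting the buffer, the result is host-endian independent, which is also why the caller rejects images whose size is not a multiple of four (the image_size & 3 check and the trailing byte_count assertion).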
+static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, + uint32_t firmwareType) +{ + return 0; +} + +static int iceland_start_smu(struct pp_smumgr *smumgr) +{ + int result; + + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + + if (!smu7_is_smc_ram_running(smumgr)) { + printk("smu not running, upload firmware again \n"); + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + } + + result = smu7_request_smu_load_fw(smumgr); + + return result; +} + +/** + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value to write to the SMC SRAM. + */ +static int iceland_smu_init(struct pp_smumgr *smumgr) +{ + int i; + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + if (smu7_init(smumgr)) + return -EINVAL; + + for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++) + smu_data->activity_target[i] = 30; + + return 0; +} + +static const struct pp_smumgr_func iceland_smu_funcs = { + .smu_init = &iceland_smu_init, + .smu_fini = &smu7_smu_fini, + .start_smu = &iceland_start_smu, + .check_fw_load_finish = &smu7_check_fw_load_finish, + .request_smu_load_fw = &smu7_reload_firmware, + .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, + .send_msg_to_smc = &smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, + .get_offsetof = iceland_get_offsetof, + .process_firmware_header = iceland_process_firmware_header, + .init_smc_table = iceland_init_smc_table, + .update_sclk_threshold = iceland_update_sclk_threshold, + .thermal_setup_fan_table = iceland_thermal_setup_fan_table, + .populate_all_graphic_levels = iceland_populate_all_graphic_levels, + .populate_all_memory_levels = iceland_populate_all_memory_levels, + .get_mac_definition = iceland_get_mac_definition, + .initialize_mc_reg_table = iceland_initialize_mc_reg_table, + .is_dpm_running = iceland_is_dpm_running, +}; + +int iceland_smum_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = NULL; + + iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); + + if (iceland_smu == NULL) + return -ENOMEM; + + smumgr->backend = iceland_smu; + smumgr->smumgr_funcs = &iceland_smu_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h new file mode 100644 index 000000000000..8eae01b37c40 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -0,0 +1,71 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#ifndef _ICELAND_SMUMGR_H_ +#define _ICELAND_SMUMGR_H_ + + +#include "smu7_smumgr.h" +#include "pp_endian.h" +#include "smu71_discrete.h" + +struct iceland_pt_defaults { + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddc; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; + uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; +}; + +struct iceland_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct iceland_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/ + struct iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct iceland_smumgr { + struct smu7_smumgr smu7_data; + struct SMU71_Discrete_DpmTable smc_state_table; + struct SMU71_Discrete_PmFuses power_tune_table; + struct SMU71_Discrete_Ulv ulv_setting; + const struct iceland_pt_defaults *power_tune_defaults; + SMU71_Discrete_MCRegisters mc_regs; + struct iceland_mc_reg_table mc_reg_table; + uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS]; +}; + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c new file mode 100644 index 000000000000..4ccc0b72324d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -0,0 +1,2287 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "polaris10_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "polaris10_smumgr.h" +#include "pppcielanes.h" + +#include "smu_ucode_xfer_vi.h" +#include "smu74_discrete.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "gca/gfx_8_0_d.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "polaris10_pwrvirus.h" +#include "smu7_ppsmc.h" +#include "smu7_smumgr.h" + +#define POLARIS10_SMC_SIZE 0x20000 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VDDC_VDDCI_DELTA 200 +#define MC_CG_ARB_FREQ_F1 0x0b + +static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ + { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, +}; + +static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = { + {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160}, + {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108}, + {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} }; + +static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + *voltage = *mvdd = 0; + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if 
(dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i-1].vddci) { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} + +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table = + &hwmgr->thermal_controller.advanceFanControlParameters; + int i, j, k; + const uint16_t *pdef1; + const uint16_t *pdef2; + + table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + table->FanGainEdge = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainEdge)); + table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + + pdef1 = defaults->BAPMTI_R; + pdef2 = defaults->BAPMTI_RC; + + for (i = 0; i < SMU74_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU74_DTE_SOURCES; j++) { + for (k = 0; k < SMU74_DTE_SINKS; k++) { + table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); + table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int polaris10_populate_svi_load_line(struct 
pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; + + return 0; +} + +static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else { + smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; + smu_data->power_tune_table.LPMLTemperatureMin = + (uint8_t)((temp >> 16) & 0xff); + smu_data->power_tune_table.LPMLTemperatureMax = + (uint8_t)((temp >> 8) & 0xff); + smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); + } + return 0; +} + +static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + +/* TO DO move to hwmgr */ + if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) + || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US( + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + if (polaris10_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + + if (polaris10_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + + if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + if (0 != polaris10_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + if (polaris10_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + if (polaris10_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. 
+ * @return 0 + */ +static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count, level; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + count = data->mvdd_voltage_table.count; + if (count > SMU_MAX_SMIO_LEVELS) + count = SMU_MAX_SMIO_LEVELS; + for (level = 0; level < count; level++) { + table->SmioTable2.Pattern[level].Voltage = + PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ + table->SmioTable2.Pattern[level].Smio = + (uint8_t) level; + table->Smio[level] |= + data->mvdd_voltage_table.entries[level].smio_low; + } + table->SmioMask2 = data->mvdd_voltage_table.mask_low; + + table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); + } + + return 0; +} + +static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + uint32_t count, level; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + count = data->vddci_voltage_table.count; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + if (count > SMU_MAX_SMIO_LEVELS) + count = SMU_MAX_SMIO_LEVELS; + for (level = 0; level < count; ++level) { + table->SmioTable1.Pattern[level].Voltage = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); + table->SmioTable1.Pattern[level].Smio = (uint8_t) level; + + table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; + } + } + + table->SmioMask1 = data->vddci_voltage_table.mask_low; + + return 0; +} + +/** +* Preparation of vddc and vddgfx CAC tables for SMC. +* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + /* tables is already swapped, so in order to use the value from it, + * we need to swap it back. + * We are populating vddc CAC data to BapmVddc table + * in split and merged mode + */ + for (count = 0; count < lookup_table->count; count++) { + index = phm_get_voltage_index(lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid); + table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high); + } + + return 0; +} + +/** +* Preparation of voltage tables for SMC. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ + +static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + polaris10_populate_smc_vddci_table(hwmgr, table); + polaris10_populate_smc_mvdd_table(hwmgr, table); + polaris10_populate_cac_table(hwmgr, table); + + return 0; +} + +static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_Ulv *state) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + return polaris10_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + +/* To Do move to hwmgr */ + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + + +static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + uint32_t i, ref_clk; + + struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; + + ref_clk = smu7_get_xclk(hwmgr); + + if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) { + for (i = 0; i < NUM_SCLK_RANGE; i++) { + table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting; + table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv; + table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } + return; + } + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv; + smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv; + + table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; + table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; + table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } +} + +/** +* Calculates the SCLK dividers using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ +static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, SMU_SclkSetting *sclk_setting) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_clock_dividers_ai dividers; + uint32_t ref_clock; + uint32_t pcc_target_percent, pcc_target_freq, 
ss_target_percent, ss_target_freq; + uint8_t i; + int result; + uint64_t temp; + + sclk_setting->SclkFrequency = clock; + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, ÷rs); + if (result == 0) { + sclk_setting->Fcw_int = dividers.usSclk_fcw_int; + sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac; + sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int; + sclk_setting->PllRange = dividers.ucSclkPllRange; + sclk_setting->Sclk_slew_rate = 0x400; + sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac; + sclk_setting->Pcc_down_slew_rate = 0xffff; + sclk_setting->SSc_En = dividers.ucSscEnable; + sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int; + sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac; + sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac; + return result; + } + + ref_clock = smu7_get_xclk(hwmgr); + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + if (clock > smu_data->range_table[i].trans_lower_frequency + && clock <= smu_data->range_table[i].trans_upper_frequency) { + sclk_setting->PllRange = i; + break; + } + } + + sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); + temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw_frac = temp & 0xffff; + + pcc_target_percent = 10; /* Hardcode 10% for now. */ + pcc_target_freq = clock - (clock * pcc_target_percent / 100); + sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); + + ss_target_percent = 2; /* Hardcode 2% for now. */ + sclk_setting->SSc_En = 0; + if (ss_target_percent) { + sclk_setting->SSc_En = 1; + ss_target_freq = clock - (clock * ss_target_percent / 100); + sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock); + temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw1_frac = temp & 0xffff; + } + + return 0; +} + +/** +* Populates single SMC SCLK structure using the provided engine clock +* +* @param hwmgr the address of the hardware manager +* @param clock the engine clock to use to populate the structure +* @param sclk the SMC SCLK structure to be populated +*/ + +static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU74_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMU_SclkSetting curr_sclk_setting = { 0 }; + + result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); + + /* populate graphics levels */ + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + &level->MinVoltage, &mvdd); + + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + level->ActivityLevel = sclk_al_threshold; + + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->UpHyst = 10; + level->DownHyst = 0; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + 
data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, + hwmgr->display_config.min_core_set_clock_in_sr); + + /* Default to slow, highest DPM level will be + * set to PPSMC_DISPLAY_WATERMARK_LOW later. + */ + if (data->update_up_hyst) + level->UpHyst = (uint8_t)data->up_hyst; + if (data->update_down_hyst) + level->DownHyst = (uint8_t)data->down_hyst; + + level->SclkSetting = curr_sclk_setting; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate); + return 0; +} + +/** +* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states +* +* @param hwmgr the address of the hardware manager +*/ +int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) * + SMU74_MAX_LEVELS_GRAPHICS; + struct SMU74_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t hightest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table)); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + + result = polaris10_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. 
*/ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SPLLShutdownSupport)) + smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0; + + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? i : max_entry); + } else { + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (hightest_pcie_level_enabled + 1))) != 0)) + hightest_pcie_level_enabled++; + + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0)) + lowest_pcie_level_enabled++; + + while ((count < hightest_pcie_level_enabled) && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < + hightest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : + hightest_pcie_level_enabled; + + /* set pcieDpmLevel to hightest_pcie_level_enabled */ + for (i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = hightest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change */ + result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + + +static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + struct cgs_display_info info = {0, 0, NULL}; + uint32_t mclk_stutter_mode_threshold = 40000; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (table_info->vdd_dep_on_mclk) { + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + &mem_level->MinVoltage, &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory " + "VDDC voltage dependency table", return result); + } + + mem_level->MclkFrequency = clock; + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->UpHyst = 0; + mem_level->DownHyst = 100; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + mem_level->StutterEnable = false; + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + data->display_timing.num_existing_displays = info.display_count; + + if (mclk_stutter_mode_threshold && + (clock <= mclk_stutter_mode_threshold) && + (SMUM_READ_FIELD(hwmgr->device, 
DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + return result; +} + +/** +* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states +* +* @param hwmgr the address of the hardware manager +*/ +int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) * + SMU74_MAX_LEVELS_MEMORY; + struct SMU74_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = polaris10_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + if (i == dpm_table->mclk_table.count - 1) { + levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + levels[i].EnabledForActivity = 1; + } + if (result) + return result; + } + + /* In order to prevent MC activity from stutter mode to push DPM up, + * the UVD change complements this by putting the MCLK in + * a higher state by default such that we are not affected by + * up threshold or and MCLK DPM latency. + */ + levels[0].ActivityLevel = 0x1f; + CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + + /* level count will send to smc once at init smc table and never change */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + +/** +* Populates the SMC MVDD structure using the provided memory clock. +* +* @param hwmgr the address of the hardware manager +* @param mclk the MCLK value to be used in the decision if MVDD should be high or low. 
+* @param voltage the SMC VOLTAGE structure to be populated +*/ +static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} + +static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + int result = 0; + uint32_t sclk_frequency; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + sclk_frequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table", + ); + + result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); + PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); + + table->ACPILevel.DeepSleepDivId = 0; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); + + + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table", + ); + + us_mvdd = 0; + if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + 
(data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!polaris10_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level)) + table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); + else + table->MemoryACPILevel.MinMvdd = 0; + + table->MemoryACPILevel.StutterEnable = false; + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} + +static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + + table->VceLevel[count].MinVoltage |= + (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /*retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + + +static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU74_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t)(mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].MinVoltage = 0; + table->SamuLevel[count].Frequency = 
mm_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burst_time; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = (uint8_t)burst_time; + + return 0; +} + +static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct SMU74_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + int result = 0; + + for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) { + result = polaris10_populate_memory_timing_parameters(hwmgr, + hw_data->dpm_table.sclk_table.dpm_levels[i].value, + hw_data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result == 0) + result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j); + if (result != 0) + return result; + } + } + + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU74_Discrete_MCArbDramTimingTable), + SMC_RAM_END); + return result; +} + +static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; 
+ + table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + } + + return result; +} + +static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_sclk->entries[level].clk >= + hw_data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); +
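The loop that follows picks the boot level: the first DPM entry whose clock is at least the VBIOS boot-up clock. A self-contained sketch of the same lookup; names, units and the fall-back to level 0 are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

static uint8_t find_boot_level(const uint32_t *clocks, uint8_t count, uint32_t boot_clock)
{
	uint8_t level;

	for (level = 0; level < count; level++)
		if (clocks[level] >= boot_clock)
			return level;	/* first level that can carry the boot clock */

	return 0;	/* illustrative fall-back; the driver leaves the field untouched */
}

int main(void)
{
	const uint32_t sclk[] = { 30000, 60000, 90000, 113000 };	/* 10 kHz units */

	printf("boot level = %u\n", find_boot_level(sclk, 4, 60000));	/* prints 1 */
	return 0;
}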
for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_mclk->entries[level].clk >= + hw_data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + + +static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + + uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF. + */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (67 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + + if (hwmgr->chip_id == CHIP_POLARIS10) { + min = 1000; + max = 2300; + } else { + min = 1100; + max = 2100; + } + + ro = efuse * (max - min) / 255 + min; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + if (hwmgr->chip_id == CHIP_POLARIS10) { + volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \ + (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); + volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ + (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); + } else { + volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \ + (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); + volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ + (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); + } + + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); + + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +/** +* Populates the SMC VRConfig field in DPM table. 
+* +* @param hwmgr the address of the hardware manager +* @param table the SMC DPM table structure to be populated +* @return always 0 +*/ +static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU74_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!", + ); + } + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start + + offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + + +static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + + SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + int result = 0; + struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; + AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; + AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; + uint32_t tmp, i; + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return result; + + result = atomctrl_get_avfs_information(hwmgr, &avfs_params); + + if (0 == result) { + table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); + table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); + table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); + table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); + table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); + table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); + table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); + table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); + table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); + table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; + 
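The assignments above and below funnel every table field through PP_HOST_TO_SMC_UL/_US because the SMC consumes its tables in big-endian byte order. A minimal stand-alone equivalent of such a 32-bit/16-bit swap on a little-endian host (the driver itself relies on the kernel's cpu_to_be-style helpers rather than open-coding this):

#include <stdint.h>
#include <stdio.h>

static uint32_t host_to_smc_ul(uint32_t v)	/* byte-reverse a 32-bit value */
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static uint16_t host_to_smc_us(uint16_t v)	/* byte-reverse a 16-bit value */
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	printf("0x%08x -> 0x%08x\n", 0x12345678u, host_to_smc_ul(0x12345678u));	/* 0x78563412 */
	printf("0x%04x -> 0x%04x\n", 0x1234, host_to_smc_us(0x1234));		/* 0x3412 */
	return 0;
}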
table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); + table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); + table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; + table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); + AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); + AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); + AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); + AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); + AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); + AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); + AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); + + for (i = 0; i < NUM_VFT_COLUMNS; i++) { + AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); + AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); + } + + result = smu7_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), + &tmp, SMC_RAM_END); + + smu7_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_meanNsigma, + sizeof(AVFS_meanNsigma_t), + SMC_RAM_END); + + result = smu7_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), + &tmp, SMC_RAM_END); + smu7_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_SclkOffset, + sizeof(AVFS_Sclk_Offset_t), + SMC_RAM_END); + + data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); + data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; + } + return result; +} + + +/** +* Initialize the ARB DRAM timing table's index field. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + uint32_t tmp; + int result; + + /* This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. 
+ */ + result = smu7_read_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); + + if (result) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return smu7_write_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); +} + +static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &polaris10_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0]; + +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + pp_atomctrl_clock_dividers_vi dividers; + + polaris10_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control) + polaris10_populate_smc_voltage_tables(hwmgr, table); + + table->SystemFlags = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (hw_data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = polaris10_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT); + } + + result = polaris10_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = polaris10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = polaris10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result); + + result = polaris10_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = polaris10_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = polaris10_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states 
are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. + */ + result = polaris10_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = polaris10_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + result = polaris10_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = polaris10_populate_smc_initailial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot State!", return result); + + result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = polaris10_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + result = polaris10_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); + + table->CurrSclkPllRange = 0xff; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + table->VRConfig = 0; + + result = polaris10_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, + &gpio_pin)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* For porlarity read GPIOPAD_A with assigned Gpio pin + * since VBIOS will program this register to set 'inactive state', + * driver can then determine 'active state' from this and + * program SMU with correct polarity + */ + table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) + & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 
1:0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot) + && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + /* Populate BIF_SCLK levels into SMC DPM table */ + for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) { + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers); + PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result); + + if (i == 0) + table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); + else + table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider)); + } + + for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController), + SMC_RAM_END); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + result = polaris10_init_arb_table_index(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload arb data to SMC memory!", return result); + + result = polaris10_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + return 0; +} + +static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return polaris10_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) +{ + int ret; + struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return 0; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); + + ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ 0 : -1; + + if (!ret) + /* If this param is not changed, this function could fire unnecessarily */ + smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY; + + return ret; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (smu_data->smu7_data.fan_table_start == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. + usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr-> + thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr-> + thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> + thermal_controller.advanceFanControlParameters.ulCycleDelay * + reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, + CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), + SMC_RAM_END); + + if (!res && hwmgr->thermal_controller. 
+ advanceFanControlParameters.ucMinimumPWMLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanMinPwm, + hwmgr->thermal_controller. + advanceFanControlParameters.ucMinimumPWMLimit); + + if (!res && hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit) + res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetFanSclkTarget, + hwmgr->thermal_controller. + advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + + if (res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + return 0; +} + +static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, + UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + else + smu_data->smc_state_table.VceBootLevel = 0; + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + + smu_data->smc_state_table.SamuBootLevel = 0; + 
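polaris10_update_uvd_smc_table(), the _vce_ variant above and the _samu_ variant being set up here all follow one pattern: round the table offset down to a 32-bit boundary, read the whole SMC word through the indirect register interface, mask out the one byte that holds the boot level and OR the new value in. A sketch of that read-modify-write with the register access replaced by a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_smc_word = 0x11223344;	/* stands in for the indirect SMC register */

static void set_byte_in_word(unsigned int byte_index, uint8_t value)
{
	uint32_t word = fake_smc_word;			/* cgs_read_ind_register()    */

	word &= ~(0xffu << (byte_index * 8));		/* e.g. 0x00FFFFFF for byte 3 */
	word |= (uint32_t)value << (byte_index * 8);	/* insert the new boot level  */

	fake_smc_word = word;				/* cgs_write_ind_register()   */
}

int main(void)
{
	set_byte_in_word(3, 0x07);		/* UvdBootLevel sits in the top byte */
	printf("0x%08x\n", fake_smc_word);	/* prints 0x07223344 */
	return 0;
}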
mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + + +static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + int max_entry, i; + + max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ? + SMU74_MAX_LEVELS_LINK : + pcie_table->count; + /* Setup BIF_SCLK levels */ + for (i = 0; i < max_entry; i++) + smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; + return 0; +} + +int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + polaris10_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + polaris10_update_vce_smc_table(hwmgr); + break; + case SMU_SAMU_TABLE: + polaris10_update_samu_smc_table(hwmgr); + break; + case SMU_BIF_TABLE: + polaris10_update_bif_smc_table(hwmgr); + default: + break; + } + return 0; +} + +int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU74_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + PP_ASSERT_WITH_CODE((result == 0), + "Failed to update SCLK threshold!", return result); + + result = polaris10_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU74_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU74_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU74_SoftRegisters, VBlankTimeout); + case 
UcodeLoadStatus: + return offsetof(SMU74_SoftRegisters, UcodeLoadStatus); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU74_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("cant't get the offset of type %x member %x \n", type, member); + return 0; +} + +uint32_t polaris10_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU74_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU74_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU74_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU74_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU74_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU74_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU74_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU74_MAX_LEVELS_MVDD; + case SMU_UVD_MCLK_HANDSHAKE_DISABLE: + return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; + } + + printk("cant't get the mac of %x \n", value); + return 0; +} + +/** +* Get the location of various tables inside the FW image. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->smu7_data.dpm_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.soft_regs_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.mc_reg_table_start = tmp; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.fan_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.arb_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU74_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? 
true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h new file mode 100644 index 000000000000..5ade3cea8bb7 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h @@ -0,0 +1,42 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef POLARIS10_SMC_H +#define POLARIS10_SMC_H + +#include "smumgr.h" + + +int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int polaris10_init_smc_table(struct pp_hwmgr *hwmgr); +int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr); +int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member); +uint32_t polaris10_get_mac_definition(uint32_t value); +int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr); +bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 5dba7c509710..5c3598ab7dae 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -38,16 +38,11 @@ #include "ppatomctrl.h" #include "pp_debug.h" #include "cgs_common.h" +#include "polaris10_smc.h" +#include "smu7_ppsmc.h" +#include "smu7_smumgr.h" -#define POLARIS10_SMC_SIZE 0x20000 -#define VOLTAGE_SCALE 4 - -/* Microcode file is stored in this buffer */ -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /* 128 *1024 */ - -#define SMC_RAM_END 0x40000 +#define PPPOLARIS10_TARGETACTIVITY_DFLT 50 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ @@ -62,572 +57,9 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } }; -static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = - {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 
0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; - -/** -* Set the address for reading/writing the SMC SRAM space. -* @param smumgr the address of the powerplay hardware manager. -* @param smcAddress the address in the SMC RAM to access. -*/ -static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) -{ - PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); - PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); - - return 0; -} - -/** -* Copy bytes from SMC RAM space into driver memory. -* -* @param smumgr the address of the powerplay SMU manager. -* @param smc_start_address the start address in the SMC RAM to copy bytes from -* @param src the byte array to copy the bytes to. -* @param byte_count the number of bytes to copy. -*/ -int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) -{ - uint32_t data; - uint32_t addr; - uint8_t *dest_byte; - uint8_t i, data_byte[4] = {0}; - uint32_t *pdata = (uint32_t *)&data_byte; - - PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;); - PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1); - - addr = smc_start_address; - - while (byte_count >= 4) { - polaris10_read_smc_sram_dword(smumgr, addr, &data, limit); - - *dest = PP_SMC_TO_HOST_UL(data); - - dest += 1; - byte_count -= 4; - addr += 4; - } - - if (byte_count) { - polaris10_read_smc_sram_dword(smumgr, addr, &data, limit); - *pdata = PP_SMC_TO_HOST_UL(data); - /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ - dest_byte = (uint8_t *)dest; - for (i = 0; i < byte_count; i++) - dest_byte[i] = data_byte[i]; - } - - return 0; -} - -/** -* Copy bytes from an array into the SMC RAM space. -* -* @param pSmuMgr the address of the powerplay SMU manager. -* @param smc_start_address the start address in the SMC RAM to copy bytes to. -* @param src the byte array to copy the bytes from. -* @param byte_count the number of bytes to copy. -*/ -int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, - const uint8_t *src, uint32_t byte_count, uint32_t limit) -{ - int result; - uint32_t data = 0; - uint32_t original_data; - uint32_t addr = 0; - uint32_t extra_shift; - - PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1); - PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1); - - addr = smc_start_address; - - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first. 
*/ - data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - - result = polaris10_set_smc_sram_address(smumgr, addr, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - - data = 0; - - result = polaris10_set_smc_sram_address(smumgr, addr, limit); - - if (0 != result) - return result; - - - original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); - - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - /* Bytes are written into the SMC addres space with the MSB first. */ - data = (0x100 * data) + *src++; - byte_count--; - } - - data <<= extra_shift; - - data |= (original_data & ~((~0UL) << extra_shift)); - - result = polaris10_set_smc_sram_address(smumgr, addr, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); - } - - return 0; -} - - -static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; - - polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -/** -* Return if the SMC is currently running. -* -* @param smumgr the address of the powerplay hardware manager. -*/ -bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); -} - -static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) -{ - uint32_t efuse; - - efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); - efuse &= 0x00000001; - if (efuse) - return true; - - return false; -} - -/** -* Send a message to the SMC, and wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - int ret; - - if (!polaris10_is_smc_ram_running(smumgr)) - return -1; - - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); - - if (ret != 1) - printk("\n failed to send pre message %x ret is %d \n", msg, ret); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); - - if (ret != 1) - printk("\n failed to send message %x ret is %d \n", msg, ret); - - return 0; -} - - -/** -* Send a message to the SMC, and do not wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return Always return 0. -*/ -int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) -{ - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - -/** -* Send a message to the SMC with parameter -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. 
-*/ -int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) -{ - if (!polaris10_is_smc_ram_running(smumgr)) { - return -1; - } - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return polaris10_send_msg_to_smc(smumgr, msg); -} - - -/** -* Send a message to the SMC with parameter, do not wait for response -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) -{ - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return polaris10_send_msg_to_smc_without_waiting(smumgr, msg); -} - -int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr) -{ - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Message.\n"); - - return 0; -} - -/** -* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr) -{ - /* If the SMC is not even on it qualifies as inactive. */ - if (!polaris10_is_smc_ram_running(smumgr)) - return -1; - - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); - return 0; -} - - -/** -* Upload the SMC firmware to the SMC microcontroller. -* -* @param smumgr the address of the powerplay hardware manager. -* @param pFirmware the data structure containing the various sections of the firmware. 
-*/ -static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) -{ - uint32_t byte_count = length; - - PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1); +static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { + 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); - - for (; byte_count >= 4; byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); - - PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1); - - return 0; -} - -static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SMU_SK: - result = CGS_UCODE_ID_SMU_SK; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} - -static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr) -{ - int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - - struct cgs_firmware_info info = {0}; - - if (smu_data->security_hard_key == 1) - cgs_get_firmware_info(smumgr->device, - polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); - else - cgs_get_firmware_info(smumgr->device, - polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); - - /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/ - result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE); - - return result; -} - -/** -* Read a 32bit value from the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smcAddress the address in the SMC RAM to access. -* @param value and output parameter for the data read from the SMC SRAM. -*/ -int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) -{ - int result; - - result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); - return 0; -} - -/** -* Write a 32bit value to the SMC SRAM space. -* ALL PARAMETERS ARE IN HOST BYTE ORDER. -* @param smumgr the address of the powerplay hardware manager. -* @param smc_addr the address in the SMC RAM to access. -* @param value to write to the SMC SRAM. 
-*/ -int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) -{ - int result; - - result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit); - - if (result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); - - return 0; -} - - -int polaris10_smu_fini(struct pp_smumgr *smumgr) -{ - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} - -/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */ -static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type) -{ - uint32_t result = 0; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - printk("UCode type is out of range! \n"); - result = 0; - } - - return result; -} - -/* Populate one firmware image to the data structure */ - -static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - int result = 0; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info(smumgr->device, - polaris10_convert_fw_type_to_cgs(fw_type), - &info); - - if (!result) { - entry->version = info.version; - entry->id = (uint16_t)fw_type; - entry->image_addr_high = smu_upper_32_bits(info.mc_addr); - entry->image_addr_low = smu_lower_32_bits(info.mc_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = info.image_size; - entry->num_register_entries = 0; - } - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - - return 0; -} - -static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr) -{ - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - uint32_t fw_to_load; - - int result = 0; - struct SMU_DRAMData_TOC *toc; - - if (!smumgr->reload_fw) { - printk(KERN_INFO "[ powerplay ] skip reloading...\n"); - return 0; - } - - if (smu_data->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus), - 0x0); - - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); - - toc = (struct SMU_DRAMData_TOC *)smu_data->header; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == 
polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1); - - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); - polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK; - - if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); - - return result; -} - -/* Check if the FW has been loaded, SMU will not return if loading has not finished. */ -static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) -{ - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type); - uint32_t ret; - /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */ - ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, - smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus), - fw_mask, fw_mask); - - return ret; -} - -static int polaris10_reload_firmware(struct pp_smumgr *smumgr) -{ - return smumgr->smumgr_funcs->start_smu(smumgr); -} static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) { @@ -669,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } @@ -697,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_size = sizeof(avfs_graphics_level_polaris10); u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE); - PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &dpm_table_start, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table", @@ -708,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_address = dpm_table_start + 
offsetof(SMU74_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address, (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC", return -1); graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, (uint8_t *)(&avfs_graphics_level_polaris10), graphics_level_size, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!", @@ -723,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!", return -1); @@ -732,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd); - PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!", return -1); @@ -793,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = polaris10_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result != 0) return result; @@ -812,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) /* Call Test SMU message with 0x20000 offset to trigger SMU start */ - polaris10_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(smumgr); /* Wait done bit to be set */ /* Check pass/failed indicator */ @@ -853,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = polaris10_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - polaris10_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); @@ -881,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) bool SMU_VFT_INTACT; /* Only start SMC if SMC RAM is not running */ - if (!polaris10_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(smumgr)) { SMU_VFT_INTACT = false; smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); - smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, 
CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ if (smu_data->protected_mode == 0) { @@ -894,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) /* If failed, try with different security Key. */ if (result != 0) { - smu_data->security_hard_key ^= 1; + smu_data->smu7_data.security_hard_key ^= 1; result = polaris10_start_smu_in_protection_mode(smumgr); } } @@ -906,89 +338,69 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) } else SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */ - smu_data->post_initial_boot = true; polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT); /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), - &(smu_data->soft_regs_start), 0x40000); + smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), + &(smu_data->smu7_data.soft_regs_start), 0x40000); - result = polaris10_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(smumgr); return result; } +static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + uint32_t efuse; + + efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse &= 0x00000001; + if (efuse) + return true; + + return false; +} + static int polaris10_smu_init(struct pp_smumgr *smumgr) { - struct polaris10_smumgr *smu_data; - uint8_t *internal_buf; - uint64_t mc_addr = 0; - /* Allocate memory for backend private data */ - smu_data = (struct polaris10_smumgr *)(smumgr->backend); - smu_data->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - smu_data->smu_buffer.data_size = 200*4096; - smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; -/* Allocate FW image data structure and header buffer and - * send the header buffer address to SMU */ - smu_allocate_memory(smumgr->device, - smu_data->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &smu_data->header_buffer.kaddr, - &smu_data->header_buffer.handle); - - smu_data->header = smu_data->header_buffer.kaddr; - smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != smu_data->header), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)smu_data->header_buffer.handle); - return -1); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + int i; -/* Allocate buffer for SMU internal buffer and send the address to SMU. 
- * Iceland SMU does not need internal buffer.*/ - smu_allocate_memory(smumgr->device, - smu_data->smu_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &smu_data->smu_buffer.kaddr, - &smu_data->smu_buffer.handle); - - internal_buf = smu_data->smu_buffer.kaddr; - smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != internal_buf), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)smu_data->smu_buffer.handle); - return -1;); + if (smu7_init(smumgr)) + return -EINVAL; if (polaris10_is_hw_avfs_present(smumgr)) smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; else smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; + for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) + smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT; + return 0; } -static const struct pp_smumgr_func ellsemere_smu_funcs = { +static const struct pp_smumgr_func polaris10_smu_funcs = { .smu_init = polaris10_smu_init, - .smu_fini = polaris10_smu_fini, + .smu_fini = smu7_smu_fini, .start_smu = polaris10_start_smu, - .check_fw_load_finish = polaris10_check_fw_load_finish, - .request_smu_load_fw = polaris10_reload_firmware, + .check_fw_load_finish = smu7_check_fw_load_finish, + .request_smu_load_fw = smu7_reload_firmware, .request_smu_load_specific_fw = NULL, - .send_msg_to_smc = polaris10_send_msg_to_smc, - .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter, + .send_msg_to_smc = smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .update_smc_table = polaris10_update_smc_table, + .get_offsetof = polaris10_get_offsetof, + .process_firmware_header = polaris10_process_firmware_header, + .init_smc_table = polaris10_init_smc_table, + .update_sclk_threshold = polaris10_update_sclk_threshold, + .thermal_avfs_enable = polaris10_thermal_avfs_enable, + .thermal_setup_fan_table = polaris10_thermal_setup_fan_table, + .populate_all_graphic_levels = polaris10_populate_all_graphic_levels, + .populate_all_memory_levels = polaris10_populate_all_memory_levels, + .get_mac_definition = polaris10_get_mac_definition, + .is_dpm_running = polaris10_is_dpm_running, }; int polaris10_smum_init(struct pp_smumgr *smumgr) @@ -998,10 +410,10 @@ int polaris10_smum_init(struct pp_smumgr *smumgr) polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL); if (polaris10_smu == NULL) - return -1; + return -EINVAL; smumgr->backend = polaris10_smu; - smumgr->smumgr_funcs = &ellsemere_smu_funcs; + smumgr->smumgr_funcs = &polaris10_smu_funcs; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h index e5377aec057f..49ebf1d5a53c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h @@ -24,45 +24,52 @@ #ifndef _POLARIS10_SMUMANAGER_H #define _POLARIS10_SMUMANAGER_H -#include <polaris10_ppsmc.h> + #include <pp_endian.h> +#include "smu74.h" +#include "smu74_discrete.h" +#include "smu7_smumgr.h" + +#define SMC_RAM_END 0x40000 struct polaris10_avfs { enum AVFS_BTC_STATUS avfs_btc_status; uint32_t avfs_btc_param; }; -struct polaris10_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; +struct 
polaris10_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; + + uint32_t DisplayCac; + uint32_t BAPM_TEMP_GRADIENT; + uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; + uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; +}; + + + +struct polaris10_range_table { + uint32_t trans_lower_frequency; /* in 10khz */ + uint32_t trans_upper_frequency; }; struct polaris10_smumgr { - uint8_t *header; - uint8_t *mec_image; - struct polaris10_buffer_entry smu_buffer; - struct polaris10_buffer_entry header_buffer; - uint32_t soft_regs_start; - uint8_t *read_rrm_straps; - uint32_t read_drm_straps_mc_address_high; - uint32_t read_drm_straps_mc_address_low; - uint32_t acpi_optimization; - bool post_initial_boot; + struct smu7_smumgr smu7_data; uint8_t protected_mode; - uint8_t security_hard_key; struct polaris10_avfs avfs; + SMU74_Discrete_DpmTable smc_state_table; + struct SMU74_Discrete_Ulv ulv_setting; + struct SMU74_Discrete_PmFuses power_tune_table; + struct polaris10_range_table range_table[NUM_SCLK_RANGE]; + const struct polaris10_pt_defaults *power_tune_defaults; + uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; + uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; }; -int polaris10_smum_init(struct pp_smumgr *smumgr); - -int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit); -int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit); -int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, - const uint8_t *src, uint32_t byte_count, uint32_t limit); - #endif - diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c new file mode 100644 index 000000000000..6af744f42ec9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -0,0 +1,589 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + + +#include "smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "smu7_ppsmc.h" +#include "smu7_smumgr.h" + +#define SMU7_SMC_SIZE 0x20000 + +static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) +{ + PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); + PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ + return 0; +} + + +int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) +{ + uint32_t data; + uint32_t addr; + uint8_t *dest_byte; + uint8_t i, data_byte[4] = {0}; + uint32_t *pdata = (uint32_t *)&data_byte; + + PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL); + PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + addr = smc_start_address; + + while (byte_count >= 4) { + smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + + *dest = PP_SMC_TO_HOST_UL(data); + + dest += 1; + byte_count -= 4; + addr += 4; + } + + if (byte_count) { + smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + *pdata = PP_SMC_TO_HOST_UL(data); + /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ + dest_byte = (uint8_t *)dest; + for (i = 0; i < byte_count; i++) + dest_byte[i] = data_byte[i]; + } + + return 0; +} + + +int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + int result; + uint32_t data = 0; + uint32_t original_data; + uint32_t addr = 0; + uint32_t extra_shift; + + PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL); + PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + addr = smc_start_address; + + while (byte_count >= 4) { + /* Bytes are written into the SMC addres space with the MSB first. */ + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + + result = smu7_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + + data = 0; + + result = smu7_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + + original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* Bytes are written into the SMC addres space with the MSB first. 
*/ + data = (0x100 * data) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + result = smu7_set_smc_sram_address(smumgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + } + + return 0; +} + + +int smu7_program_jump_on_start(struct pp_smumgr *smumgr) +{ + static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; + + smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); +} + +int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + int ret; + + if (!smu7_is_smc_ram_running(smumgr)) + return -EINVAL; + + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send pre message %x ret is %d \n", msg, ret); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send message %x ret is %d \n", msg, ret); + + return 0; +} + +int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) +{ + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + return 0; +} + +int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +{ + if (!smu7_is_smc_ram_running(smumgr)) { + return -EINVAL; + } + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return smu7_send_msg_to_smc(smumgr, msg); +} + +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +{ + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return smu7_send_msg_to_smc_without_waiting(smumgr, msg); +} + +int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr) +{ + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + + if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) + printk("Failed to send Message.\n"); + + return 0; +} + +int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr) +{ + if (!smu7_is_smc_ram_running(smumgr)) + return -EINVAL; + + SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); + return 0; +} + + +enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SMU_SK: + result = CGS_UCODE_ID_SMU_SK; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + 
break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + + +int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) +{ + int result; + + result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + return 0; +} + +int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) +{ + int result; + + result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + + if (result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); + + return 0; +} + +/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */ + +static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) +{ + uint32_t result = 0; + + switch (fw_type) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + printk("UCode type is out of range! \n"); + result = 0; + } + + return result; +} + +static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, + uint32_t fw_type, + struct SMU_Entry *entry) +{ + int result = 0; + struct cgs_firmware_info info = {0}; + + result = cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(fw_type), + &info); + + if (!result) { + entry->version = info.version; + entry->id = (uint16_t)fw_type; + entry->image_addr_high = smu_upper_32_bits(info.mc_addr); + entry->image_addr_low = smu_lower_32_bits(info.mc_addr); + entry->meta_data_addr_high = 0; + entry->meta_data_addr_low = 0; + entry->data_size_byte = info.image_size; + entry->num_register_entries = 0; + } + + if (fw_type == UCODE_ID_RLC_G) + entry->flags = 1; + else + entry->flags = 0; + + return 0; +} + +int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) +{ + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + uint32_t fw_to_load; + int result = 0; + struct SMU_DRAMData_TOC *toc; + + if (!smumgr->reload_fw) { + printk(KERN_INFO "[ powerplay ] skip reloading...\n"); + return 0; + } + + if (smu_data->soft_regs_start) + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + smu_data->soft_regs_start + smum_get_offsetof(smumgr, + SMU_SoftRegisters, UcodeLoadStatus), + 0x0); + + if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK; + } else { + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK + + 
UCODE_ID_CP_MEC_JT1_MASK + + UCODE_ID_CP_MEC_JT2_MASK; + } + + toc = (struct SMU_DRAMData_TOC *)smu_data->header; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", return -EINVAL); + + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); + + if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) + printk(KERN_ERR "Fail to Request SMU Load uCode"); + + return result; +} + +/* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ +int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) +{ + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); + uint32_t ret; + + ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, + smu_data->soft_regs_start + smum_get_offsetof(smumgr, + SMU_SoftRegisters, UcodeLoadStatus), + fw_mask, fw_mask); + + return ret; +} + +int smu7_reload_firmware(struct pp_smumgr *smumgr) +{ + return smumgr->smumgr_funcs->start_smu(smumgr); +} + +static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) +{ + uint32_t byte_count = length; + + PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); + + for (; byte_count >= 4; byte_count -= 4) + cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + + PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); + + return 0; +} + + +int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) +{ + int result = 0; + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + struct cgs_firmware_info info = {0}; + + if (smu_data->security_hard_key == 1) + cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + else + cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); + + result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); + + return result; +} + + +int smu7_init(struct pp_smumgr *smumgr) +{ + struct smu7_smumgr *smu_data; + uint8_t *internal_buf; + uint64_t mc_addr = 0; + + /* Allocate memory for backend private data */ + smu_data = (struct smu7_smumgr *)(smumgr->backend); + smu_data->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + smu_data->smu_buffer.data_size = 200*4096; + +/* Allocate FW image data structure and header buffer and + * send the header buffer address to SMU */ + smu_allocate_memory(smumgr->device, + smu_data->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &smu_data->header_buffer.kaddr, + &smu_data->header_buffer.handle); + + smu_data->header = smu_data->header_buffer.kaddr; + smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != smu_data->header), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)smu_data->header_buffer.handle); + return -EINVAL); + + smu_allocate_memory(smumgr->device, + smu_data->smu_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &smu_data->smu_buffer.kaddr, + &smu_data->smu_buffer.handle); + + internal_buf = smu_data->smu_buffer.kaddr; + smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != internal_buf), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)smu_data->smu_buffer.handle); + return -EINVAL); + + return 0; +} + + +int smu7_smu_fini(struct pp_smumgr 
*smumgr) +{ + if (smumgr->backend) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h new file mode 100644 index 000000000000..76352f2423ae --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -0,0 +1,87 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _SMU7_SMUMANAGER_H +#define _SMU7_SMUMANAGER_H + + +#include <pp_endian.h> + +#define SMC_RAM_END 0x40000 +#define mmSMC_IND_INDEX_11 0x01AC +#define mmSMC_IND_DATA_11 0x01AD + +struct smu7_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +struct smu7_smumgr { + uint8_t *header; + uint8_t *mec_image; + struct smu7_buffer_entry smu_buffer; + struct smu7_buffer_entry header_buffer; + + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + uint32_t ulv_setting_starts; + uint8_t security_hard_key; + uint32_t acpi_optimization; +}; + + +int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + uint32_t *dest, uint32_t byte_count, uint32_t limit); +int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit); +int smu7_program_jump_on_start(struct pp_smumgr *smumgr); +bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr); +int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); +int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg); +int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, + uint32_t parameter); +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter); +int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr); +int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr); + +enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); +int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t *value, uint32_t limit); +int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, + uint32_t value, uint32_t limit); + +int smu7_request_smu_load_fw(struct pp_smumgr *smumgr); +int 
smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type); +int smu7_reload_firmware(struct pp_smumgr *smumgr); +int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr); +int smu7_init(struct pp_smumgr *smumgr); +int smu7_smu_fini(struct pp_smumgr *smumgr); + +#endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 7723473e51a0..e5812aa456f3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -28,10 +28,7 @@ #include "smumgr.h" #include "cgs_common.h" #include "linux/delay.h" -#include "cz_smumgr.h" -#include "tonga_smumgr.h" -#include "fiji_smumgr.h" -#include "polaris10_smumgr.h" + int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) { @@ -47,7 +44,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) smumgr->device = pp_init->device; smumgr->chip_family = pp_init->chip_family; smumgr->chip_id = pp_init->chip_id; - smumgr->hw_revision = pp_init->rev_id; smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; smumgr->reload_fw = 1; handle->smu_mgr = smumgr; @@ -58,6 +54,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { + case CHIP_TOPAZ: + iceland_smum_init(smumgr); + break; case CHIP_TONGA: tonga_smum_init(smumgr); break; @@ -87,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr) return 0; } +int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable) + return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr); + + return 0; +} + +int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table) + return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); + + return 0; +} + +int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + + if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold) + return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr); + + return 0; +} + +int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + + if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table) + return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type); + + return 0; +} + +uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member) +{ + if (NULL != smumgr->smumgr_funcs->get_offsetof) + return smumgr->smumgr_funcs->get_offsetof(type, member); + + return 0; +} + +int smum_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header) + return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr); + return 0; +} + int smum_get_argument(struct pp_smumgr *smumgr) { if (NULL != smumgr->smumgr_funcs->get_argument) @@ -95,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr) return 0; } +uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value) +{ + if (NULL != smumgr->smumgr_funcs->get_mac_definition) + return smumgr->smumgr_funcs->get_mac_definition(value); + + return 0; +} + int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table) { if (NULL != smumgr->smumgr_funcs->download_pptable_settings) return smumgr->smumgr_funcs->download_pptable_settings(smumgr, table); - return 0; } @@ -268,3 +325,44 @@ int smu_free_memory(void *device, void *handle) return 0; } + +int smum_init_smc_table(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table) + return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr); + + return 0; +} + +int smum_populate_all_graphic_levels(struct pp_hwmgr 
*hwmgr) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels) + return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); + + return 0; +} + +int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels) + return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr); + + return 0; +} + +/*this interface is needed by island ci/vi */ +int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table) + return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); + + return 0; +} + +bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running) + return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr); + + return true; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c new file mode 100644 index 000000000000..de2a24d85f48 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -0,0 +1,3206 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * + */ + +#include "tonga_smc.h" +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "atombios.h" +#include "tonga_smumgr.h" +#include "pppcielanes.h" +#include "pp_endian.h" +#include "smu7_ppsmc.h" + +#include "smu72_discrete.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define MC_CG_ARB_FREQ_F1 0x0b +#define VDDC_VDDCI_DELTA 200 + + +static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { +/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + {1, 0xF, 0xFD, 0x19, + 5, 45, 0, 0xB0000, + {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, + 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, + 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4} + }, +}; + +/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */ +static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = { + {600, 1050, 3, 0}, + {600, 1050, 6, 1} +}; + +/* [FF, SS] type, [] 4 voltage ranges, + * and [Floor Freq, Boundary Freq, VID min , VID max] + */ +static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = { + { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, + { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } +}; + +/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */ +static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = { + {0, 1, 3, 2, 4, 5}, + {0, 2, 4, 5, 6, 5} +}; + +/* PPGen has the gain setting generated in x * 100 unit + * This function is to convert the unit to x * 4096(0x1000) unit. 
+ * This is the unit expected by SMC firmware + */ + + +static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + voltage->VddGfx = phm_get_voltage_index( + pptable_info->vddgfx_lookup_table, + allowed_clock_voltage_table->entries[i].vddgfx); + voltage->Vddc = phm_get_voltage_index( + pptable_info->vddc_lookup_table, + allowed_clock_voltage_table->entries[i].vddc); + + if (allowed_clock_voltage_table->entries[i].vddci) + voltage->Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci); + else + voltage->Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA); + + + if (allowed_clock_voltage_table->entries[i].mvdd) + *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd; + + voltage->Phases = 1; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + allowed_clock_voltage_table->entries[i-1].vddgfx); + voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table, + allowed_clock_voltage_table->entries[i-1].vddc); + + if (allowed_clock_voltage_table->entries[i-1].vddci) + voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table, + allowed_clock_voltage_table->entries[i-1].vddci); + + if (allowed_clock_voltage_table->entries[i-1].mvdd) + *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd; + + return 0; +} + + +/** + * Vddc table preparation for SMC. + * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + unsigned int count; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + table->VddcTable[count] = + PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + } + return 0; +} + +/** + * VddGfx table preparation for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + unsigned int count; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { + table->VddGfxLevelCount = data->vddgfx_voltage_table.count; + for (count = 0; count < data->vddgfx_voltage_table.count; count++) { + table->VddGfxTable[count] = + PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount); + } + return 0; +} + +/** + * Vddci table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + + table->VddciLevelCount = data->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + table->VddciTable[count] = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + table->SmioTable1.Pattern[count].Voltage = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */ + table->SmioTable1.Pattern[count].Smio = + (uint8_t) count; + table->Smio[count] |= + data->vddci_voltage_table.entries[count].smio_low; + table->VddciTable[count] = + PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); + } + } + + table->SmioMask1 = data->vddci_voltage_table.mask_low; + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + table->MvddLevelCount = data->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + table->SmioTable2.Pattern[count].Voltage = + PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ + table->SmioTable2.Pattern[count].Smio = + (uint8_t) count; + table->Smio[count] |= + data->mvdd_voltage_table.entries[count].smio_low; + } + table->SmioMask2 = data->mvdd_voltage_table.mask_low; + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + } + + return 0; +} + +/** + * Preparation of vddc and vddgfx CAC tables for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = + pptable_info->vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = + pptable_info->vddc_lookup_table; + + /* table is already swapped, so in order to use the value from it + * we need to swap it back. + */ + uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount); + uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount); + + for (count = 0; count < vddc_level_count; count++) { + /* We are populating vddc CAC data to BapmVddc table in split and merged mode */ + index = phm_get_voltage_index(vddc_lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); + table->BapmVddcVidHiSidd2[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); + } + + if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) { + /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */ + for (count = 0; count < vddgfx_level_count; count++) { + index = phm_get_voltage_index(vddgfx_lookup_table, + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid)); + table->BapmVddGfxVidHiSidd2[count] = + convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high); + } + } else { + for (count = 0; count < vddc_level_count; count++) { + index = phm_get_voltage_index(vddc_lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddGfxVidLoSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); + table->BapmVddGfxVidHiSidd[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); + table->BapmVddGfxVidHiSidd2[count] = + convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); + } + } + + return 0; +} + +/** + * Preparation of voltage tables for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ + +static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result; + + result = tonga_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "can not populate VDDC voltage table to SMC", + return -EINVAL); + + result = tonga_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "can not populate VDDCI voltage table to SMC", + return -EINVAL); + + result = tonga_populate_smc_vdd_gfx_table(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "can not populate VDDGFX voltage table to SMC", + return -EINVAL); + + result = tonga_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "can not populate MVDD voltage table to SMC", + return -EINVAL); + + result = tonga_populate_cac_tables(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "can not populate CAC voltage tables to SMC", + return -EINVAL); + + return 0; +} + +static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU72_Discrete_Ulv *state) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU72_Discrete_DpmTable *table) +{ + return tonga_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +/** + * Calculates the SCLK dividers using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t reference_clock; + uint32_t reference_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ + reference_clock = atomctrl_get_reference_clock(hwmgr); + + reference_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider*/ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup*/ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + + uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; + if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + */ + /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ + uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); + + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); + + cg_spll_spread_spectrum = 
PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = + PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint16_t sclk_activity_level_threshold, + SMU72_Discrete_GraphicsLevel *graphic_level) +{ + int result; + uint32_t mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + /* populate graphics levels*/ + result = tonga_get_dependecy_volt_by_clk(hwmgr, + pptable_info->vdd_dep_on_sclk, engine_clock, + &graphic_level->MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((!result), + "can not find VDDC voltage value for VDDC " + "engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + /* Indicates maximum activity level for this performance level. 
50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 0; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + data->display_timing.min_clock_in_sr = + hwmgr->display_config.min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + graphic_level->DeepSleepDivId = + smu7_get_sleep_divider_id_from_clock(engine_clock, + data->display_timing.min_clock_in_sr); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (!result) { + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/ + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; + uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count; + uint32_t level_array_address = smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) * + SMU72_MAX_LEVELS_GRAPHICS; + + SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; + + uint32_t i, max_entry; + uint8_t highest_pcie_level_enabled = 0; + uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0; + uint8_t count = 0; + int result = 0; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = tonga_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result != 0) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + } + + /* Only enable level 0 for now. 
*/ + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + /* set highest level watermark to high */ + if (dpm_table->sclk_table.count > 1) + smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((pcie_entry_count >= 1), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/ + for (i = 0; i < dpm_table->sclk_table.count; i++) { + smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? i : max_entry); + } + } else { + if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask) + printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !"); + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1<<(highest_pcie_level_enabled+1))) != 0)) { + highest_pcie_level_enabled++; + } + + while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1<<lowest_pcie_level_enabled)) == 0)) { + lowest_pcie_level_enabled++; + } + + while ((count < highest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1<<(lowest_pcie_level_enabled+1+count))) == 0)) { + count++; + } + mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? + (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled; + + + /* set pcieDpmLevel to highest_pcie_level_enabled*/ + for (i = 2; i < dpm_table->sclk_table.count; i++) + smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled*/ + smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change*/ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, + (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int tonga_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU72_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = 
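+ /* + * Example for the PCIe level selection above: with pcie_dpm_enable_mask = 0x7 + * (levels 0..2 enabled) the loops yield lowest = 0, highest = 2 and mid = 1, so + * SCLK level 0 is tied to PCIe level 0, SCLK level 1 to PCIe level 1 and all + * higher SCLK levels to PCIe level 2. + */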
data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE( + !result, + "Error retrieving Memory Clock Parameters from VBIOS.", + return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, + mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, + mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, + mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, + mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, + mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, + mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, + mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. 
reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss.Info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + + return mc_para_index; +} + +static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + + return mc_para_index; +} + + +static int tonga_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU72_Discrete_MemoryLevel *memory_level + ) +{ + uint32_t mvdd = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_stutter_mode_threshold = 30000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (NULL != pptable_info->vdd_dep_on_mclk) { + result = tonga_get_dependecy_volt_by_clk(hwmgr, + pptable_info->vdd_dep_on_mclk, + memory_clock, + &memory_level->MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE( + !result, + "can not find MinVddc voltage value from memory VDDC " + "voltage 
dependency table", + return result); + } + + if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) + memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value; + else + memory_level->MinMvdd = mvdd; + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 0; + memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + if ((mclk_stutter_mode_threshold != 0) && + (memory_clock <= mclk_stutter_mode_threshold) && + (!data->is_uvd_enabled) + && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) + && (data->display_timing.num_existing_displays <= 2) + && (data->display_timing.num_existing_displays != 0)) + memory_level->StutterEnable = 1; + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } else { + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } + + } else { + dll_state_on = data->dll_default_on; + } + } else { + memory_level->StrobeRatio = + tonga_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
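+ /* + * Illustration of the thresholds above (clocks in 10 kHz units): a memory clock + * of 25000 (250 MHz) is a stutter/strobe candidate (<= 30000/40000) with EDC left + * off, while 80000 (800 MHz) disables strobe mode, enables EDC read/write + * (> 40000) and, on GDDR5, gets StrobeRatio = (80000 - 60000) / 5000 = 4 from + * tonga_get_mclk_frequency_ratio(). + */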
1 : 0; + } + + result = tonga_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + + /* populate MCLK dpm table to SMU7 */ + uint32_t level_array_address = + smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = + sizeof(SMU72_Discrete_MemoryLevel) * + SMU72_MAX_LEVELS_MEMORY; + SMU72_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = tonga_populate_single_memory_level( + hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (result) + return result; + } + + /* Only enable level 0 for now.*/ + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in a higher state + * by default such that we are not effected by up threshold or and MCLK DPM latency. 
+ */ + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change*/ + result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pattern) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find the first MVDD entry whose clock is at least the requested one */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + smio_pattern->Voltage = + data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else { + return -EINVAL; + } + + return 0; +} + + +static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + + SMIO_Pattern voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + table->ACPILevel.MinVoltage = + smu_data->smc_state_table.GraphicsLevel[0].MinVoltage; + + /* run the ACPI state at the reference clock */ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, &dividers); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2, + SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = 
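+ /* + * Example for tonga_populate_mvdd_value() above, assuming a hypothetical + * vdd_dep_on_mclk table with clk = {20000, 40000, 60000}: a request of 45000 + * matches the first entry whose clk is not below it (index 2), so the MVDD value + * of entry 2 is used, i.e. the voltage is always rounded up to the next + * supported level. + */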
data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ + table->MemoryACPILevel.MinVoltage = + smu_data->smc_state_table.MemoryLevel[0].MinVoltage; + + /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ + + if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 
0; + + return result; +} + +static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->UvdLevelCount = (uint8_t) (mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->UvdLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? + phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->UvdLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->UvdLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value from VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi( + hwmgr, + table->UvdLevel[count].VclkFrequency, + &dividers); + + PP_ASSERT_WITH_CODE((!result), + "cannot find divider id for Vclk clock", + return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((!result), + "cannot find divider id for Dclk clock", + return result); + + table->UvdLevel[count].DclkDivider = + (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + } + + return result; + +} + +static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->VceLevelCount = (uint8_t) (mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = + mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->VceLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? 
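+ /* + * The MinVoltage pattern used for the UVD levels above is repeated for the VCE, + * ACP and SAMU levels below: VDDC (and, with split-rail SVID2 control, VDDGFX) is + * looked up by index in the voltage lookup tables, while VDDCI is requested + * VDDC_VDDCI_DELTA below the entry's VDDC value. + */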
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->VceLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->VceLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value from VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((!result), + "cannot find divider id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + } + + return result; +} + +static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->AcpLevelCount = (uint8_t) (mm_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = + pptable_info->mm_dep_table->entries[count].aclk; + table->AcpLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->AcpLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? + phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->AcpLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->AcpLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value from VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((!result), + "cannot find divider id for engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + } + + return result; +} + +static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + pp_atomctrl_clock_dividers_vi dividers; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *pptable_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + pptable_info->mm_dep_table; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t) (mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].Frequency = + pptable_info->mm_dep_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage.Vddc = + phm_get_voltage_index(pptable_info->vddc_lookup_table, + mm_table->entries[count].vddc); + table->SamuLevel[count].MinVoltage.VddGfx = + (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? 
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table, + mm_table->entries[count].vddgfx) : 0; + table->SamuLevel[count].MinVoltage.Vddci = + phm_get_voltage_id(&data->vddci_voltage_table, + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + table->SamuLevel[count].MinVoltage.Phases = 1; + + /* retrieve divider value from VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((!result), + "cannot find divider id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + } + + return result; +} + +static int tonga_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + int result = 0; + SMU72_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = tonga_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (result) + break; + } + } + + if (!result) { + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU72_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (result != 0) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + printk(KERN_ERR "[powerplay] VBIOS did not find boot engine " + "clock value in dependency table. 
" + "Using Graphics DPM level 0 !"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (result != 0) { + smu_data->smc_state_table.MemoryBootLevel = 0; + printk(KERN_ERR "[powerplay] VBIOS did not find boot " + "engine clock value in dependency table." + "Using Memory DPM level 0 !"); + result = 0; + } + + table->BootVoltage.Vddc = + phm_get_voltage_id(&(data->vddc_voltage_table), + data->vbios_boot_state.vddc_bootup_value); + table->BootVoltage.VddGfx = + phm_get_voltage_id(&(data->vddgfx_voltage_table), + data->vbios_boot_state.vddgfx_bootup_value); + table->BootVoltage.Vddci = + phm_get_voltage_id(&(data->vddci_voltage_table), + data->vbios_boot_state.vddci_bootup_value); + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return result; +} + +static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, + volt_with_cks, value; + uint16_t clock_freq_u16; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, + volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + uint32_t hw_revision, dev_id; + struct cgs_system_info sys_info = {0}; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + sys_info.size = sizeof(struct cgs_system_info); + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(hwmgr->device, &sys_info); + hw_revision = (uint32_t)sys_info.value; + + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + /* Read SMU_Eefuse to read and calculate RO and determine + * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
+ */ + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (146 * 4)); + efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (148 * 4)); + efuse &= 0xFF000000; + efuse = efuse >> 24; + efuse2 &= 0xF; + + if (efuse2 == 1) + ro = (2300 - 1350) * efuse / 255 + 1350; + else + ro = (2500 - 1000) * efuse / 255 + 1000; + + if (ro >= 1660) + type = 0; + else + type = 1; + + /* Populate Stretch amount */ + smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; + + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + if (ASICID_IS_TONGA_P(dev_id, hw_revision)) { + volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 * + (sclk_table->entries[i].clk/100) / 10000) * 1000 / + (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000))); + volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 * + (sclk_table->entries[i].clk/100) / 100000) * 1000 / + (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000))); + } else { + volt_without_cks = (uint32_t)((14041 * + (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / + (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); + volt_with_cks = (uint32_t)((13946 * + (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / + (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + } + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + STRETCH_ENABLE, 0x0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + staticEnable, 0x1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, + masterReset, 0x0); + + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFC2FF87; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = + tonga_clock_stretcher_lookup_table[stretch_amount2][0]; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = + tonga_clock_stretcher_lookup_table[stretch_amount2][1]; + clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table. + GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. + SclkFrequency) / 100); + if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] < + clock_freq_u16 && + tonga_clock_stretcher_lookup_table[stretch_amount2][1] > + clock_freq_u16) { + /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ + value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; + /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ + value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; + /* Program PWR_CKS_CNTL. 
CKS_STRETCH_AMOUNT */ + value |= (tonga_clock_stretch_amount_conversion + [tonga_clock_stretcher_lookup_table[stretch_amount2][3]] + [stretch_amount]) << 3; + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].minFreq); + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable. + CKS_LOOKUPTableEntry[0].maxFreq); + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = + tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; + smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= + (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + /* Populate DDT Lookup Table */ + for (i = 0; i < 4; i++) { + /* Assign the minimum and maximum VID stored + * in the last row of Clock Stretcher Voltage Table. + */ + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].minVID = + (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2]; + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].maxVID = + (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3]; + /* Loop through each SCLK and check the frequency + * to see if it lies within the frequency for clock stretcher. + */ + for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) { + cks_setting = 0; + clock_freq = PP_SMC_TO_HOST_UL( + smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency); + /* Check the allowed frequency against the sclk level[j]. + * Sclk's endianness has already been converted, + * and it's in 10Khz unit, + * as opposed to Data table, which is in Mhz unit. + */ + if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) { + cks_setting |= 0x2; + if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100) + cks_setting |= 0x1; + } + smu_data->smc_state_table.ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); + } + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table. + ClockStretcherDataTable. + ClockStretcherDataTableEntry[i].setting); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixPWR_CKS_CNTL, value); + + return 0; +} + +/** + * Populates the SMC VRConfig field in DPM table. 
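+ * For example, in merged mode with SVI2-controlled VDDC, GPIO-controlled VDDCI + * and GPIO-controlled MVDD the field below becomes + * (VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT) | VR_SVI2_PLANE_1 | + * (VR_SMIO_PATTERN_1 << VRCONF_VDDCI_SHIFT) | (VR_SMIO_PATTERN_2 << VRCONF_MVDD_SHIFT).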
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, + SMU72_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint16_t config; + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { + /* Splitted mode */ + config = VR_SVI2_PLANE_1; + table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= config; + } else { + printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should " + "be both on SVI2 control in splitted mode !\n"); + } + } else { + /* Merged mode */ + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + printk(KERN_ERR "[ powerplay ] VDDC should be on " + "SVI2 control in merged mode !\n"); + } + } + + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT); + } + + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig |= (config<<VRCONF_MVDD_SHIFT); + } + + return 0; +} + + +/** + * Initialize the ARB DRAM timing table's index field. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + uint32_t tmp; + int result; + + /* + * This is a read-modify-write on the first byte of the ARB table. + * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure + * is the field 'current'. + * This solution is ugly, but we never write the whole table only + * individual fields in it. + * In reality this field should not be in that structure + * but in a soft register. 
+ */ + result = smu7_read_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); + + if (result != 0) + return result; + + tmp &= 0x00FFFFFF; + tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; + + return smu7_write_smc_sram_dword(smumgr, + smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); +} + + +static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + int i, j, k; + const uint16_t *pdef1, *pdef2; + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range !", + ); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + dpm_table->BAPM_TEMP_GRADIENT = + PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + pdef1 = defaults->bapmti_r; + pdef2 = defaults->bapmti_rc; + + for (i = 0; i < SMU72_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU72_DTE_SOURCES; j++) { + for (k = 0; k < SMU72_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = + PP_HOST_TO_SMC_US(*pdef1); + dpm_table->BAPMTI_RC[i][j][k] = + PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + 
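+ /* + * In tonga_populate_bapm_parameters_in_dpm_table() above, usTDP and + * usConfigurableTDP are scaled by 256 because the SMC takes these limits in 8.8 + * fixed point; a hypothetical usTDP of 120 therefore becomes 120 * 256 = 30720 + * before the endian swap. + */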
PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 " + "(SviLoadLineEn) from SMC Failed !", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0)) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed !", + return -EINVAL); + + /* DW6 */ + if (tonga_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed !", + return -EINVAL); + /* DW7 */ + if (tonga_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed !", + return -EINVAL); + /* DW8 */ + if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl Failed !", + return -EINVAL); + + /* DW9-DW12 */ + if (tonga_populate_temperature_scaler(hwmgr) != 0) + 
PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed !", + return -EINVAL); + + /* DW13-DW14 */ + if (tonga_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan " + "Control parameters Failed !", + return -EINVAL); + + /* DW15-DW18 */ + if (tonga_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed !", + return -EINVAL); + + /* DW19 */ + if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML " + "Min and Max Vid Failed !", + return -EINVAL); + + /* DW20 */ + if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE( + false, + "Attempt to populate BapmVddCBaseLeakage " + "Hi and Lo Sidd Failed !", + return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed !", + return -EINVAL); + } + return 0; +} + +static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr, + SMU72_Discrete_MCRegisters *mc_reg_table) +{ + const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend; + + uint32_t i, j; + + for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { + if (smu_data->mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE( + i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE, + "Index of mc_reg_table->address[] array " + "out of boundary", + return -EINVAL); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/*convert register values from driver to SMC format */ +static void tonga_convert_mc_registers( + const struct tonga_mc_reg_entry *entry, + SMU72_Discrete_MCRegisterSet *data, + uint32_t num_entries, uint32_t valid_flag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & 1<<j) { + data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); + i++; + } + } +} + +static int tonga_convert_mc_reg_table_entry_to_smc( + struct pp_smumgr *smumgr, + const uint32_t memory_clock, + SMU72_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + uint32_t i = 0; + + for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { + if (memory_clock <= + smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) + --i; + + tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, smu_data->mc_reg_table.last, + smu_data->mc_reg_table.validflag); + + return 0; +} + +static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU72_Discrete_MCRegisters *mc_regs) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = tonga_convert_mc_reg_table_entry_to_smc( + hwmgr->smumgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_regs->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct tonga_smumgr *smu_data = (struct 
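+ /* + * Example for tonga_convert_mc_reg_table_entry_to_smc() above: with per-entry + * mclk_max values of {30000, 50000, 80000}, a memory clock of 45000 selects + * entry 1 (the first entry it fits under), while 90000 runs past the end of the + * table and falls back to the last entry. Only registers whose bit is set in + * validflag are copied. + */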
tonga_smumgr *)(smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters)); + + result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); + + if (result != 0) + return result; + + + address = smu_data->smu7_data.mc_reg_table_start + + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); + + return smu7_copy_bytes_to_smc( + hwmgr->smumgr, address, + (uint8_t *)&smu_data->mc_regs.data[0], + sizeof(SMU72_Discrete_MCRegisterSet) * + data->dpm_table.mclk_table.count, + SMC_RAM_END); +} + +static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + + memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); + result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize MCRegTable for the MC register addresses !", + return result;); + + result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize MCRegTable for driver state !", + return result;); + + return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END); +} + +static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &tonga_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0]; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. 
+ * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int tonga_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + uint8_t i; + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + + + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + tonga_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + tonga_populate_smc_voltage_tables(hwmgr, table); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN); + + if (i == 1 || i == 0) + table->SystemFlags |= 0x40; + + if (data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = tonga_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ULV state !", + return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = tonga_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Link Level !", return result); + + result = tonga_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Graphics Level !", return result); + + result = tonga_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Memory Level !", return result); + + result = tonga_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ACPI Level !", return result); + + result = tonga_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize VCE Level !", return result); + + result = tonga_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ACP Level !", return result); + + result = tonga_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize SAMU Level !", return result); + + /* Since only the initial state is completely set up at this + * point (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. 
+ */ + result = tonga_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to Write ARB settings for the initial state.", + return result;); + + result = tonga_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize UVD Level !", return result); + + result = tonga_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Boot Level !", return result); + + tonga_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate BAPM Parameters !", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = tonga_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate Clock Stretcher Data Table !", + return result;); + } + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + /* + * Cail reads current link status and reports it as cap (we cannot + * change this due to some previous issues we had) + * SMC drops the link status to lowest level after enabling + * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again + * but this time Cail reads current link status which was set to low by + * SMC and reports it as cap to powerplay + * To avoid it, we set PCIeBootLinkLevel to highest dpm level + */ + PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); + + table->PCIeGenInterval = 1; + + result = tonga_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate VRConfig setting !", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment)) { + table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment)) { + table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + + if (0) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, + 
THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + + table->ThermOutPolarity = + (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0; + + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)){ + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } + } else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalOutGPIO); + + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(!result, + "Failed to upload dpm data to SMC memory !", return result;); + + result = tonga_init_arb_table_index(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload arb data to SMC memory !", return result); + + tonga_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((!result), + "Failed to populate initialize pm fuses !", return result); + + result = tonga_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((!result), + "Failed to populate initialize MC Reg table !", return result); + + return 0; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. 
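Two conventions in the block above are easy to miss: the temperature limits are stored in the SMC's Q8.8 fixed-point format (hence the multiply by SMU7_Q88_FORMAT_CONVERSION_UNIT, assumed here to be 256), and the CONVERT_FROM_HOST_TO_SMC_UL/US swizzles exist because the SMC appears to consume its tables in big-endian byte order (the fan table below uses cpu_to_be16/32 directly), so every multi-byte field is swapped before smu7_copy_bytes_to_smc pushes the raw bytes. A self-contained sketch of both steps with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Assumption: SMU7_Q88_FORMAT_CONVERSION_UNIT == 256 (Q8.8 scale). */
#define Q88_UNIT 256u

/* Portable stand-in for cpu_to_be16() on a little-endian host. */
static uint16_t to_be16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t target_c = 90;                        /* usTargetOperatingTemp */
	uint16_t limit_high = target_c * Q88_UNIT;     /* 90.0 C -> 0x5a00      */
	uint16_t limit_low  = (target_c - 1) * Q88_UNIT;

	/* Host-to-SMC conversion just before the byte-for-byte SRAM copy. */
	limit_high = to_be16(limit_high);
	limit_low  = to_be16(limit_low);

	printf("limits on the wire: 0x%04x 0x%04x\n", limit_high, limit_low);
	return 0;
}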
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == smu_data->smu7_data.fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, + CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - + hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + fan_table.FanControl_GL_Flag = 1; + + res = smu7_copy_bytes_to_smc(hwmgr->smumgr, + smu_data->smu7_data.fan_table_start, + (uint8_t *)&fan_table, + (uint32_t)sizeof(fan_table), + SMC_RAM_END); + + return 0; +} + + +static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return 
tonga_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr->smumgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = tonga_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((!result), + "Failed to upload MC reg table !", + return result); + + result = tonga_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters !", + ); + + return result; +} + +uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU72_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU72_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU72_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU72_SoftRegisters, UcodeLoadStatus); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU72_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + printk("cant't get the offset of type %x member %x\n", type, member); + return 0; +} + +uint32_t tonga_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU72_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU72_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU72_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU72_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU72_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU72_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU72_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU72_MAX_LEVELS_MVDD; + } + printk("cant't get the mac value %x\n", value); + + return 0; +} + + +static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) 
(table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = + (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + smu_data->smc_state_table.SamuBootLevel = 0; + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + +int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + tonga_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + tonga_update_vce_smc_table(hwmgr); + break; + case SMU_SAMU_TABLE: + tonga_update_samu_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. 
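The three update_*_smc_table helpers above share one pattern: each boot level is a single byte inside a 32-bit word of the DPM table image, so the code rounds the byte offset down to a dword boundary (the /= 4; *= 4 pair), reads the dword through the SMC indirect interface, replaces only its own byte lane (shift 24 for UvdBootLevel, 16 for VceBootLevel, 0 for SamuBootLevel) and writes the dword back. A distilled standalone version of that read-modify-write, with a fake SRAM array in place of cgs_read_ind_register()/cgs_write_ind_register():

#include <stdint.h>
#include <stdio.h>

/* Fake backing store standing in for SMC SRAM behind the CGS indirect
 * register accessors used by the driver. */
static uint32_t sram[64];

static uint32_t sram_read(uint32_t byte_offset)
{
	return sram[byte_offset / 4];
}

static void sram_write(uint32_t byte_offset, uint32_t value)
{
	sram[byte_offset / 4] = value;
}

/* Replace one byte lane of the dword containing byte_offset; the lane's
 * bit position is passed in, matching the per-field hard-coded shifts. */
static void rmw_boot_level(uint32_t byte_offset, unsigned int shift, uint8_t level)
{
	uint32_t aligned = byte_offset & ~3u;     /* same effect as /4 then *4 */
	uint32_t v = sram_read(aligned);

	v &= ~(0xffu << shift);
	v |= (uint32_t)level << shift;
	sram_write(aligned, v);
}

int main(void)
{
	sram[3] = 0xaabbccdd;
	rmw_boot_level(14, 24, 0x02);             /* UvdBootLevel-style field */
	printf("dword is now 0x%08x\n", sram[3]); /* prints 0x02bbccdd        */
	return 0;
}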
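Further up, tonga_thermal_setup_fan_table encodes a piecewise-linear PWM-versus-temperature curve: slope1 covers the TMin..TMed segment, slope2 the TMed..THigh segment, and the recurring "(50 + x) / 100" steps round values that the thermal controller parameters keep in hundredths (centidegrees and 0.01 % PWM units) to the nearest whole unit. The same arithmetic in standalone form, with made-up fan parameters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example inputs in the pptable's units: temperatures in 0.01 C,
	 * PWM duty in 0.01 %, duty100 as read from CG_FDO_CTRL1.FMAX_DUTY100. */
	uint32_t t_min = 4500, t_med = 6500, t_high = 8500;
	uint32_t pwm_min = 3000, pwm_med = 5500, pwm_high = 10000;
	uint32_t duty100 = 255;

	uint16_t fdo_min = (uint16_t)((uint64_t)pwm_min * duty100 / 10000);
	uint16_t slope1 = (uint16_t)((50 + (16 * duty100 * (pwm_med - pwm_min)) /
				      (t_med - t_min)) / 100);
	uint16_t slope2 = (uint16_t)((50 + (16 * duty100 * (pwm_high - pwm_med)) /
				      (t_high - t_med)) / 100);
	uint16_t temp_min_c = (uint16_t)((50 + t_min) / 100);  /* rounded whole C */

	printf("fdo_min=%u slope1=%u slope2=%u tmin=%u C\n",
	       fdo_min, slope1, slope2, temp_min_c);
	return 0;
}

One point worth flagging in review: the routine stores the result of smu7_copy_bytes_to_smc in res but still returns 0 unconditionally, so a failed fan-table upload is silently ignored.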
+ * @return always 0 + */ +int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.dpm_table_start = tmp; + + error |= (result != 0); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) { + data->soft_regs_start = tmp; + smu_data->smu7_data.soft_regs_start = tmp; + } + + error |= (result != 0); + + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.mc_reg_table_start = tmp; + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.fan_table_start = tmp; + + error |= (result != 0); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.arb_table_start = tmp; + + error |= (result != 0); + + result = smu7_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (result != 0); + + return error ? 
1 : 0; +} + +/*---------------------------MC----------------------------*/ + +static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, + &address) ? + address : + table->mc_reg_address[i].s1; + } + return 0; +} + +static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct tonga_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to + * mmMC_PMG_CMD_EMRS /_LP[15:0]. Bit[15:0] MRS, need to be update + * mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to + * mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. 
need to set these data for each clock range + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, + struct tonga_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + switch (table->mc_reg_address[i].s1) { + + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, + mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + if (!data->is_memory_gddr5) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + j++; + PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static int tonga_set_valid_flag(struct tonga_mc_reg_table *table) +{ + uint8_t i, j; + + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1<<i); + break; + } + } + } + + return 0; +} + +int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + pp_atomctrl_mc_reg_table *table; + struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table; + uint8_t module_index = tonga_get_memory_modile_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (table == NULL) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS */ + 
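tonga_set_valid_flag only marks a register column as valid when its value actually changes across the per-clock entries; constant columns are then skipped by tonga_populate_mc_reg_address and tonga_convert_mc_registers further up, which is what keeps the packed array within SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE. A toy, self-contained illustration of that flag-then-compact flow:

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS   4
#define NUM_LEVELS 3

int main(void)
{
	/* mc_data[level][reg]: register column 2 never changes. */
	uint32_t mc_data[NUM_LEVELS][NUM_REGS] = {
		{ 0x10, 0x20, 0x55, 0x40 },
		{ 0x11, 0x21, 0x55, 0x41 },
		{ 0x12, 0x22, 0x55, 0x42 },
	};
	uint16_t validflag = 0;
	uint32_t packed[NUM_REGS];
	unsigned int i, j, count = 0;

	/* Equivalent of tonga_set_valid_flag(): flag columns that vary. */
	for (i = 0; i < NUM_REGS; i++) {
		for (j = 1; j < NUM_LEVELS; j++) {
			if (mc_data[j - 1][i] != mc_data[j][i]) {
				validflag |= 1u << i;
				break;
			}
		}
	}

	/* Equivalent of tonga_convert_mc_registers() for one level:
	 * copy only the flagged columns, in order. */
	for (j = 0; j < NUM_REGS; j++) {
		if (validflag & (1u << j))
			packed[count++] = mc_data[0][j];
	}

	printf("validflag=0x%x, %u of %u registers uploaded, first value 0x%x\n",
	       validflag, count, NUM_REGS, packed[0]);
	return 0;
}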
cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, + cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, + cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (!result) + result = tonga_copy_vbios_smc_reg_table(table, ni_table); + + if (!result) { + tonga_set_s0_mc_reg_index(ni_table); + result = tonga_set_mc_special_registers(hwmgr, ni_table); + } + + if (!result) + tonga_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h index 8e6670b3cb67..8ae169ff541d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h @@ -20,35 +20,19 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#ifndef _TONGA_SMC_H +#define _TONGA_SMC_H -#ifndef TONGA_POWERTUNE_H -#define TONGA_POWERTUNE_H +#include "smumgr.h" +#include "smu72.h" -enum _phw_tonga_ptc_config_reg_type { - TONGA_CONFIGREG_MMR = 0, - TONGA_CONFIGREG_SMC_IND, - TONGA_CONFIGREG_DIDT_IND, - TONGA_CONFIGREG_CACHE, - TONGA_CONFIGREG_MAX -}; -typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type; - -/* PowerContainment Features */ -#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define ASICID_IS_TONGA_P(wDID, bRID) \ + (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \ + || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1)))) -struct _phw_tonga_pt_config_reg { - uint32_t Offset; - uint32_t Mask; - uint32_t Shift; - uint32_t Value; - phw_tonga_ptc_config_reg_type Type; -}; -typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg; -struct _phw_tonga_pt_defaults { +struct tonga_pt_defaults { uint8_t svi_load_line_en; uint8_t svi_load_line_vddC; uint8_t tdc_vddc_throttle_release_limit_perc; @@ -60,7 +44,17 @@ struct _phw_tonga_pt_defaults { uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; }; -typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults; +int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int tonga_init_smc_table(struct pp_hwmgr *hwmgr); +int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t tonga_get_offsetof(uint32_t type, uint32_t member); +uint32_t tonga_get_mac_definition(uint32_t value); +int tonga_process_firmware_header(struct pp_hwmgr *hwmgr); +int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index f42c536b3af1..5f9124046b9b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -33,587 +33,9 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" #include "cgs_common.h" +#include "tonga_smc.h" +#include "smu7_smumgr.h" -#define TONGA_SMC_SIZE 0x20000 -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /*128 *1024*/ - -/** -* Set the address for reading/writing the SMC SRAM space. -* @param smumgr the address of the powerplay hardware manager. -* @param smcAddress the address in the SMC RAM to access. -*/ -static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t limit) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), - "SMC address must be 4 byte aligned.", - return -1;); - - PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), - "SMC address is beyond the SMC RAM area.", - return -1;); - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); - - return 0; -} - -/** -* Copy bytes from an array into the SMC RAM space. -* -* @param smumgr the address of the powerplay SMU manager. 
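The new tonga_smc.h also carries ASICID_IS_TONGA_P(wDID, bRID), which matches the Tonga PRO PCI device/revision combinations. A trivial usage sketch; the device and revision values below are simply two of the pairs the macro itself lists, fed in as local variables rather than read from a real PCI device:

#include <stdint.h>
#include <stdio.h>

/* Same check as the macro added in tonga_smc.h above. */
#define ASICID_IS_TONGA_P(wDID, bRID) \
	(((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
	 || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))

int main(void)
{
	uint16_t device_id = 0x6920;   /* would normally come from PCI config space */
	uint8_t  revision  = 0x01;

	printf("Tonga PRO part: %s\n",
	       ASICID_IS_TONGA_P(device_id, revision) ? "yes" : "no");
	return 0;
}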
-* @param smcStartAddress the start address in the SMC RAM to copy bytes to. -* @param src the byte array to copy the bytes from. -* @param byteCount the number of bytes to copy. -*/ -int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), - "SMC address must be 4 byte aligned.", - return 0;); - - PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), - "SMC address is beyond the SMC RAM area.", - return 0;); - - addr = smcStartAddress; - - while (byteCount >= 4) { - /* - * Bytes are written into the - * SMC address space with the MSB first - */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = tonga_set_smc_sram_address(smumgr, addr, limit); - - if (result) - goto out; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - - src += 4; - byteCount -= 4; - addr += 4; - } - - if (0 != byteCount) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = tonga_set_smc_sram_address(smumgr, addr, limit); - if (result) - goto out; - - orig_data = cgs_read_register(smumgr->device, - mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byteCount); - - while (byteCount > 0) { - data = (data << 8) + *src++; - byteCount--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = tonga_set_smc_sram_address(smumgr, addr, limit); - if (result) - goto out; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); - } - -out: - return result; -} - - -int tonga_program_jump_on_start(struct pp_smumgr *smumgr) -{ - static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; - - tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); - - return 0; -} - -/** -* Return if the SMC is currently running. -* -* @param smumgr the address of the powerplay hardware manager. -*/ -static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr) -{ - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, - SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, - CGS_IND_REG__SMC, ixSMC_PC_C))); -} - -static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - return 0; -} - -/** -* Send a message to the SMC, and wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. 
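The removed tonga_copy_bytes_to_smc (its role is taken over by the common smu7 helper) packs source bytes into 32-bit SRAM words MSB-first and, when 1 to 3 bytes are left over, reads the existing word back and merges so the untouched low-order bytes survive. A standalone model of that tail handling:

#include <stdint.h>
#include <stdio.h>

/* Merge 'count' (1..3) trailing bytes into 'orig', MSB-first, preserving
 * the remaining low-order bytes - the same read-modify-write the removed
 * copy routine performs on the final partial dword. */
static uint32_t merge_tail(uint32_t orig, const uint8_t *src, uint32_t count)
{
	uint32_t data = 0;
	uint32_t extra_shift = 8 * (4 - count);

	while (count--)
		data = (data << 8) + *src++;

	data <<= extra_shift;
	data |= orig & ~(~0u << extra_shift);
	return data;
}

int main(void)
{
	const uint8_t tail[2] = { 0xde, 0xad };

	/* 0x11223344 with its two high bytes replaced -> 0xdead3344 */
	printf("0x%08x\n", merge_tail(0x11223344u, tail, 2));
	return 0;
}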
-*/ -static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - if (!tonga_is_smc_ram_running(smumgr)) - return -1; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Previous Message.", - ); - - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Message.", - ); - - return 0; -} - -/* -* Send a message to the SMC, and do not wait for its response. -* -* @param smumgr the address of the powerplay hardware manager. -* @param msg the message to send. -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc_without_waiting - (struct pp_smumgr *smumgr, uint16_t msg) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - PP_ASSERT_WITH_CODE( - 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), - "Failed to send Previous Message.", - ); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); - - return 0; -} - -/* -* Send a message to the SMC with parameter -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - if (!tonga_is_smc_ram_running(smumgr)) - return PPSMC_Result_Failed; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc(smumgr, msg); -} - -/* -* Send a message to the SMC with parameter, do not wait for response -* -* @param smumgr: the address of the powerplay hardware manager. -* @param msg: the message to send. -* @param parameter: the parameter to send -* @return The response that came from the SMC. -*/ -static int tonga_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, - uint16_t msg, uint32_t parameter) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc_without_waiting(smumgr, msg); -} - -/* - * Read a 32bit value from the SMC SRAM space. - * ALL PARAMETERS ARE IN HOST BYTE ORDER. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. - * @param value and output parameter for the data read from the SMC SRAM. - */ -int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t *value, - uint32_t limit) -{ - int result; - - result = tonga_set_smc_sram_address(smumgr, smcAddress, limit); - - if (0 != result) - return result; - - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); - - return 0; -} - -/* - * Write a 32bit value to the SMC SRAM space. - * ALL PARAMETERS ARE IN HOST BYTE ORDER. - * @param smumgr the address of the powerplay hardware manager. - * @param smcAddress the address in the SMC RAM to access. - * @param value to write to the SMC SRAM. 
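The removed message helpers all follow the same mailbox protocol: poll SMC_RESP_0 until the previous message has been acknowledged, write the optional argument to SMC_MSG_ARG_0, write the message id to SMC_MESSAGE_0 (which is what actually kicks the SMU), then poll SMC_RESP_0 again and treat a value of 1 as success. A toy model of that sequence; the register struct, the instantly answering "firmware" and the message/argument values are all stand-ins, not driver API:

#include <stdint.h>
#include <stdio.h>

struct smc_mailbox {
	uint32_t arg;    /* SMC_MSG_ARG_0 */
	uint32_t msg;    /* SMC_MESSAGE_0 */
	uint32_t resp;   /* SMC_RESP_0    */
};

/* Stand-in for the SMU: acknowledge the posted message immediately.
 * The real driver polls with SMUM_WAIT_FIELD_UNEQUAL() instead. */
static void model_firmware(struct smc_mailbox *smc)
{
	smc->resp = 1;
}

static int send_msg_with_parameter(struct smc_mailbox *smc,
				   uint16_t msg, uint32_t parameter)
{
	if (smc->resp == 0)      /* previous message not acknowledged yet */
		return -1;

	smc->arg = parameter;    /* 1. post the argument                  */
	smc->msg = msg;          /* 2. posting the id starts the SMU      */
	smc->resp = 0;           /*    model: new message clears old ack  */

	model_firmware(smc);     /* 3. wait for the new acknowledgement   */
	return (smc->resp == 1) ? 0 : -1;
}

int main(void)
{
	struct smc_mailbox smc = { .resp = 1 };

	printf("send result: %d\n", send_msg_with_parameter(&smc, 0x42, 0x20000));
	return 0;
}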
- */ -int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, - uint32_t smcAddress, uint32_t value, - uint32_t limit) -{ - int result; - - result = tonga_set_smc_sram_address(smumgr, smcAddress, limit); - - if (0 != result) - return result; - - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); - - return 0; -} - -static int tonga_smu_fini(struct pp_smumgr *smumgr) -{ - struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); - - smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); - smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); - - if (smumgr->backend != NULL) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - return 0; -} - -static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type) -{ - enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SMU: - result = CGS_UCODE_ID_SMU; - break; - case UCODE_ID_SDMA0: - result = CGS_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = CGS_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = CGS_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = CGS_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = CGS_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC: - result = CGS_UCODE_ID_CP_MEC; - break; - case UCODE_ID_CP_MEC_JT1: - result = CGS_UCODE_ID_CP_MEC_JT1; - break; - case UCODE_ID_CP_MEC_JT2: - result = CGS_UCODE_ID_CP_MEC_JT2; - break; - case UCODE_ID_RLC_G: - result = CGS_UCODE_ID_RLC_G; - break; - default: - break; - } - - return result; -} - -/** - * Convert the PPIRI firmware type to SMU type mask. - * For MEC, we need to check all MEC related type -*/ -static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType) -{ - uint16_t result = 0; - - switch (firmwareType) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - break; - } - - return result; -} - -/** - * Check if the FW has been loaded, - * SMU will not return if loading has not finished. 
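Of the two removed translation helpers, tonga_convert_fw_type_to_cgs is a one-to-one enum mapping, while tonga_get_mask_for_firmware_type folds all three MEC images (MEC, MEC_JT1, MEC_JT2) onto the single UCODE_ID_CP_MEC_MASK bit, apparently because the load status reported by the SMU carries only one flag for MEC. A minimal sketch of that many-to-one mapping, with hypothetical numeric stand-ins for the enum and mask values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the UCODE_ID_* values and *_MASK bits. */
enum ucode_id {
	UC_CP_MEC     = 6,
	UC_CP_MEC_JT1 = 7,
	UC_CP_MEC_JT2 = 8,
	UC_RLC_G      = 9,
};

#define UC_CP_MEC_MASK	(1u << 6)
#define UC_RLC_G_MASK	(1u << 9)

static uint16_t mask_for_fw(enum ucode_id id)
{
	switch (id) {
	case UC_CP_MEC:
	case UC_CP_MEC_JT1:
	case UC_CP_MEC_JT2:
		return UC_CP_MEC_MASK;   /* all MEC pieces share one status bit */
	case UC_RLC_G:
		return UC_RLC_G_MASK;
	default:
		return 0;
	}
}

int main(void)
{
	printf("JT1 mask 0x%x, JT2 mask 0x%x\n",
	       mask_for_fw(UC_CP_MEC_JT1), mask_for_fw(UC_CP_MEC_JT2));
	return 0;
}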
-*/ -static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType) -{ - uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType); - - if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, - SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) { - printk(KERN_ERR "[ powerplay ] check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -/* Populate one firmware image to the data structure */ -static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr, - uint16_t firmware_type, - struct SMU_Entry *pentry) -{ - int result; - struct cgs_firmware_info info = {0}; - - result = cgs_get_firmware_info( - smumgr->device, - tonga_convert_fw_type_to_cgs(firmware_type), - &info); - - if (result == 0) { - pentry->version = 0; - pentry->id = (uint16_t)firmware_type; - pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); - pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); - pentry->meta_data_addr_high = 0; - pentry->meta_data_addr_low = 0; - pentry->data_size_byte = info.image_size; - pentry->num_register_entries = 0; - - if (firmware_type == UCODE_ID_RLC_G) - pentry->flags = 1; - else - pentry->flags = 0; - } else { - return result; - } - - return result; -} - -static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) -{ - struct tonga_smumgr *tonga_smu = - (struct tonga_smumgr *)(smumgr->backend); - uint16_t fw_to_load; - struct SMU_DRAMData_TOC *toc; - /** - * First time this gets called during SmuMgr init, - * we haven't processed SMU header file yet, - * so Soft Register Start offset is unknown. - * However, for this case, UcodeLoadStatus is already 0, - * so we can skip this if the Soft Registers Start offset is 0. - */ - cgs_write_ind_register(smumgr->device, - CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0); - - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_SMU_DRAM_ADDR_HI, - tonga_smu->smu_buffer.mc_addr_high); - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_SMU_DRAM_ADDR_LO, - tonga_smu->smu_buffer.mc_addr_low); - - toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader; - toc->num_entries = 0; - toc->structure_version = 1; - - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry(smumgr, - UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", - return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry(smumgr, - UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", - return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware 
Entry.\n", return -1); - PP_ASSERT_WITH_CODE( - 0 == tonga_populate_single_firmware_entry - (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.\n", return -1); - - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DRV_DRAM_ADDR_HI, - tonga_smu->header_buffer.mc_addr_high); - tonga_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DRV_DRAM_ADDR_LO, - tonga_smu->header_buffer.mc_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK - + UCODE_ID_SDMA0_MASK - + UCODE_ID_SDMA1_MASK - + UCODE_ID_CP_CE_MASK - + UCODE_ID_CP_ME_MASK - + UCODE_ID_CP_PFP_MASK - + UCODE_ID_CP_MEC_MASK; - - PP_ASSERT_WITH_CODE( - 0 == tonga_send_msg_to_smc_with_parameter_without_waiting( - smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), - "Fail to Request SMU Load uCode", return 0); - - return 0; -} - -static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, - uint32_t firmwareType) -{ - return 0; -} - -/** - * Upload the SMC firmware to the SMC microcontroller. - * - * @param smumgr the address of the powerplay hardware manager. - * @param pFirmware the data structure containing the various sections of the firmware. - */ -static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr) -{ - const uint8_t *src; - uint32_t byte_count; - uint32_t *data; - struct cgs_firmware_info info = {0}; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - cgs_get_firmware_info(smumgr->device, - tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); - - if (info.image_size & 3) { - printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (info.image_size > TONGA_SMC_SIZE) { - printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - - byte_count = info.image_size; - src = (const uint8_t *)info.kptr; - - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); - - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - - return 0; -} static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) { @@ -623,7 +45,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = tonga_smu_upload_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result) return result; @@ -653,7 +75,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) /** * Call Test SMU message with 0x20000 offset to trigger SMU start */ - tonga_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(smumgr); /* Wait for done bit to be set */ SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, @@ -690,13 +112,13 @@ static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr) SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = tonga_smu_upload_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(smumgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - tonga_program_jump_on_start(smumgr); + smu7_program_jump_on_start(smumgr); SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, @@ -718,7 +140,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) int result; /* Only 
start SMC if SMC RAM is not running */ - if (!tonga_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(smumgr)) { /*Check if SMU is running in protected mode*/ if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { @@ -732,7 +154,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) } } - result = tonga_request_smu_reload_fw(smumgr); + result = smu7_request_smu_load_fw(smumgr); return result; } @@ -746,67 +168,41 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) */ static int tonga_smu_init(struct pp_smumgr *smumgr) { - struct tonga_smumgr *tonga_smu; - uint8_t *internal_buf; - uint64_t mc_addr = 0; - /* Allocate memory for backend private data */ - tonga_smu = (struct tonga_smumgr *)(smumgr->backend); - tonga_smu->header_buffer.data_size = - ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - tonga_smu->smu_buffer.data_size = 200*4096; - - smu_allocate_memory(smumgr->device, - tonga_smu->header_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &tonga_smu->header_buffer.kaddr, - &tonga_smu->header_buffer.handle); - - tonga_smu->pHeader = tonga_smu->header_buffer.kaddr; - tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)tonga_smu->header_buffer.handle); - return -1); - - smu_allocate_memory(smumgr->device, - tonga_smu->smu_buffer.data_size, - CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, - PAGE_SIZE, - &mc_addr, - &tonga_smu->smu_buffer.kaddr, - &tonga_smu->smu_buffer.handle); - - internal_buf = tonga_smu->smu_buffer.kaddr; - tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); - tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - - PP_ASSERT_WITH_CODE((NULL != internal_buf), - "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, - (cgs_handle_t)tonga_smu->smu_buffer.handle); - return -1;); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + + int i; + + if (smu7_init(smumgr)) + return -EINVAL; + + for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) + smu_data->activity_target[i] = 30; return 0; } static const struct pp_smumgr_func tonga_smu_funcs = { .smu_init = &tonga_smu_init, - .smu_fini = &tonga_smu_fini, + .smu_fini = &smu7_smu_fini, .start_smu = &tonga_start_smu, - .check_fw_load_finish = &tonga_check_fw_load_finish, - .request_smu_load_fw = &tonga_request_smu_reload_fw, - .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw, - .send_msg_to_smc = &tonga_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter, + .check_fw_load_finish = &smu7_check_fw_load_finish, + .request_smu_load_fw = &smu7_request_smu_load_fw, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, + .update_smc_table = tonga_update_smc_table, + .get_offsetof = tonga_get_offsetof, + .process_firmware_header = tonga_process_firmware_header, + .init_smc_table = tonga_init_smc_table, + .update_sclk_threshold = tonga_update_sclk_threshold, + .thermal_setup_fan_table = tonga_thermal_setup_fan_table, + .populate_all_graphic_levels = tonga_populate_all_graphic_levels, + .populate_all_memory_levels = 
tonga_populate_all_memory_levels, + .get_mac_definition = tonga_get_mac_definition, + .initialize_mc_reg_table = tonga_initialize_mc_reg_table, + .is_dpm_running = tonga_is_dpm_running, }; int tonga_smum_init(struct pp_smumgr *smumgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h index 33c788d7f05c..8c4f761d5bc8 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h @@ -24,30 +24,36 @@ #ifndef _TONGA_SMUMGR_H_ #define _TONGA_SMUMGR_H_ -struct tonga_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - unsigned long handle; +#include "smu72_discrete.h" + +#include "smu7_smumgr.h" + +struct tonga_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct tonga_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/ + struct tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE]; }; + struct tonga_smumgr { - uint8_t *pHeader; - uint8_t *pMecImage; - uint32_t ulSoftRegsStart; - struct tonga_buffer_entry header_buffer; - struct tonga_buffer_entry smu_buffer; -}; + struct smu7_smumgr smu7_data; + struct SMU72_Discrete_DpmTable smc_state_table; + struct SMU72_Discrete_Ulv ulv_setting; + struct SMU72_Discrete_PmFuses power_tune_table; + const struct tonga_pt_defaults *power_tune_defaults; + SMU72_Discrete_MCRegisters mc_regs; + struct tonga_mc_reg_table mc_reg_table; -extern int tonga_smum_init(struct pp_smumgr *smumgr); -extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr, - uint32_t smcStartAddress, const uint8_t *src, - uint32_t byteCount, uint32_t limit); -extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, - uint32_t *value, uint32_t limit); -extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress, - uint32_t value, uint32_t limit); + uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; + +}; #endif |
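Looking back at the removed tonga_request_smu_reload_fw: it builds a SMU_DRAMData_TOC in the header buffer, appends one entry per microcode image (with a separate PP_ASSERT_WITH_CODE around each call), hands the buffer's GPU address to the SMU via the DRV_DRAM_ADDR messages and then requests the load of the selected masks. The long run of per-image asserts could equally be driven by a small id table; a self-contained sketch of that shape, with an approximated TOC layout and arbitrary addresses (not the real SMU_DRAMData_TOC definition):

#include <stdint.h>
#include <stdio.h>

/* Approximated TOC layout; the real SMU_DRAMData_TOC differs in detail. */
struct toc_entry {
	uint16_t id;
	uint32_t addr_lo, addr_hi;
	uint32_t size;
};

struct toc {
	uint32_t structure_version;
	uint32_t num_entries;
	struct toc_entry entry[16];
};

static int toc_add(struct toc *t, uint16_t id, uint64_t gpu_addr, uint32_t size)
{
	struct toc_entry *e;

	if (t->num_entries >= 16)
		return -1;

	e = &t->entry[t->num_entries++];
	e->id = id;
	e->addr_lo = (uint32_t)gpu_addr;
	e->addr_hi = (uint32_t)(gpu_addr >> 32);
	e->size = size;
	return 0;
}

int main(void)
{
	/* Arbitrary ids standing in for UCODE_ID_RLC_G, UCODE_ID_CP_*, ... */
	static const uint16_t ids[] = { 9, 2, 3, 4, 5, 6, 7, 0, 1 };
	struct toc t = { .structure_version = 1 };
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		if (toc_add(&t, ids[i], 0x100000000ull + i * 0x10000, 0x8000))
			return 1;
	}

	printf("TOC holds %u firmware entries\n", t.num_entries);
	return 0;
}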