Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/amdgpu_smu.c')
-rw-r--r--	drivers/gpu/drm/amd/powerplay/amdgpu_smu.c	376
1 file changed, 321 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 9c67adee2c9e..f1565c448de5 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -60,6 +60,191 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
return ret;
}
+int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
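+
+/*
+ * Usage sketch (illustrative, hypothetical caller): restrict GFXCLK to a
+ * soft 300-1000 MHz window (assuming MHz units, as the SMU messages
+ * consume them). Values must fit in 16 bits, since the message parameter
+ * packs clk_id into the upper half-word:
+ *
+ *	ret = smu_set_soft_freq_range(smu, SMU_GFXCLK, 300, 1000);
+ */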
+
+int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param = 0;
+
+ if (!min && !max)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ mutex_lock(&smu->mutex);
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ param = (clk_id & 0xffff) << 16;
+
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
+ if (ret)
+ goto failed;
+ ret = smu_read_smc_arg(smu, min);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
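+
+/*
+ * Usage sketch (illustrative): read back the min/max UCLK range that DPM
+ * currently exposes; either pointer may be NULL when only one bound is
+ * needed, but not both:
+ *
+ *	uint32_t min_freq, max_freq;
+ *	ret = smu_get_dpm_freq_range(smu, SMU_UCLK, &min_freq, &max_freq);
+ */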
+
+int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint16_t level, uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+ param);
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, &param);
+ if (ret)
+ return ret;
+
+ /* BIT31: 0 - fine grained DPM, 1 - discrete DPM;
+ * discrete DPM is not supported yet, so mask the flag off */
+ *value = param & 0x7fffffff;
+
+ return ret;
+}
+
+int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+}
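+
+/*
+ * Usage sketch (illustrative): walk every discrete SCLK level. Level
+ * 0xff is the firmware convention for "report the number of levels",
+ * which smu_get_dpm_level_count() relies on above:
+ *
+ *	uint32_t i, count, freq;
+ *	ret = smu_get_dpm_level_count(smu, SMU_SCLK, &count);
+ *	for (i = 0; !ret && i < count; i++)
+ *		ret = smu_get_dpm_freq_by_index(smu, SMU_SCLK, i, &freq);
+ */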
+
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if (!smu_feature_is_enabled(smu, feature_id)) {
+ pr_warn("smu clk type %d dpm feature %d is not enabled\n", clk_type, feature_id);
+ return false;
+ }
+
+ return true;
+}
+
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -72,6 +257,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
case AMD_IP_BLOCK_TYPE_VCE:
ret = smu_dpm_set_vce_enable(smu, gate);
break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = smu_gfx_off_control(smu, gate);
+ break;
default:
break;
}
@@ -116,6 +304,14 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
*size = 8;
break;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -127,20 +323,18 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
return ret;
}
-int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
int ret = 0;
- uint32_t table_index;
+ int table_id = smu_table_get_index(smu, table_index);
if (!table_data || table_id >= smu_table->table_count)
return -EINVAL;
- table_index = (exarg << 16) | table_id;
-
- table = &smu_table->tables[table_id];
+ table = &smu_table->tables[table_index];
if (drv2smu)
memcpy(table->cpu_addr, table_data, table->size);
@@ -156,7 +350,7 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_index);
+ table_id | ((argument & 0xFFFF) << 16));
if (ret)
return ret;
@@ -168,13 +362,12 @@ int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16
bool is_support_sw_smu(struct amdgpu_device *adev)
{
- if (amdgpu_dpm != 1)
- return false;
-
- if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+ if (adev->asic_type == CHIP_VEGA20)
+ return amdgpu_dpm == 2;
+ else if (adev->asic_type >= CHIP_NAVI10)
return true;
-
- return false;
+ else
+ return false;
}
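+
+/*
+ * Note (illustrative): per the check above, Vega20 takes the SW SMU path
+ * only when the amdgpu.dpm module parameter is set to 2 (e.g. booting
+ * with amdgpu.dpm=2), while Navi10 and newer use it unconditionally.
+ */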
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
@@ -233,33 +426,36 @@ int smu_feature_init_dpm(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
- uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+ uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
if (!smu->pm_enabled)
return ret;
mutex_lock(&feature->mutex);
- bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+ bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
- ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+ ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
SMU_FEATURE_MAX/32);
if (ret)
return ret;
mutex_lock(&feature->mutex);
- bitmap_andnot(feature->allowed, feature->allowed,
- (unsigned long *)unallowed_feature_mask,
+ bitmap_or(feature->allowed, feature->allowed,
+ (unsigned long *)allowed_feature_mask,
feature->feature_num);
mutex_unlock(&feature->mutex);
return ret;
}
-int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -269,11 +465,15 @@ int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
return ret;
}
-int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
+ bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -292,11 +492,14 @@ failed:
return ret;
}
-int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -306,12 +509,16 @@ int smu_feature_is_supported(struct smu_context *smu, int feature_id)
return ret;
}
-int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+int smu_feature_set_supported(struct smu_context *smu,
+ enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_id;
int ret = 0;
+ feature_id = smu_feature_get_index(smu, mask);
+
WARN_ON(feature_id > feature->feature_num);
mutex_lock(&feature->mutex);
@@ -330,7 +537,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ case CHIP_NAVI10:
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
smu_v11_0_set_smu_funcs(smu);
@@ -450,6 +657,11 @@ static int smu_sw_init(void *handle)
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+
+ mutex_init(&smu->smu_baco.mutex);
+ smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.platform_support = false;
+
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -622,17 +834,17 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return 0;
}
- ret = smu_init_display(smu);
+ ret = smu_init_display_count(smu, 0);
if (ret)
return ret;
if (initialize) {
- ret = smu_read_pptable_from_vbios(smu);
+ /* get boot_values from vbios to set revision, gfxclk, and etc. */
+ ret = smu_get_vbios_bootup_values(smu);
if (ret)
return ret;
- /* get boot_values from vbios to set revision, gfxclk, and etc. */
- ret = smu_get_vbios_bootup_values(smu);
+ ret = smu_setup_pptable(smu);
if (ret)
return ret;
@@ -725,7 +937,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
- ret = smu_set_od8_default_settings(smu, initialize);
+ ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -819,20 +1031,14 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- ret = smu_load_microcode(smu);
- if (ret)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ret = smu_check_fw_status(smu);
+ if (ret) {
+ pr_err("SMC firmware status is not correct\n");
return ret;
+ }
}
- ret = smu_check_fw_status(smu);
- if (ret) {
- pr_err("SMC firmware status is not correct\n");
- return ret;
- }
-
- mutex_lock(&smu->mutex);
-
ret = smu_feature_init_dpm(smu);
if (ret)
goto failed;
@@ -857,19 +1063,20 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
+ ret = smu_register_irq_handler(smu);
+ if (ret)
+ goto failed;
if (!smu->pm_enabled)
adev->pm.dpm_enabled = false;
else
- adev->pm.dpm_enabled = true;
+ adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag once VCN and DAL DPM are workable */
pr_info("SMU is initialized successfully!\n");
return 0;
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -886,20 +1093,11 @@ static int smu_hw_fini(void *handle)
kfree(table_context->max_sustainable_clocks);
table_context->max_sustainable_clocks = NULL;
- kfree(table_context->od_feature_capabilities);
- table_context->od_feature_capabilities = NULL;
-
- kfree(table_context->od_settings_max);
- table_context->od_settings_max = NULL;
-
- kfree(table_context->od_settings_min);
- table_context->od_settings_min = NULL;
-
kfree(table_context->overdrive_table);
table_context->overdrive_table = NULL;
- kfree(table_context->od8_settings);
- table_context->od8_settings = NULL;
+ kfree(smu->irq_source);
+ smu->irq_source = NULL;
ret = smu_fini_fb_allocations(smu);
if (ret)
@@ -933,13 +1131,26 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+ bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
return ret;
+ if (adev->in_gpu_reset && baco_feature_is_enabled) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+ if (ret) {
+ pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ return ret;
+ }
+ }
+
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
+ if (adev->asic_type >= CHIP_NAVI10 &&
+ adev->gfx.rlc.funcs->stop)
+ adev->gfx.rlc.funcs->stop(adev);
+
return 0;
}
@@ -1184,10 +1395,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = smu_unforce_dpm_levels(smu);
break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1197,8 +1408,9 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -1250,6 +1462,60 @@ int smu_handle_task(struct smu_context *smu,
return ret;
}
+enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ enum amd_dpm_forced_level level;
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ level = smu_dpm_ctx->dpm_level;
+ mutex_unlock(&(smu->mutex));
+
+ return level;
+}
+
+int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+ if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+ break;
+ }
+
+ /* bail out if no SMC ip block was found */
+ if (i == smu->adev->num_ip_blocks)
+ return -EINVAL;
+
+ smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+ ret = smu_handle_task(smu, level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+ if (ret)
+ return ret;
+
+ mutex_lock(&smu->mutex);
+ smu_dpm_ctx->dpm_level = level;
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
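+
+/*
+ * Usage sketch (illustrative): a hypothetical sysfs handler pinning the
+ * GPU at its peak profile levels:
+ *
+ *	ret = smu_force_performance_level(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK);
+ */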
+
+int smu_set_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+ ret = smu_init_display_count(smu, count);
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,