Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  219
1 file changed, 12 insertions(+), 207 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index fc4f95fa87cf..39990790ed67 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -33,6 +33,7 @@
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
+#include "vangogh_ppt.h"
#include "amd_pcie.h"
/*
@@ -396,11 +397,15 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
sienna_cichlid_set_ppt_funcs(smu);
break;
case CHIP_RENOIR:
renoir_set_ppt_funcs(smu);
break;
+ case CHIP_VANGOGH:
+ vangogh_set_ppt_funcs(smu);
+ break;
default:
return -EINVAL;
}
@@ -469,6 +474,9 @@ static int smu_late_init(void *handle)
struct smu_context *smu = &adev->smu;
int ret = 0;
+ if (adev->asic_type == CHIP_VANGOGH)
+ return 0;
+
if (!smu->pm_enabled)
return 0;
@@ -1029,17 +1037,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
@@ -1370,96 +1367,11 @@ int smu_display_configuration_change(struct smu_context *smu,
num_of_active_display++;
}
- smu_set_active_display_count(smu, num_of_active_display);
-
- smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
- display_config->cpu_cc6_disable,
- display_config->cpu_pstate_disable,
- display_config->nb_pstate_switch_disable);
-
mutex_unlock(&smu->mutex);
return 0;
}
-static int smu_get_clock_info(struct smu_context *smu,
- struct smu_clock_info *clk_info,
- enum smu_perf_level_designation designation)
-{
- int ret;
- struct smu_performance_level level = {0};
-
- if (!clk_info)
- return -EINVAL;
-
- ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
- if (ret)
- return -EINVAL;
-
- clk_info->min_mem_clk = level.memory_clock;
- clk_info->min_eng_clk = level.core_clock;
- clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
-
- ret = smu_get_perf_level(smu, designation, &level);
- if (ret)
- return -EINVAL;
-
- clk_info->min_mem_clk = level.memory_clock;
- clk_info->min_eng_clk = level.core_clock;
- clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
-
- return 0;
-}
-
-int smu_get_current_clocks(struct smu_context *smu,
- struct amd_pp_clock_info *clocks)
-{
- struct amd_pp_simple_clock_info simple_clocks = {0};
- struct smu_clock_info hw_clocks;
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- smu_get_dal_power_level(smu, &simple_clocks);
-
- if (smu->support_power_containment)
- ret = smu_get_clock_info(smu, &hw_clocks,
- PERF_LEVEL_POWER_CONTAINMENT);
- else
- ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
-
- if (ret) {
- dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
- goto failed;
- }
-
- clocks->min_engine_clock = hw_clocks.min_eng_clk;
- clocks->max_engine_clock = hw_clocks.max_eng_clk;
- clocks->min_memory_clock = hw_clocks.min_mem_clk;
- clocks->max_memory_clock = hw_clocks.max_mem_clk;
- clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
- clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
- clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
- clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
-
- if (simple_clocks.level == 0)
- clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
- else
- clocks->max_clocks_state = simple_clocks.level;
-
- if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
- clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
- clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
- }
-
-failed:
- mutex_unlock(&smu->mutex);
- return ret;
-}
-
static int smu_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1491,6 +1403,7 @@ static int smu_enable_umd_pstate(void *handle,
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
smu_dpm_ctx->enable_umd_pstate = true;
+ smu_gpo_control(smu, false);
amdgpu_device_ip_set_powergating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
@@ -1499,6 +1412,7 @@ static int smu_enable_umd_pstate(void *handle,
AMD_CG_STATE_UNGATE);
smu_gfx_ulv_control(smu, false);
smu_deep_sleep_control(smu, false);
+ amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1506,6 +1420,7 @@ static int smu_enable_umd_pstate(void *handle,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = smu_dpm_ctx->saved_dpm_level;
smu_dpm_ctx->enable_umd_pstate = false;
+ amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
smu_deep_sleep_control(smu, true);
smu_gfx_ulv_control(smu, true);
amdgpu_device_ip_set_clockgating_state(smu->adev,
@@ -1514,6 +1429,7 @@ static int smu_enable_umd_pstate(void *handle,
amdgpu_device_ip_set_powergating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_GATE);
+ smu_gpo_control(smu, true);
}
}
@@ -1592,9 +1508,6 @@ int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
goto out;
- ret = smu_set_cpu_power_state(smu);
- if (ret)
- goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -2044,40 +1957,6 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
return ret;
}
-int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->get_od_percentage)
- ret = smu->ppt_funcs->get_od_percentage(smu, type);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
-int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->set_od_percentage)
- ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
int smu_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
@@ -2318,43 +2197,6 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
return ret;
}
-int smu_get_clock_by_type(struct smu_context *smu,
- enum amd_pp_clock_type type,
- struct amd_pp_clocks *clocks)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->get_clock_by_type)
- ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
-int smu_get_max_high_clocks(struct smu_context *smu,
- struct amd_pp_simple_clock_info *clocks)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->get_max_high_clocks)
- ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
int smu_get_clock_by_type_with_latency(struct smu_context *smu,
enum smu_clk_type clk_type,
struct pp_clock_levels_with_latency *clocks)
@@ -2374,26 +2216,6 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
return ret;
}
-int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_voltage *clocks)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->get_clock_by_type_with_voltage)
- ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
-
int smu_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request *clock_req)
{
@@ -2430,23 +2252,6 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
return ret;
}
-int smu_notify_smu_enable_pwe(struct smu_context *smu)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->notify_smu_enable_pwe)
- ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
int smu_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{