Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c')
-rw-r--r--	drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c	137
1 file changed, 115 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e1796ecf9c05..fb8643d25d1b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -717,6 +717,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
smu_v13_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
smu_v13_0_6_set_ppt_funcs(smu);
/* Enable pp_od_clk_voltage node */
smu->od_enabled = true;
@@ -726,6 +727,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 4):
smu_v14_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(14, 0, 2):
@@ -1208,17 +1210,28 @@ static void smu_swctf_delayed_work_handler(struct work_struct *work)
static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
+ struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
+ struct smu_dpm_policy_ctxt *policy_ctxt;
+ struct smu_dpm_policy *policy;
+
+ policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
- smu->plpd_mode = XGMI_PLPD_DEFAULT;
+ if (policy)
+ policy->current_level = XGMI_PLPD_DEFAULT;
return;
}
/* PMFW puts PLPD into the default policy after enabling the feature */
if (smu_feature_is_enabled(smu,
- SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT))
- smu->plpd_mode = XGMI_PLPD_DEFAULT;
- else
- smu->plpd_mode = XGMI_PLPD_NONE;
+ SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
+ if (policy)
+ policy->current_level = XGMI_PLPD_DEFAULT;
+ } else {
+ policy_ctxt = dpm_ctxt->dpm_policies;
+ if (policy_ctxt)
+ policy_ctxt->policy_mask &=
+ ~BIT(PP_PM_POLICY_XGMI_PLPD);
+ }
}
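
For context, this hunk is where the driver stops tracking PLPD state in the single smu->plpd_mode field and starts routing it through the generic DPM policy framework. A minimal sketch of the policy objects, reconstructed purely from the call sites in this diff (the authoritative definitions live in the swsmu headers and may differ in detail):

struct smu_dpm_policy;

/* Sketch only: field names inferred from this diff, not copied from the
 * headers; PP_PM_POLICY_NUM is an assumed array bound. */
struct smu_dpm_policy_desc {
	const char *(*get_desc)(struct smu_dpm_policy *policy, int level);
};

struct smu_dpm_policy {
	enum pp_pm_policy policy_type;	/* e.g. PP_PM_POLICY_XGMI_PLPD */
	unsigned long level_mask;	/* one bit per supported level */
	int current_level;		/* e.g. XGMI_PLPD_DEFAULT */
	int (*set_policy)(struct smu_context *smu, int level);
	struct smu_dpm_policy_desc *desc;
};

struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;	/* BIT(policy_type) per valid entry */
};

Note that when the feature is disabled, the hunk clears the policy's bit in policy_mask instead of storing a "none" level, so an unsupported policy simply disappears from the mask.
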
static int smu_sw_init(void *handle)
@@ -1742,6 +1755,8 @@ static int smu_start_smc_engine(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ smu->smc_fw_state = SMU_FW_INIT;
+
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
if (smu->ppt_funcs->load_microcode) {
@@ -1857,6 +1872,8 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
return 0;
default:
break;
@@ -2220,7 +2237,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
{
int ret = 0;
int index = 0;
- long workload;
+ long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) {
@@ -2260,10 +2277,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload = smu->workload_setting[index];
+ workload[0] = smu->workload_setting[index];
- if (smu->power_profile_mode != workload)
- smu_bump_power_profile_mode(smu, &workload, 0);
+ if (smu->power_profile_mode != workload[0])
+ smu_bump_power_profile_mode(smu, workload, 0);
}
return ret;
@@ -2313,7 +2330,7 @@ static int smu_switch_power_profile(void *handle,
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- long workload;
+ long workload[1];
uint32_t index;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2326,17 +2343,17 @@ static int smu_switch_power_profile(void *handle,
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload = smu->workload_setting[index];
+ workload[0] = smu->workload_setting[index];
} else {
smu->workload_mask |= (1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload = smu->workload_setting[index];
+ workload[0] = smu->workload_setting[index];
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
- smu_bump_power_profile_mode(smu, &workload, 0);
+ smu_bump_power_profile_mode(smu, workload, 0);
return 0;
}
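
The switch from `long workload` to `long workload[1]` in this and the previous hunk matches the callee's contract rather than changing behavior: smu_bump_power_profile_mode() treats its second argument as an array of parameters, so passing a genuine one-element array instead of `&workload` makes that explicit. The assumed prototype, inferred from the call sites above:

/* Assumed prototype (the function is defined elsewhere in this file):
 * 'param' is read as an array and 'param_size' as its element count,
 * with 0 meaning a single value at these call sites. */
static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param, uint32_t param_size);
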
@@ -2729,6 +2746,7 @@ int smu_get_power_limit(void *handle,
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
@@ -3507,26 +3525,101 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
return 0;
}
-int smu_set_xgmi_plpd_mode(struct smu_context *smu,
- enum pp_xgmi_plpd_mode mode)
+static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
+ size_t *size)
+{
+ size_t offset = *size;
+ int level;
+
+ for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
+ if (level == policy->current_level)
+ offset += sysfs_emit_at(sysbuf, offset,
+ "%d : %s*\n", level,
+ policy->desc->get_desc(policy, level));
+ else
+ offset += sysfs_emit_at(sysbuf, offset,
+ "%d : %s\n", level,
+ policy->desc->get_desc(policy, level));
+ }
+
+ *size = offset;
+}
+
+ssize_t smu_get_pm_policy_info(struct smu_context *smu,
+ enum pp_pm_policy p_type, char *sysbuf)
+{
+ struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
+ struct smu_dpm_policy_ctxt *policy_ctxt;
+ struct smu_dpm_policy *dpm_policy;
+ size_t offset = 0;
+
+ policy_ctxt = dpm_ctxt->dpm_policies;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
+ !policy_ctxt->policy_mask)
+ return -EOPNOTSUPP;
+
+ if (p_type == PP_PM_POLICY_NONE)
+ return -EINVAL;
+
+ dpm_policy = smu_get_pm_policy(smu, p_type);
+ if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
+ return -ENOENT;
+
+ if (!sysbuf)
+ return -EINVAL;
+
+ smu_print_dpm_policy(dpm_policy, sysbuf, &offset);
+
+ return offset;
+}
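
For illustration, with a three-level PLPD policy whose get_desc() callback returned names like the hypothetical ones below, smu_get_pm_policy_info() would fill the buffer as follows; only the "%d : %s" layout and the trailing '*' on the active level come from smu_print_dpm_policy() above, the descriptor strings are made up:

0 : plpd_disallow
1 : plpd_default*
2 : plpd_optimized
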
+
+struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
+ enum pp_pm_policy p_type)
{
+ struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
+ struct smu_dpm_policy_ctxt *policy_ctxt;
+ int i;
+
+ policy_ctxt = dpm_ctxt->dpm_policies;
+ if (!policy_ctxt)
+ return NULL;
+
+ for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
+ if (policy_ctxt->policies[i].policy_type == p_type)
+ return &policy_ctxt->policies[i];
+ }
+
+ return NULL;
+}
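
smu_get_pm_policy() only scans the first hweight32(policy_mask) slots, so valid entries must be packed at the front of the policies[] array. A hypothetical registration sketch showing how a ppt backend could populate the context so the lookup succeeds (the real setup is done by the backend code and is not part of this diff; all example_* names are assumptions):

/* Hypothetical backend hook; would send the PMFW message for 'level'. */
static int example_xgmi_plpd_set(struct smu_context *smu, int level)
{
	/* ... message the PMFW here ... */
	return 0;
}

static void example_register_plpd_policy(struct smu_context *smu)
{
	/* Assumes dpm_policies was allocated during sw_init. */
	struct smu_dpm_policy_ctxt *ctxt = smu->smu_dpm.dpm_policies;
	struct smu_dpm_policy *p = &ctxt->policies[0];

	p->policy_type = PP_PM_POLICY_XGMI_PLPD;
	p->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT) |
			BIT(XGMI_PLPD_OPTIMIZED);
	p->current_level = XGMI_PLPD_DEFAULT;
	p->set_policy = example_xgmi_plpd_set;
	ctxt->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD);
}
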
+
+int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
+ int level)
+{
+ struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
+ struct smu_dpm_policy *dpm_policy = NULL;
+ struct smu_dpm_policy_ctxt *policy_ctxt;
int ret = -EOPNOTSUPP;
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ policy_ctxt = dpm_ctxt->dpm_policies;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
+ !policy_ctxt->policy_mask)
return ret;
- /* PLPD policy is not supported if it's NONE */
- if (smu->plpd_mode == XGMI_PLPD_NONE)
+ if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
+ return -EINVAL;
+
+ dpm_policy = smu_get_pm_policy(smu, p_type);
+
+ if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
return ret;
- if (smu->plpd_mode == mode)
+ if (dpm_policy->current_level == level)
return 0;
- if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
- ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
+ ret = dpm_policy->set_policy(smu, level);
if (!ret)
- smu->plpd_mode = mode;
+ dpm_policy->current_level = level;
return ret;
}
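
Taken together, the new helpers replace the PLPD-specific smu_set_xgmi_plpd_mode() with a policy-agnostic read/modify interface for callers such as the sysfs handlers in amdgpu_pm.c. A hedged usage sketch (the example_* wrappers are hypothetical; only the two smu_* calls come from this diff):

/* Hypothetical sysfs-style callers; error plumbing trimmed. */
static ssize_t example_show_plpd(struct smu_context *smu, char *buf)
{
	/* Emits each supported level, marking the active one with '*'. */
	return smu_get_pm_policy_info(smu, PP_PM_POLICY_XGMI_PLPD, buf);
}

static int example_store_plpd(struct smu_context *smu, int level)
{
	/* Range-checks 'level' and forwards to the backend's set_policy(). */
	return smu_set_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD, level);
}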