Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 990
1 file changed, 340 insertions(+), 650 deletions(-)
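Two mechanical conversions account for most of the churn in the diff below: the is_support_sw_smu() branches are dropped from most sysfs handlers so they go through the common adev->powerplay.pp_funcs table, and the show handlers switch from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(). As a reading aid, a minimal before/after of the second conversion (value is a stand-in for whatever a given handler reports):

	/* before: bound the output against PAGE_SIZE by hand */
	return snprintf(buf, PAGE_SIZE, "%d\n", value);

	/* after: sysfs_emit() already limits output to one page and
	 * additionally warns if 'buf' is not the page-aligned buffer
	 * that sysfs passed in
	 */
	return sysfs_emit(buf, "%d\n", value);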
| diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 5fa65f191a37..8128603ef495 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -23,13 +23,10 @@   *          Alex Deucher <alexdeucher@gmail.com>   */ -#include <drm/drm_debugfs.h> -  #include "amdgpu.h"  #include "amdgpu_drv.h"  #include "amdgpu_pm.h"  #include "amdgpu_dpm.h" -#include "amdgpu_smu.h"  #include "atom.h"  #include <linux/pci.h>  #include <linux/hwmon.h> @@ -125,11 +122,14 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,  {  	struct drm_device *ddev = dev_get_drvdata(dev);  	struct amdgpu_device *adev = drm_to_adev(ddev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	enum amd_pm_state_type pm;  	int ret;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -137,12 +137,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		if (adev->smu.ppt_funcs->get_current_power_state) -			pm = smu_get_current_power_state(&adev->smu); -		else -			pm = adev->pm.dpm.user_state; -	} else if (adev->powerplay.pp_funcs->get_current_power_state) { +	if (pp_funcs->get_current_power_state) {  		pm = amdgpu_dpm_get_current_power_state(adev);  	} else {  		pm = adev->pm.dpm.user_state; @@ -151,9 +146,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); -	return snprintf(buf, PAGE_SIZE, "%s\n", -			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" : -			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); +	return sysfs_emit(buf, "%s\n", +			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : +			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");  }  static ssize_t amdgpu_set_power_dpm_state(struct device *dev, @@ -168,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (strncmp("battery", buf, strlen("battery")) == 0)  		state = POWER_STATE_TYPE_BATTERY; @@ -274,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -281,9 +280,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		level = smu_get_performance_level(&adev->smu); -	else if (adev->powerplay.pp_funcs->get_performance_level) +	if (adev->powerplay.pp_funcs->get_performance_level)  		level = amdgpu_dpm_get_performance_level(adev);  	else  		level = adev->pm.dpm.forced_level; @@ -291,16 +288,17 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); -	return snprintf(buf, PAGE_SIZE, "%s\n", -			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : -			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : -			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : -			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : -			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : -			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? 
"profile_min_sclk" : -			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : -			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : -			"unknown"); +	return sysfs_emit(buf, "%s\n", +			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : +			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : +			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : +			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : +			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : +			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : +			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : +			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : +			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" : +			  "unknown");  }  static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, @@ -310,12 +308,15 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,  {  	struct drm_device *ddev = dev_get_drvdata(dev);  	struct amdgpu_device *adev = drm_to_adev(ddev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	enum amd_dpm_forced_level level;  	enum amd_dpm_forced_level current_level = 0xff;  	int ret = 0;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (strncmp("low", buf, strlen("low")) == 0) {  		level = AMD_DPM_FORCED_LEVEL_LOW; @@ -335,6 +336,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,  		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;  	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {  		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; +	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) { +		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;  	}  else {  		return -EINVAL;  	} @@ -345,9 +348,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		current_level = smu_get_performance_level(&adev->smu); -	else if (adev->powerplay.pp_funcs->get_performance_level) +	if (pp_funcs->get_performance_level)  		current_level = amdgpu_dpm_get_performance_level(adev);  	if (current_level == level) { @@ -377,14 +378,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,  		return -EINVAL;  	} -	if (is_support_sw_smu(adev)) { -		ret = smu_force_performance_level(&adev->smu, level); -		if (ret) { -			pm_runtime_mark_last_busy(ddev->dev); -			pm_runtime_put_autosuspend(ddev->dev); -			return -EINVAL; -		} -	} else if (adev->powerplay.pp_funcs->force_performance_level) { +	if (pp_funcs->force_performance_level) {  		mutex_lock(&adev->pm.mutex);  		if (adev->pm.dpm.thermal_active) {  			mutex_unlock(&adev->pm.mutex); @@ -415,11 +409,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,  {  	struct drm_device *ddev = dev_get_drvdata(dev);  	struct amdgpu_device *adev = drm_to_adev(ddev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	struct pp_states_info data;  	int i, buf_len, ret;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -427,11 +424,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		ret = smu_get_power_num_states(&adev->smu, &data); -		if (ret) -			return ret; -	
} else if (adev->powerplay.pp_funcs->get_pp_num_states) { +	if (pp_funcs->get_pp_num_states) {  		amdgpu_dpm_get_pp_num_states(adev, &data);  	} else {  		memset(&data, 0, sizeof(data)); @@ -457,13 +450,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,  {  	struct drm_device *ddev = dev_get_drvdata(dev);  	struct amdgpu_device *adev = drm_to_adev(ddev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	struct pp_states_info data; -	struct smu_context *smu = &adev->smu;  	enum amd_pm_state_type pm = 0;  	int i = 0, ret = 0;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -471,13 +466,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		pm = smu_get_current_power_state(smu); -		ret = smu_get_power_num_states(smu, &data); -		if (ret) -			return ret; -	} else if (adev->powerplay.pp_funcs->get_current_power_state -		 && adev->powerplay.pp_funcs->get_pp_num_states) { +	if (pp_funcs->get_current_power_state +		 && pp_funcs->get_pp_num_states) {  		pm = amdgpu_dpm_get_current_power_state(adev);  		amdgpu_dpm_get_pp_num_states(adev, &data);  	} @@ -493,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,  	if (i == data.nums)  		i = -EINVAL; -	return snprintf(buf, PAGE_SIZE, "%d\n", i); +	return sysfs_emit(buf, "%d\n", i);  }  static ssize_t amdgpu_get_pp_force_state(struct device *dev, @@ -505,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (adev->pp_force_state_enabled)  		return amdgpu_get_pp_cur_state(dev, attr, buf);  	else -		return snprintf(buf, PAGE_SIZE, "\n"); +		return sysfs_emit(buf, "\n");  }  static ssize_t amdgpu_set_pp_force_state(struct device *dev, @@ -525,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (strlen(buf) == 1)  		adev->pp_force_state_enabled = false; @@ -585,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -592,13 +588,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		size = smu_sys_get_pp_table(&adev->smu, (void **)&table); -		pm_runtime_mark_last_busy(ddev->dev); -		pm_runtime_put_autosuspend(ddev->dev); -		if (size < 0) -			return size; -	} else if (adev->powerplay.pp_funcs->get_pp_table) { +	if (adev->powerplay.pp_funcs->get_pp_table) {  		size = amdgpu_dpm_get_pp_table(adev, &table);  		pm_runtime_mark_last_busy(ddev->dev);  		pm_runtime_put_autosuspend(ddev->dev); @@ -629,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -636,15 +628,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); -		if (ret) { -			pm_runtime_mark_last_busy(ddev->dev); -			pm_runtime_put_autosuspend(ddev->dev); -			return ret; -		} -	} 
else if (adev->powerplay.pp_funcs->set_pp_table) -		amdgpu_dpm_set_pp_table(adev, buf, count); +	ret = amdgpu_dpm_set_pp_table(adev, buf, count); +	if (ret) { +		pm_runtime_mark_last_busy(ddev->dev); +		pm_runtime_put_autosuspend(ddev->dev); +		return ret; +	}  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); @@ -794,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (count > 127)  		return -EINVAL; @@ -842,53 +833,42 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		ret = smu_od_edit_dpm_table(&adev->smu, type, -					    parameter, parameter_size); - +	if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) { +		ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type, +							parameter, +							parameter_size);  		if (ret) {  			pm_runtime_mark_last_busy(ddev->dev);  			pm_runtime_put_autosuspend(ddev->dev);  			return -EINVAL;  		} -	} else { - -		if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) { -			ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type, -								parameter, -								parameter_size); -			if (ret) { -				pm_runtime_mark_last_busy(ddev->dev); -				pm_runtime_put_autosuspend(ddev->dev); -				return -EINVAL; -			} -		} +	} -		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { -			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, -						parameter, parameter_size); -			if (ret) { -				pm_runtime_mark_last_busy(ddev->dev); -				pm_runtime_put_autosuspend(ddev->dev); -				return -EINVAL; -			} +	if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { +		ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, +						    parameter, parameter_size); +		if (ret) { +			pm_runtime_mark_last_busy(ddev->dev); +			pm_runtime_put_autosuspend(ddev->dev); +			return -EINVAL;  		} +	} -		if (type == PP_OD_COMMIT_DPM_TABLE) { -			if (adev->powerplay.pp_funcs->dispatch_tasks) { -				amdgpu_dpm_dispatch_task(adev, -						AMD_PP_TASK_READJUST_POWER_STATE, -						NULL); -				pm_runtime_mark_last_busy(ddev->dev); -				pm_runtime_put_autosuspend(ddev->dev); -				return count; -			} else { -				pm_runtime_mark_last_busy(ddev->dev); -				pm_runtime_put_autosuspend(ddev->dev); -				return -EINVAL; -			} +	if (type == PP_OD_COMMIT_DPM_TABLE) { +		if (adev->powerplay.pp_funcs->dispatch_tasks) { +			amdgpu_dpm_dispatch_task(adev, +						 AMD_PP_TASK_READJUST_POWER_STATE, +						 NULL); +			pm_runtime_mark_last_busy(ddev->dev); +			pm_runtime_put_autosuspend(ddev->dev); +			return count; +		} else { +			pm_runtime_mark_last_busy(ddev->dev); +			pm_runtime_put_autosuspend(ddev->dev); +			return -EINVAL;  		}  	} +  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); @@ -906,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -913,18 +895,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); -		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); -		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); -		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size); -		size += 
smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); -		size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size); -	} else if (adev->powerplay.pp_funcs->print_clock_levels) { +	if (adev->powerplay.pp_funcs->print_clock_levels) {  		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);  		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);  		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); +		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);  		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); +		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);  	} else {  		size = snprintf(buf, PAGE_SIZE, "\n");  	} @@ -962,6 +939,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = kstrtou64(buf, 0, &featuremask);  	if (ret) @@ -973,14 +952,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); -		if (ret) { -			pm_runtime_mark_last_busy(ddev->dev); -			pm_runtime_put_autosuspend(ddev->dev); -			return -EINVAL; -		} -	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) { +	if (adev->powerplay.pp_funcs->set_ppfeature_status) {  		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);  		if (ret) {  			pm_runtime_mark_last_busy(ddev->dev); @@ -1005,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -1012,9 +986,7 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		size = smu_sys_get_pp_feature_mask(&adev->smu, buf); -	else if (adev->powerplay.pp_funcs->get_ppfeature_status) +	if (adev->powerplay.pp_funcs->get_ppfeature_status)  		size = amdgpu_dpm_get_ppfeature_status(adev, buf);  	else  		size = snprintf(buf, PAGE_SIZE, "\n"); @@ -1055,8 +1027,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,   * NOTE: change to the dcefclk max dpm level is not supported now   */ -static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, -		struct device_attribute *attr, +static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, +		enum pp_clock_type type,  		char *buf)  {  	struct drm_device *ddev = dev_get_drvdata(dev); @@ -1066,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -1073,10 +1047,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); -	else if (adev->powerplay.pp_funcs->print_clock_levels) -		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); +	if (adev->powerplay.pp_funcs->print_clock_levels) +		size = amdgpu_dpm_print_clock_levels(adev, type, buf);  	else  		size = snprintf(buf, PAGE_SIZE, "\n"); @@ -1121,8 +1093,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)  	return 0;  } -static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, -		struct device_attribute *attr, +static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, +		enum pp_clock_type 
type,  		const char *buf,  		size_t count)  { @@ -1133,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = amdgpu_read_mask(buf, count, &mask);  	if (ret) @@ -1144,10 +1118,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); -	else if (adev->powerplay.pp_funcs->force_clock_level) -		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); +	if (adev->powerplay.pp_funcs->force_clock_level) +		ret = amdgpu_dpm_force_clock_level(adev, type, mask); +	else +		ret = 0;  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); @@ -1158,35 +1132,26 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,  	return count;  } -static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, +static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); -	else if (adev->powerplay.pp_funcs->print_clock_levels) -		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); +	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf); +} -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); +static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, +		struct device_attribute *attr, +		const char *buf, +		size_t count) +{ +	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count); +} -	return size; +static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, +		struct device_attribute *attr, +		char *buf) +{ +	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);  }  static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, @@ -1194,67 +1159,14 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	uint32_t mask = 0; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); -	else if (adev->powerplay.pp_funcs->force_clock_level) -		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);  }  static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = 
smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); -	else if (adev->powerplay.pp_funcs->print_clock_levels) -		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	return size; +	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);  }  static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, @@ -1262,69 +1174,14 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	int ret; -	uint32_t mask = 0; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); -	else if (adev->powerplay.pp_funcs->force_clock_level) -		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); -	else -		ret = 0; - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);  }  static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); -	else if (adev->powerplay.pp_funcs->print_clock_levels) -		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	return size; +	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);  }  static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, @@ -1332,67 +1189,14 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	int ret; -	uint32_t mask = 0; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); -	else if (adev->powerplay.pp_funcs->force_clock_level) -		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); -	else -		ret = 0; - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);  }  static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		
return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	return size; +	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);  }  static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev, @@ -1400,65 +1204,14 @@ static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	int ret; -	uint32_t mask = 0; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask); -	else -		ret = 0; - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);  }  static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	return size; +	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);  }  static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev, @@ -1466,67 +1219,14 @@ static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	int ret; -	uint32_t mask = 0; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask); -	else -		ret = 0; - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);  }  static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); -	else if (adev->powerplay.pp_funcs->print_clock_levels) -		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	return size; +	return amdgpu_get_pp_dpm_clock(dev, 
PP_DCEFCLK, buf);  }  static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, @@ -1534,69 +1234,14 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	int ret; -	uint32_t mask = 0; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); -	else if (adev->powerplay.pp_funcs->force_clock_level) -		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); -	else -		ret = 0; - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);  }  static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,  		struct device_attribute *attr,  		char *buf)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	ssize_t size; -	int ret; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); -	else if (adev->powerplay.pp_funcs->print_clock_levels) -		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); -	else -		size = snprintf(buf, PAGE_SIZE, "\n"); - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	return size; +	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);  }  static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, @@ -1604,38 +1249,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,  		const char *buf,  		size_t count)  { -	struct drm_device *ddev = dev_get_drvdata(dev); -	struct amdgpu_device *adev = drm_to_adev(ddev); -	int ret; -	uint32_t mask = 0; - -	if (amdgpu_in_reset(adev)) -		return -EPERM; - -	ret = amdgpu_read_mask(buf, count, &mask); -	if (ret) -		return ret; - -	ret = pm_runtime_get_sync(ddev->dev); -	if (ret < 0) { -		pm_runtime_put_autosuspend(ddev->dev); -		return ret; -	} - -	if (is_support_sw_smu(adev)) -		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); -	else if (adev->powerplay.pp_funcs->force_clock_level) -		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); -	else -		ret = 0; - -	pm_runtime_mark_last_busy(ddev->dev); -	pm_runtime_put_autosuspend(ddev->dev); - -	if (ret) -		return -EINVAL; - -	return count; +	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);  }  static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, @@ -1649,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -1664,7 +1280,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); -	return snprintf(buf, PAGE_SIZE, "%d\n", value); +	return sysfs_emit(buf, "%d\n", value);  }  static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, @@ -1679,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	
if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = kstrtol(buf, 0, &value); @@ -1722,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -1737,7 +1357,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); -	return snprintf(buf, PAGE_SIZE, "%d\n", value); +	return sysfs_emit(buf, "%d\n", value);  }  static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, @@ -1752,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = kstrtol(buf, 0, &value); @@ -1815,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -1822,9 +1446,7 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		size = smu_get_power_profile_mode(&adev->smu, buf); -	else if (adev->powerplay.pp_funcs->get_power_profile_mode) +	if (adev->powerplay.pp_funcs->get_power_profile_mode)  		size = amdgpu_dpm_get_power_profile_mode(adev, buf);  	else  		size = snprintf(buf, PAGE_SIZE, "\n"); @@ -1855,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	tmp[0] = *(buf);  	tmp[1] = '\0'; @@ -1888,9 +1512,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); -	else if (adev->powerplay.pp_funcs->set_power_profile_mode) +	if (adev->powerplay.pp_funcs->set_power_profile_mode)  		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);  	pm_runtime_mark_last_busy(ddev->dev); @@ -1920,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(ddev->dev);  	if (r < 0) { @@ -1937,7 +1561,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", value); +	return sysfs_emit(buf, "%d\n", value);  }  /** @@ -1958,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(ddev->dev);  	if (r < 0) { @@ -1975,7 +1601,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", value); +	return sysfs_emit(buf, "%d\n", value);  }  /** @@ -2001,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (adev->flags & AMD_IS_APU)  		return -ENODATA; @@ -2019,8 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,  	pm_runtime_mark_last_busy(ddev->dev);  	pm_runtime_put_autosuspend(ddev->dev); -	return snprintf(buf, 
PAGE_SIZE,	"%llu %llu %i\n", -			count0, count1, pcie_get_mps(adev->pdev)); +	return sysfs_emit(buf, "%llu %llu %i\n", +			  count0, count1, pcie_get_mps(adev->pdev));  }  /** @@ -2042,9 +1670,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (adev->unique_id) -		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); +		return sysfs_emit(buf, "%016llx\n", adev->unique_id);  	return 0;  } @@ -2071,10 +1701,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,  	struct drm_device *ddev = dev_get_drvdata(dev);  	struct amdgpu_device *adev = drm_to_adev(ddev); -	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n", -			adev_to_drm(adev)->unique, -			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled", -			adev->throttling_logging_rs.interval / HZ + 1); +	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n", +			  adev_to_drm(adev)->unique, +			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled", +			  adev->throttling_logging_rs.interval / HZ + 1);  }  static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, @@ -2140,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(ddev->dev);  	if (ret < 0) { @@ -2147,9 +1779,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) -		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics); -	else if (adev->powerplay.pp_funcs->get_gpu_metrics) +	if (adev->powerplay.pp_funcs->get_gpu_metrics)  		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);  	if (size <= 0) @@ -2169,7 +1799,7 @@ out:  static struct amdgpu_device_attr amdgpu_device_attrs[] = {  	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), -	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC), +	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),  	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),  	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),  	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC), @@ -2214,7 +1844,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_  		if (asic_type < CHIP_VEGA10)  			*states = ATTR_STATE_UNSUPPORTED;  	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { -		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS) +		if (asic_type < CHIP_VEGA10 || +		    asic_type == CHIP_ARCTURUS || +		    asic_type == CHIP_ALDEBARAN)  			*states = ATTR_STATE_UNSUPPORTED;  	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {  		if (asic_type < CHIP_VEGA20) @@ -2370,6 +2002,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (channel >= PP_TEMP_MAX)  		return -EINVAL; @@ -2407,7 +2041,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", temp); +	return sysfs_emit(buf, "%d\n", temp);  }  static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, @@ -2423,7 +2057,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,  	else  		temp = 
adev->pm.dpm.thermal.max_temp; -	return snprintf(buf, PAGE_SIZE, "%d\n", temp); +	return sysfs_emit(buf, "%d\n", temp);  }  static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, @@ -2439,7 +2073,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,  	else  		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; -	return snprintf(buf, PAGE_SIZE, "%d\n", temp); +	return sysfs_emit(buf, "%d\n", temp);  }  static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, @@ -2455,7 +2089,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,  	else  		temp = adev->pm.dpm.thermal.max_mem_crit_temp; -	return snprintf(buf, PAGE_SIZE, "%d\n", temp); +	return sysfs_emit(buf, "%d\n", temp);  }  static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, @@ -2467,7 +2101,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,  	if (channel >= PP_TEMP_MAX)  		return -EINVAL; -	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); +	return sysfs_emit(buf, "%s\n", temp_label[channel].label);  }  static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, @@ -2493,7 +2127,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,  		break;  	} -	return snprintf(buf, PAGE_SIZE, "%d\n", temp); +	return sysfs_emit(buf, "%d\n", temp);  }  static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, @@ -2506,6 +2140,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (ret < 0) { @@ -2513,22 +2149,18 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		pwm_mode = smu_get_fan_control_mode(&adev->smu); -	} else { -		if (!adev->powerplay.pp_funcs->get_fan_control_mode) { -			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); -			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); -			return -EINVAL; -		} - -		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); +	if (!adev->powerplay.pp_funcs->get_fan_control_mode) { +		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); +		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); +		return -EINVAL;  	} +	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); +  	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);  	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); -	return sprintf(buf, "%i\n", pwm_mode); +	return sprintf(buf, "%u\n", pwm_mode);  }  static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, @@ -2542,6 +2174,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = kstrtoint(buf, 10, &value);  	if (err) @@ -2553,18 +2187,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		smu_set_fan_control_mode(&adev->smu, value); -	} else { -		if (!adev->powerplay.pp_funcs->set_fan_control_mode) { -			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); -			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); -			return -EINVAL; -		} - -		amdgpu_dpm_set_fan_control_mode(adev, value); +	if (!adev->powerplay.pp_funcs->set_fan_control_mode) { +		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); +		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); +		return -EINVAL;  	} +	amdgpu_dpm_set_fan_control_mode(adev, 
value); +  	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);  	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2596,6 +2226,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (err < 0) { @@ -2603,11 +2235,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		pwm_mode = smu_get_fan_control_mode(&adev->smu); -	else -		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); - +	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);  	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {  		pr_info("manual fan speed control should be enabled first\n");  		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -2624,9 +2252,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,  	value = (value * 100) / 255; -	if (is_support_sw_smu(adev)) -		err = smu_set_fan_speed_percent(&adev->smu, value); -	else if (adev->powerplay.pp_funcs->set_fan_speed_percent) +	if (adev->powerplay.pp_funcs->set_fan_speed_percent)  		err = amdgpu_dpm_set_fan_speed_percent(adev, value);  	else  		err = -EINVAL; @@ -2650,6 +2276,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (err < 0) { @@ -2657,9 +2285,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		err = smu_get_fan_speed_percent(&adev->smu, &speed); -	else if (adev->powerplay.pp_funcs->get_fan_speed_percent) +	if (adev->powerplay.pp_funcs->get_fan_speed_percent)  		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);  	else  		err = -EINVAL; @@ -2685,6 +2311,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (err < 0) { @@ -2692,9 +2320,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		err = smu_get_fan_speed_rpm(&adev->smu, &speed); -	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) +	if (adev->powerplay.pp_funcs->get_fan_speed_rpm)  		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);  	else  		err = -EINVAL; @@ -2719,6 +2345,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -2735,7 +2363,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); +	return sysfs_emit(buf, "%d\n", min_rpm);  }  static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, @@ -2749,6 +2377,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -2765,7 +2395,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); +	return sysfs_emit(buf, "%d\n", max_rpm);  }  static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, @@ -2778,6 
+2408,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (err < 0) { @@ -2785,9 +2417,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		err = smu_get_fan_speed_rpm(&adev->smu, &rpm); -	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) +	if (adev->powerplay.pp_funcs->get_fan_speed_rpm)  		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);  	else  		err = -EINVAL; @@ -2812,6 +2442,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (err < 0) { @@ -2819,10 +2451,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		pwm_mode = smu_get_fan_control_mode(&adev->smu); -	else -		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); +	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);  	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {  		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -2837,9 +2466,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		err = smu_set_fan_speed_rpm(&adev->smu, value); -	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) +	if (adev->powerplay.pp_funcs->set_fan_speed_rpm)  		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);  	else  		err = -EINVAL; @@ -2863,6 +2490,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (ret < 0) { @@ -2870,18 +2499,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,  		return ret;  	} -	if (is_support_sw_smu(adev)) { -		pwm_mode = smu_get_fan_control_mode(&adev->smu); -	} else { -		if (!adev->powerplay.pp_funcs->get_fan_control_mode) { -			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); -			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); -			return -EINVAL; -		} - -		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); +	if (!adev->powerplay.pp_funcs->get_fan_control_mode) { +		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); +		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); +		return -EINVAL;  	} +	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); +  	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);  	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2900,6 +2525,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	err = kstrtoint(buf, 10, &value);  	if (err) @@ -2918,16 +2545,12 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) { -		smu_set_fan_control_mode(&adev->smu, pwm_mode); -	} else { -		if (!adev->powerplay.pp_funcs->set_fan_control_mode) { -			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); -			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); -			return -EINVAL; -		} -		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); +	if (!adev->powerplay.pp_funcs->set_fan_control_mode) { +		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); +		
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); +		return -EINVAL;  	} +	amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);  	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);  	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2945,6 +2568,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -2962,14 +2587,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); +	return sysfs_emit(buf, "%d\n", vddgfx);  }  static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,  					      struct device_attribute *attr,  					      char *buf)  { -	return snprintf(buf, PAGE_SIZE, "vddgfx\n"); +	return sysfs_emit(buf, "vddgfx\n");  }  static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, @@ -2982,6 +2607,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	/* only APUs have vddnb */  	if  (!(adev->flags & AMD_IS_APU)) @@ -3003,14 +2630,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); +	return sysfs_emit(buf, "%d\n", vddnb);  }  static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,  					      struct device_attribute *attr,  					      char *buf)  { -	return snprintf(buf, PAGE_SIZE, "vddnb\n"); +	return sysfs_emit(buf, "vddnb\n");  }  static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, @@ -3024,6 +2651,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -3044,7 +2673,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,  	/* convert to microwatts */  	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; -	return snprintf(buf, PAGE_SIZE, "%u\n", uw); +	return sysfs_emit(buf, "%u\n", uw);  }  static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, @@ -3059,13 +2688,17 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,  					 char *buf)  {  	struct amdgpu_device *adev = dev_get_drvdata(dev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	int limit_type = to_sensor_dev_attr(attr)->index;  	uint32_t limit = limit_type << 24; +	uint32_t max_limit = 0;  	ssize_t size;  	int r;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -3076,9 +2709,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,  	if (is_support_sw_smu(adev)) {  		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);  		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); -	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { -		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); -		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); +	} else if (pp_funcs && pp_funcs->get_power_limit) { +		pp_funcs->get_power_limit(adev->powerplay.pp_handle, +				&limit, &max_limit, true); +		size = snprintf(buf, PAGE_SIZE, "%u\n", max_limit * 1000000);  	} else {  		size = 
snprintf(buf, PAGE_SIZE, "\n");  	} @@ -3094,6 +2728,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,  					 char *buf)  {  	struct amdgpu_device *adev = dev_get_drvdata(dev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	int limit_type = to_sensor_dev_attr(attr)->index;  	uint32_t limit = limit_type << 24;  	ssize_t size; @@ -3101,6 +2736,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -3111,8 +2748,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,  	if (is_support_sw_smu(adev)) {  		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);  		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); -	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { -		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); +	} else if (pp_funcs && pp_funcs->get_power_limit) { +		pp_funcs->get_power_limit(adev->powerplay.pp_handle, +				&limit, NULL, false);  		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);  	} else {  		size = snprintf(buf, PAGE_SIZE, "\n"); @@ -3124,13 +2762,51 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,  	return size;  } +static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev, +					 struct device_attribute *attr, +					 char *buf) +{ +	struct amdgpu_device *adev = dev_get_drvdata(dev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; +	int limit_type = to_sensor_dev_attr(attr)->index; +	uint32_t limit = limit_type << 24; +	ssize_t size; +	int r; + +	if (amdgpu_in_reset(adev)) +		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM; + +	r = pm_runtime_get_sync(adev_to_drm(adev)->dev); +	if (r < 0) { +		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); +		return r; +	} + +	if (is_support_sw_smu(adev)) { +		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT); +		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); +	} else if (pp_funcs && pp_funcs->get_power_limit) { +		pp_funcs->get_power_limit(adev->powerplay.pp_handle, +				&limit, NULL, true); +		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); +	} else { +		size = snprintf(buf, PAGE_SIZE, "\n"); +	} + +	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); +	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + +	return size; +}  static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,  					 struct device_attribute *attr,  					 char *buf)  {  	int limit_type = to_sensor_dev_attr(attr)->index; -	return snprintf(buf, PAGE_SIZE, "%s\n", +	return sysfs_emit(buf, "%s\n",  		limit_type == SMU_FAST_PPT_LIMIT ? 
"fastPPT" : "slowPPT");  } @@ -3140,12 +2816,15 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,  		size_t count)  {  	struct amdgpu_device *adev = dev_get_drvdata(dev); +	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;  	int limit_type = to_sensor_dev_attr(attr)->index;  	int err;  	u32 value;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	if (amdgpu_sriov_vf(adev))  		return -EINVAL; @@ -3163,10 +2842,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,  		return err;  	} -	if (is_support_sw_smu(adev)) -		err = smu_set_power_limit(&adev->smu, value); -	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) -		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); +	if (pp_funcs && pp_funcs->set_power_limit) +		err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);  	else  		err = -EINVAL; @@ -3189,6 +2866,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -3206,14 +2885,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); +	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);  }  static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,  					    struct device_attribute *attr,  					    char *buf)  { -	return snprintf(buf, PAGE_SIZE, "sclk\n"); +	return sysfs_emit(buf, "sclk\n");  }  static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, @@ -3226,6 +2905,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);  	if (r < 0) { @@ -3243,14 +2924,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,  	if (r)  		return r; -	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); +	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);  }  static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,  					    struct device_attribute *attr,  					    char *buf)  { -	return snprintf(buf, PAGE_SIZE, "mclk\n"); +	return sysfs_emit(buf, "mclk\n");  }  /** @@ -3315,9 +2996,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,   *   * - pwm1_max: pulse width modulation fan control maximum level (255)   * - * - fan1_min: an minimum value Unit: revolution/min (RPM) + * - fan1_min: a minimum value Unit: revolution/min (RPM)   * - * - fan1_max: an maxmum value Unit: revolution/max (RPM) + * - fan1_max: a maximum value Unit: revolution/max (RPM)   *   * - fan1_input: fan speed in RPM   * @@ -3367,11 +3048,13 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,  static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);  static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);  static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); +static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);  static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);  static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, 
NULL, 1);  static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);  static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);  static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1); +static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);  static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);  static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);  static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); @@ -3411,11 +3094,13 @@ static struct attribute *hwmon_attributes[] = {  	&sensor_dev_attr_power1_cap_max.dev_attr.attr,  	&sensor_dev_attr_power1_cap_min.dev_attr.attr,  	&sensor_dev_attr_power1_cap.dev_attr.attr, +	&sensor_dev_attr_power1_cap_default.dev_attr.attr,  	&sensor_dev_attr_power1_label.dev_attr.attr,  	&sensor_dev_attr_power2_average.dev_attr.attr,  	&sensor_dev_attr_power2_cap_max.dev_attr.attr,  	&sensor_dev_attr_power2_cap_min.dev_attr.attr,  	&sensor_dev_attr_power2_cap.dev_attr.attr, +	&sensor_dev_attr_power2_cap_default.dev_attr.attr,  	&sensor_dev_attr_power2_label.dev_attr.attr,  	&sensor_dev_attr_freq1_input.dev_attr.attr,  	&sensor_dev_attr_freq1_label.dev_attr.attr, @@ -3514,7 +3199,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,  	      (adev->asic_type != CHIP_VANGOGH))) &&	/* not implemented yet */  	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||  	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| -	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) +	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr || +	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))  		return 0;  	if (((adev->family == AMDGPU_FAMILY_SI) || @@ -3580,6 +3266,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,  		 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||  	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||  		 attr == &sensor_dev_attr_power2_cap.dev_attr.attr || +		 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||  		 attr == &sensor_dev_attr_power2_label.dev_attr.attr ||  		 attr == &sensor_dev_attr_power1_label.dev_attr.attr))  		return 0; @@ -3784,16 +3471,17 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)  			   (flags & clocks[i].flag) ? 
"On" : "Off");  } -static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) +static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)  { -	struct drm_info_node *node = (struct drm_info_node *) m->private; -	struct drm_device *dev = node->minor->dev; -	struct amdgpu_device *adev = drm_to_adev(dev); +	struct amdgpu_device *adev = (struct amdgpu_device *)m->private; +	struct drm_device *dev = adev_to_drm(adev);  	u32 flags = 0;  	int r;  	if (amdgpu_in_reset(adev))  		return -EPERM; +	if (adev->in_suspend && !adev->in_runpm) +		return -EPERM;  	r = pm_runtime_get_sync(dev->dev);  	if (r < 0) { @@ -3836,16 +3524,18 @@ out:  	return r;  } -static const struct drm_info_list amdgpu_pm_info_list[] = { -	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info); +  #endif -int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) +void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)  {  #if defined(CONFIG_DEBUG_FS) -	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); -#else -	return 0; +	struct drm_minor *minor = adev_to_drm(adev)->primary; +	struct dentry *root = minor->debugfs_root; + +	debugfs_create_file("amdgpu_pm_info", 0444, root, adev, +			    &amdgpu_debugfs_pm_info_fops); +  #endif  } | 

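The tail of the patch drops the drm_info_list/amdgpu_debugfs_add_files() registration in favour of DEFINE_SHOW_ATTRIBUTE() plus debugfs_create_file(), and amdgpu_debugfs_pm_init() becomes void since debugfs creation errors are not meant to be propagated. A generic sketch of that idiom follows; foo, my_device, foo_debugfs_init and the "foo_info" file name are placeholder identifiers, not the amdgpu ones:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	struct my_device {
		int state;
	};

	/* DEFINE_SHOW_ATTRIBUTE(foo) expects foo_show(struct seq_file *, void *)
	 * and generates foo_open()/foo_fops around single_open(), so the data
	 * pointer handed to debugfs_create_file() below comes back here as
	 * m->private.
	 */
	static int foo_show(struct seq_file *m, void *unused)
	{
		struct my_device *mydev = m->private;

		seq_printf(m, "state: %d\n", mydev->state);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(foo);

	static void foo_debugfs_init(struct my_device *mydev, struct dentry *root)
	{
		/* 0444: read-only file; the return value of debugfs calls is
		 * deliberately ignored, as in the patch above
		 */
		debugfs_create_file("foo_info", 0444, root, mydev, &foo_fops);
	}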