Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--  drivers/cpufreq/cpufreq.c  158
1 file changed, 119 insertions(+), 39 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1a4cae54a01b..f45ded62b0e0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
@@ -87,6 +88,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
static bool cpufreq_boost_supported(void);
+static int cpufreq_boost_trigger_state(int state);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -534,16 +536,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ unsigned int relation)
{
unsigned int idx;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -563,7 +567,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+ unsigned int min = READ_ONCE(policy->min);
+ unsigned int max = READ_ONCE(policy->max);
+
+ /*
+ * If this function runs in parallel with cpufreq_set_policy(), it may
+ * read policy->min before the update and policy->max after the update
+ * or the other way around, so there is no ordering guarantee.
+ *
+ * Resolve this by always honoring the max (in case it comes from
+ * thermal throttling or similar).
+ */
+ if (unlikely(min > max))
+ min = max;
+
+ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
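
For context, the READ_ONCE() snapshots above pair with the WRITE_ONCE() stores added to cpufreq_set_policy() further down in this diff: the reader fetches each limit exactly once and repairs a possible min/max inversion instead of taking a lock. A minimal, self-contained sketch of that pattern (the struct and function names below are illustrative, not part of this commit):

#include <linux/compiler.h>	/* READ_ONCE / WRITE_ONCE / unlikely */
#include <linux/minmax.h>	/* clamp_val */

/* Illustrative stand-in for the policy limits touched above. */
struct freq_limits {
	unsigned int min;
	unsigned int max;
};

/* Reader side: snapshot each limit once, then repair any inversion. */
static unsigned int resolve_clamped(struct freq_limits *l, unsigned int freq)
{
	unsigned int min = READ_ONCE(l->min);
	unsigned int max = READ_ONCE(l->max);

	if (unlikely(min > max))	/* torn against a concurrent update */
		min = max;		/* always honor the max (thermal cap) */

	return clamp_val(freq, min, max);
}

/* Writer side: publish only final values, never intermediates. */
static void publish_limits(struct freq_limits *l, unsigned int min, unsigned int max)
{
	WRITE_ONCE(l->max, max);
	WRITE_ONCE(l->min, min > max ? max : min);
}
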
@@ -602,12 +620,12 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
- __func__, enable ? "enabled" : "disabled");
+ __func__, str_enabled_disabled(enable));
return count;
}
@@ -630,6 +648,9 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!cpufreq_driver->boost_enabled)
return -EINVAL;
+ if (!policy->boost_supported)
+ return -EINVAL;
+
if (policy->boost_enabled == enable)
return count;
@@ -728,18 +749,26 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
+
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
+ if (freq > 0)
ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
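
Because arch_freq_get_on_cpu() now returns an int, an architecture that overrides the weak symbol is expected to hand back either a frequency in kHz or a negative errno. A hypothetical override, shown only to illustrate that contract (the two helpers are made up):

#include <linux/errno.h>

/* Hypothetical arch-side override of the weak function above. */
int arch_freq_get_on_cpu(int cpu)
{
	unsigned int khz;

	if (!my_arch_has_freq_counters(cpu))	/* illustrative helper */
		return -EOPNOTSUPP;	/* core then omits cpuinfo_avg_freq */

	khz = my_arch_read_avg_khz(cpu);	/* illustrative helper */

	/* 0 kHz would be ambiguous to the callers above; report -EAGAIN. */
	return khz ? (int)khz : -EAGAIN;
}
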
@@ -784,6 +813,19 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
}
/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
+}
+
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -945,6 +987,7 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
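
Once cpuinfo_avg_freq is registered above, userspace can read the hardware-averaged frequency through the per-CPU cpufreq directory. A small userspace sketch, assuming the conventional sysfs layout:

/* Userspace sketch: read cpuinfo_avg_freq for one CPU, in kHz. */
#include <stdio.h>

static int read_avg_freq_khz(int cpu)
{
	char path[128];
	unsigned int khz;
	FILE *f;

	/* The file is absent when the arch reported -EOPNOTSUPP. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_avg_freq", cpu);

	f = fopen(path, "r");
	if (!f)
		return -1;

	if (fscanf(f, "%u", &khz) != 1)
		khz = 0;
	fclose(f);

	return khz ? (int)khz : -1;
}
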
@@ -1058,6 +1101,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
struct freq_attr **drv_attr;
int ret = 0;
+ /* Attributes that need freq_table */
+ if (policy->freq_table) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_available_freqs.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_boost_freqs.attr);
+ if (ret)
+ return ret;
+ }
+ }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
@@ -1072,6 +1130,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
+
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
return ret;
@@ -1409,10 +1473,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
- policy->boost_enabled = true;
-
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -1475,6 +1535,10 @@ static int cpufreq_online(unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ } else {
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -1538,7 +1602,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sugov_eas_rebuild_sd() is called, which will result
+ * em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -1569,6 +1633,19 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ if (cpufreq_driver->set_boost && policy->boost_supported &&
+ policy->boost_enabled != cpufreq_boost_enabled()) {
+ policy->boost_enabled = cpufreq_boost_enabled();
+ ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ if (ret) {
+ /* If the set_boost fails, the online operation is not affected */
+ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+ str_enable_disable(policy->boost_enabled));
+ policy->boost_enabled = !policy->boost_enabled;
+ }
+ }
+
pr_debug("initialization complete\n");
return 0;
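
The per-policy boost mirroring above goes through the driver's ->set_boost() callback, which returns 0 on success or a negative errno, in which case the core flips policy->boost_enabled back. A hypothetical driver callback, only to illustrate that contract (all mydrv_* names are made up):

#include <linux/string_choices.h>

/* Hypothetical driver callback; wired up via cpufreq_driver.set_boost. */
static int mydrv_set_boost(struct cpufreq_policy *policy, int state)
{
	int ret;

	ret = mydrv_write_boost_reg(policy->cpu, state);	/* illustrative */
	if (ret)
		return ret;	/* core reverts policy->boost_enabled */

	pr_debug("mydrv: CPU%u boost %s\n", policy->cpu,
		 str_enabled_disabled(state));
	return 0;
}
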
@@ -2323,7 +2400,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- target_freq = __resolve_freq(policy, target_freq, relation);
+ target_freq = __resolve_freq(policy, target_freq, policy->min,
+ policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2647,11 +2725,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Resolve policy min/max to available frequencies. This ensures that
* frequency resolution will neither overshoot the requested maximum
* nor undershoot the requested minimum.
+ *
+ * Avoid storing intermediate values in policy->max or policy->min, and
+ * prevent compiler optimizations around those stores, because the fields
+ * may be read concurrently by cpufreq_driver_resolve_freq() during the update.
*/
- policy->min = new_data.min;
- policy->max = new_data.max;
- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+ new_data.min, new_data.max,
+ CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+ new_data.max, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
trace_cpu_frequency_limits(policy);
cpufreq_update_pressure(policy);
@@ -2748,6 +2833,12 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
if (cpufreq_driver->update_limits)
cpufreq_driver->update_limits(cpu);
else
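
The __free(put_cpufreq_policy) annotation above uses the scope-based cleanup machinery from <linux/cleanup.h>, with cpufreq.h supplying the matching DEFINE_FREE() hook. A minimal sketch of the pattern, shown only to make the automatic put explicit (the caller function is illustrative):

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

/*
 * cpufreq.h already provides the cleanup hook used above, roughly:
 *   DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
 *               if (_T) cpufreq_cpu_put(_T))
 */
static unsigned int example_read_cur(unsigned int cpu)	/* illustrative */
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy)
						= cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	/* cpufreq_cpu_put(policy) runs automatically on every return path. */
	return policy->cur;
}
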
@@ -2758,7 +2849,7 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
@@ -2777,8 +2868,9 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
-int cpufreq_boost_trigger_state(int state)
+static int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
@@ -2793,6 +2885,9 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ if (!policy->boost_supported)
+ continue;
+
policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
if (ret) {
@@ -2812,7 +2907,7 @@ err_reset_state:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ __func__, str_enable_disable(state));
return ret;
}
@@ -2840,21 +2935,6 @@ static void remove_boost_sysfs_file(void)
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
-int cpufreq_enable_boost_support(void)
-{
- if (!cpufreq_driver)
- return -EINVAL;
-
- if (cpufreq_boost_supported())
- return 0;
-
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- /* This will get removed on driver unregister */
- return create_boost_sysfs_file();
-}
-EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-
bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
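
With cpufreq_enable_boost_support() removed and cpufreq_boost_set_sw() exported above, a driver that relies on the generic software boost now points its ->set_boost callback at the helper directly instead of calling the removed function. A hypothetical driver structure, to illustrate the replacement (all mydrv_* names are made up):

/* Hypothetical driver: uses the exported generic software-boost helper. */
static struct cpufreq_driver mydrv_cpufreq_driver = {
	.name		= "mydrv",
	.init		= mydrv_cpu_init,		/* illustrative */
	.target_index	= mydrv_target_index,		/* illustrative */
	.set_boost	= cpufreq_boost_set_sw,		/* replaces the removed call */
	.attr		= mydrv_attrs,			/* illustrative */
};
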