author      Nicola Mazzucato <nicola.mazzucato@arm.com>    2021-02-19 01:23:25 +0300
committer   Sudeep Holla <sudeep.holla@arm.com>            2021-03-15 16:24:45 +0300
commit      80a064dbd556cde36f0b7f1778e8d8a1dc50f19f (patch)
tree        32470d0dbba1877442dba77901638c52794a8eea /drivers/cpufreq
parent      71a37cd6a59dc58df3f4e58db3f4f04c9e69da43 (diff)
download    linux-80a064dbd556cde36f0b7f1778e8d8a1dc50f19f.tar.xz
scmi-cpufreq: Get opp_shared_cpus from opp-v2 for EM
By design, SCMI performance domains define the granularity of
performance controls; they do not describe any underlying hardware
dependencies (although the two may match in many cases).
It is therefore possible for a platform's hardware to control CPU
performance at a different granularity, while the platform chooses to
describe fine-grained performance controls through SCMI.
In such situations, the energy model would be built from inaccurate,
per-control information, while it still needs to know the actual
performance boundaries.
To restore correct functionality, retrieve the set of CPUs under the
same performance domain from operating-points-v2 in DT, and pass it on
to the EM.
Link: https://lore.kernel.org/r/20210218222326.15788-3-nicola.mazzucato@arm.com
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Nicola Mazzucato <nicola.mazzucato@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
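
The resulting selection logic is small: prefer the CPU grouping described by an
operating-points-v2 table in DT (which may be an empty table carrying only
opp-shared), and fall back to the SCMI sharing mask when DT provides nothing.
Below is a minimal sketch of that decision; scmi_resolve_em_cpus() is a
hypothetical helper name, the patch itself open-codes the same fallback inside
scmi_cpufreq_init():

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

/*
 * Illustration only: scmi_resolve_em_cpus() is a hypothetical helper; the
 * patch below open-codes this fallback directly in scmi_cpufreq_init().
 */
static void scmi_resolve_em_cpus(struct device *cpu_dev,
                                 const struct cpumask *scmi_cpus,
                                 struct cpumask *em_cpus)
{
        int ret;

        cpumask_clear(em_cpus);

        /* Prefer the grouping described by operating-points-v2/opp-shared in DT */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, em_cpus);

        /* No OPP table or no opp-shared found: fall back to SCMI control granularity */
        if (ret || !cpumask_weight(em_cpus))
                cpumask_copy(em_cpus, scmi_cpus);
}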
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--    drivers/cpufreq/scmi-cpufreq.c | 74
1 file changed, 54 insertions(+), 20 deletions(-)
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 26a16dc96f8b..a4e1637d335d 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -126,6 +126,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	struct scmi_data *priv;
 	struct cpufreq_frequency_table *freq_table;
 	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+	cpumask_var_t opp_shared_cpus;
 	bool power_scale_mw;
 
 	cpu_dev = get_cpu_device(policy->cpu);
@@ -134,32 +135,64 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 		return -ENODEV;
 	}
 
-	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
-	if (ret) {
-		dev_warn(cpu_dev, "failed to add opps to the device\n");
-		return ret;
-	}
+	if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+		ret = -ENOMEM;
 
+	/* Obtain CPUs that share SCMI performance controls */
 	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
 	if (ret) {
 		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
-		return ret;
+		goto out_free_cpumask;
 	}
 
-	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
-	if (ret) {
-		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
-			__func__, ret);
-		return ret;
+	/*
+	 * Obtain CPUs that share performance levels.
+	 * The OPP 'sharing cpus' info may come from DT through an empty opp
+	 * table and opp-shared.
+	 */
+	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
+	if (ret || !cpumask_weight(opp_shared_cpus)) {
+		/*
+		 * Either opp-table is not set or no opp-shared was found.
+		 * Use the CPU mask from SCMI to designate CPUs sharing an OPP
+		 * table.
+		 */
+		cpumask_copy(opp_shared_cpus, policy->cpus);
 	}
 
+	/*
+	 * A previous CPU may have marked OPPs as shared for a few CPUs, based on
+	 * what OPP core provided. If the current CPU is part of those few, then
+	 * there is no need to add OPPs again.
+	 */
 	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
 	if (nr_opp <= 0) {
-		dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
-			__func__, ret);
-
-		ret = -ENODEV;
-		goto out_free_opp;
+		ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
+		if (ret) {
+			dev_warn(cpu_dev, "failed to add opps to the device\n");
+			goto out_free_cpumask;
+		}
+
+		nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+		if (nr_opp <= 0) {
+			dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
+				__func__, ret);
+
+			ret = -ENODEV;
+			goto out_free_opp;
+		}
+
+		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+		if (ret) {
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
+
+			goto out_free_opp;
+		}
+
+		power_scale_mw = handle->perf_ops->power_scale_mw_get(handle);
+		em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
+					    opp_shared_cpus, power_scale_mw);
 	}
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -192,17 +225,18 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->fast_switch_possible =
 		handle->perf_ops->fast_switch_possible(handle, cpu_dev);
 
-	power_scale_mw = handle->perf_ops->power_scale_mw_get(handle);
-	em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
-				    power_scale_mw);
-
+	free_cpumask_var(opp_shared_cpus);
 	return 0;
 
 out_free_priv:
 	kfree(priv);
+
 out_free_opp:
 	dev_pm_opp_remove_all_dynamic(cpu_dev);
 
+out_free_cpumask:
+	free_cpumask_var(opp_shared_cpus);
+
 	return ret;
 }
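
One way to picture the effect: on a platform where SCMI exposes per-CPU
performance controls but DT groups several CPUs under one shared OPP table,
each cpufreq policy still spans a single CPU, while the EM performance domain
registered here spans the whole group. The sketch below expresses that
expectation as a hypothetical sanity check; it is not part of the patch and
scmi_em_spans_opp_group() is an invented name:

#include <linux/cpumask.h>
#include <linux/energy_model.h>

/*
 * Hypothetical check, not part of the patch: after scmi_cpufreq_init() has
 * run, the EM perf domain for @cpu should cover the DT opp-shared group,
 * even when the cpufreq policy (SCMI control granularity) is narrower.
 */
static bool scmi_em_spans_opp_group(int cpu, const struct cpumask *opp_shared_cpus)
{
        struct em_perf_domain *pd = em_cpu_get(cpu);

        return pd && cpumask_equal(em_span_cpus(pd), opp_shared_cpus);
}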