Diffstat (limited to 'drivers/base/arch_topology.c')
-rw-r--r--	drivers/base/arch_topology.c	89
1 file changed, 83 insertions(+), 6 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index de8587cc119e..c1179edc0f3b 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -21,17 +21,94 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 
+static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static struct cpumask scale_freq_counters_mask;
+static bool scale_freq_invariant;
+
+static bool supports_scale_freq_counters(const struct cpumask *cpus)
+{
+	return cpumask_subset(cpus, &scale_freq_counters_mask);
+}
+
 bool topology_scale_freq_invariant(void)
 {
 	return cpufreq_supports_freq_invariance() ||
-	       arch_freq_counters_available(cpu_online_mask);
+	       supports_scale_freq_counters(cpu_online_mask);
 }
 
-__weak bool arch_freq_counters_available(const struct cpumask *cpus)
+static void update_scale_freq_invariant(bool status)
 {
-	return false;
+	if (scale_freq_invariant == status)
+		return;
+
+	/*
+	 * Task scheduler behavior depends on frequency invariance support,
+	 * either cpufreq or counter driven. If the support status changes as
+	 * a result of counter initialisation and use, retrigger the build of
+	 * scheduling domains to ensure the information is propagated properly.
+	 */
+	if (topology_scale_freq_invariant() == status) {
+		scale_freq_invariant = status;
+		rebuild_sched_domains_energy();
+	}
 }
 
-DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+void topology_set_scale_freq_source(struct scale_freq_data *data,
+				    const struct cpumask *cpus)
+{
+	struct scale_freq_data *sfd;
+	int cpu;
+
+	/*
+	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
+	 * supported by cpufreq.
+	 */
+	if (cpumask_empty(&scale_freq_counters_mask))
+		scale_freq_invariant = topology_scale_freq_invariant();
+
+	for_each_cpu(cpu, cpus) {
+		sfd = per_cpu(sft_data, cpu);
+
+		/* Use ARCH provided counters whenever possible */
+		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
+			per_cpu(sft_data, cpu) = data;
+			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
+		}
+	}
+
+	update_scale_freq_invariant(true);
+}
+EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
+
+void topology_clear_scale_freq_source(enum scale_freq_source source,
+				      const struct cpumask *cpus)
+{
+	struct scale_freq_data *sfd;
+	int cpu;
+
+	for_each_cpu(cpu, cpus) {
+		sfd = per_cpu(sft_data, cpu);
+
+		if (sfd && sfd->source == source) {
+			per_cpu(sft_data, cpu) = NULL;
+			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
+		}
+	}
+
+	update_scale_freq_invariant(false);
+}
+EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
+
+void topology_scale_freq_tick(void)
+{
+	struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+
+	if (sfd)
+		sfd->set_freq_scale();
+}
+
+DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
 
 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 			     unsigned long max_freq)
@@ -47,13 +124,13 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 	 * want to update the scale factor with information from CPUFREQ.
 	 * Instead the scale factor will be updated from arch_scale_freq_tick.
 	 */
-	if (arch_freq_counters_available(cpus))
+	if (supports_scale_freq_counters(cpus))
 		return;
 
 	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
 
 	for_each_cpu(i, cpus)
-		per_cpu(freq_scale, i) = scale;
+		per_cpu(arch_freq_scale, i) = scale;
 }
 
 DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
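
As context for the interface added above, here is a minimal sketch of how a counter-driven entity would register itself. It is illustrative only, not part of the patch: the layout of struct scale_freq_data is inferred from its use in the diff (a source tag plus a set_freq_scale() callback taking no arguments), the declarations are assumed to be exposed via include/linux/arch_topology.h alongside the exported arch_freq_scale per-CPU variable, and the amu_* names are hypothetical.

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched/topology.h>

static void amu_set_freq_scale(void)
{
	/*
	 * Invoked via topology_scale_freq_tick() on the current CPU once
	 * registered. A real driver would derive the current/max frequency
	 * ratio from its hardware counters; writing SCHED_CAPACITY_SCALE
	 * (i.e. full capacity) here is only a placeholder.
	 */
	this_cpu_write(arch_freq_scale, SCHED_CAPACITY_SCALE);
}

static struct scale_freq_data amu_sfd = {
	.source		= SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale	= amu_set_freq_scale,
};

static int __init amu_fie_init(void)
{
	/*
	 * Claim all online CPUs for this counter-based source. Per the loop
	 * in topology_set_scale_freq_source(), an ARCH source overrides any
	 * non-ARCH source already installed on a CPU, but never the reverse.
	 */
	topology_set_scale_freq_source(&amu_sfd, cpu_online_mask);
	return 0;
}
late_initcall(amu_fie_init);

On the tick path, the architecture code is expected to call topology_scale_freq_tick(), which dispatches to whichever callback is installed for the current CPU. A driver whose counters become unusable (e.g. across CPU hotplug) would undo its registration with topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus), which also retriggers the sched-domain rebuild via update_scale_freq_invariant(false).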

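For the cpufreq-driven fallback in the second hunk, the arithmetic is a plain ratio scaled to SCHED_CAPACITY_SCALE: for example, cur_freq = 1200000 and max_freq = 2000000 (kHz) give scale = (1200000 << 10) / 2000000 = 614, roughly 60% of the full-capacity value 1024, which is then written to arch_freq_scale for every CPU in the mask. When supports_scale_freq_counters(cpus) is true, topology_set_freq_scale() returns early so the registered counter source retains sole ownership of the per-CPU scale factor.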