author     Vincent Guittot <vincent.guittot@linaro.org>   2018-06-28 18:45:11 +0300
committer  Ingo Molnar <mingo@kernel.org>                 2018-07-16 00:51:21 +0300
commit     dfa444dc2ff62edbaf1ff95ed22dd2ce8a5715da (patch)
tree       dbaa457404515589a827c50e9d04fa7b2031c807 /kernel/sched/cpufreq_schedutil.c
parent     9033ea11889f88f243445495f72441e22256d5e9 (diff)
download   linux-dfa444dc2ff62edbaf1ff95ed22dd2ce8a5715da.tar.xz
sched/cpufreq: Remove sugov_aggregate_util()
There is no reason why sugov_get_util() and sugov_aggregate_util() were in fact separate functions.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
[ Rebased after adding irq tracking and fixed some compilation errors. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1530200714-4504-9-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
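For readers skimming the diff below, the arithmetic that the merged sugov_get_util() ends up performing can be illustrated with a small standalone sketch. This is not the kernel function itself: aggregate_util(), its plain unsigned long parameters and the main() driver are hypothetical stand-ins for the per-class utilization values that the real code reads via cpu_util_cfs(), cpu_util_rt(), cpu_util_dl(), cpu_util_irq() and cpu_bw_dl().

#include <stdio.h>

/*
 * Simplified, standalone sketch of the aggregation done by the merged
 * sugov_get_util(): sum the rq utilization (CFS + RT), bail out at full
 * capacity, scale the rq utilization into the non-IRQ context window,
 * then add IRQ utilization and the DL bandwidth requirement, clamped to
 * the CPU capacity. Not the kernel code; all inputs share one capacity scale.
 */
static unsigned long aggregate_util(unsigned long cfs, unsigned long rt,
                                    unsigned long dl, unsigned long irq,
                                    unsigned long bw_dl, unsigned long max)
{
        unsigned long util;

        if (irq >= max)
                return max;

        /* Sum rq utilization */
        util = cfs + rt;

        /* IRQ time is not seen by rq utilization, so compare against capacity */
        if (util + dl >= max)
                return max;

        /* Weight rq utilization to the normal (non-IRQ) context window */
        util = util * (max - irq) / max;

        /* Add interrupt utilization */
        util += irq;

        /* Add DL bandwidth requirement */
        util += bw_dl;

        return util < max ? util : max;
}

int main(void)
{
        /* 300 CFS + 50 RT, no running DL, 100 IRQ, 64 DL bandwidth, capacity 1024 */
        printf("%lu\n", aggregate_util(300, 50, 0, 100, 64, 1024));
        return 0;
}

With these example inputs, the rq utilization of 350 is scaled to 315 within the non-IRQ window (350 * 924 / 1024), and adding the IRQ utilization and DL bandwidth gives 479.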
Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  44
1 file changed, 15 insertions(+), 29 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 7016bde9d194..c9622b3f183d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,12 +53,7 @@ struct sugov_cpu {
unsigned int iowait_boost_max;
u64 last_update;
- /* The fields below are only needed when sharing a policy: */
- unsigned long util_cfs;
- unsigned long util_dl;
unsigned long bw_dl;
- unsigned long util_rt;
- unsigned long util_irq;
unsigned long max;
/* The field below is for single-CPU policies only: */
@@ -182,38 +177,31 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
+ unsigned long util, irq, max;
- sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
- sg_cpu->util_cfs = cpu_util_cfs(rq);
- sg_cpu->util_dl = cpu_util_dl(rq);
- sg_cpu->bw_dl = cpu_bw_dl(rq);
- sg_cpu->util_rt = cpu_util_rt(rq);
- sg_cpu->util_irq = cpu_util_irq(rq);
-}
-
-static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
-{
- struct rq *rq = cpu_rq(sg_cpu->cpu);
- unsigned long util, max = sg_cpu->max;
+ sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ sg_cpu->bw_dl = cpu_bw_dl(rq);
if (rt_rq_is_runnable(&rq->rt))
- return sg_cpu->max;
+ return max;
+
+ irq = cpu_util_irq(rq);
- if (unlikely(sg_cpu->util_irq >= max))
+ if (unlikely(irq >= max))
return max;
/* Sum rq utilization */
- util = sg_cpu->util_cfs;
- util += sg_cpu->util_rt;
+ util = cpu_util_cfs(rq);
+ util += cpu_util_rt(rq);
/*
* Interrupt time is not seen by RQS utilization so we can compare
* them with the CPU capacity
*/
- if ((util + sg_cpu->util_dl) >= max)
+ if ((util + cpu_util_dl(rq)) >= max)
return max;
/*
@@ -231,11 +219,11 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
*/
/* Weight RQS utilization to normal context window */
- util *= (max - sg_cpu->util_irq);
+ util *= (max - irq);
util /= max;
/* Add interrupt utilization */
- util += sg_cpu->util_irq;
+ util += irq;
/* Add DL bandwidth requirement */
util += sg_cpu->bw_dl;
@@ -418,9 +406,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = sugov_cpu_is_busy(sg_cpu);
- sugov_get_util(sg_cpu);
+ util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
- util = sugov_aggregate_util(sg_cpu);
sugov_iowait_apply(sg_cpu, time, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
@@ -459,9 +446,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
- sugov_get_util(j_sg_cpu);
+ j_util = sugov_get_util(j_sg_cpu);
j_max = j_sg_cpu->max;
- j_util = sugov_aggregate_util(j_sg_cpu);
sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
if (j_util * max > j_max * util) {