author		Luca Abeni <luca.abeni@santannapisa.it>	2020-05-20 16:42:42 +0300
committer	Peter Zijlstra <peterz@infradead.org>	2020-06-15 15:10:05 +0300
commit		b4118988fdcb4554ea6687dd8ff68bcab690b8ea (patch)
tree		701ac2c1c5e1789bf7068fdc560081b5b90a7f36 /kernel
parent		60ffd5edc5e4fa69622c125c54ef8e7d5d894af8 (diff)
download	linux-b4118988fdcb4554ea6687dd8ff68bcab690b8ea.tar.xz
sched/deadline: Make DL capacity-aware
The current SCHED_DEADLINE (DL) scheduler uses a global EDF scheduling
algorithm w/o considering CPU capacity or task utilization.
This works well on homogeneous systems where DL tasks are guaranteed
to have a bounded tardiness but presents issues on heterogeneous
systems.
A DL task can migrate to a CPU which does not have enough CPU capacity
to correctly serve the task (e.g. a task w/ 70ms runtime and 100ms
period on a CPU w/ 512 capacity).
Add the DL fitness function dl_task_fits_capacity() for DL admission
control on heterogeneous systems. A task fits onto a CPU if:
CPU original capacity / 1024 >= task runtime / task deadline
Use this function on heterogeneous systems to try to find a CPU which
meets this criterion during task wakeup, push and offline migration.
On homogeneous systems the original behavior of the DL admission
control should be retained.
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200520134243.19352-5-dietmar.eggemann@arm.com
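
[Editorial illustration, not part of the patch.] The admission criterion above can be made concrete with the numbers from the commit message: a task with 70ms runtime and 100ms deadline (equal to its period here) has a utilization of 0.7, so a CPU with original capacity 512 (512/1024 = 0.5) does not fit it, while a 1024-capacity CPU does. A minimal standalone C sketch of that check, using the same division-free fixed-point form as the patch's cap_scale()-based comparison; the helper name task_fits() is hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* CPU capacities are scaled to SCHED_CAPACITY_SCALE = 1 << SCHED_CAPACITY_SHIFT. */
#define SCHED_CAPACITY_SHIFT	10

/*
 * capacity / 1024 >= runtime / deadline, evaluated without division as
 * (deadline * capacity) >> SCHED_CAPACITY_SHIFT >= runtime.
 */
static bool task_fits(unsigned long long runtime, unsigned long long deadline,
		      unsigned long capacity)
{
	return ((deadline * capacity) >> SCHED_CAPACITY_SHIFT) >= runtime;
}

int main(void)
{
	/* 70ms runtime / 100ms deadline: utilization 0.7. */
	printf("cap  512: %s\n", task_fits(70, 100, 512) ? "fits" : "does not fit");
	printf("cap 1024: %s\n", task_fits(70, 100, 1024) ? "fits" : "does not fit");
	return 0;
}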
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/cpudeadline.c	14
-rw-r--r--	kernel/sched/deadline.c	18
-rw-r--r--	kernel/sched/sched.h	15

3 files changed, 42 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5cc4012572ec..8630f2a40a3f 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -121,7 +121,19 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 
 	if (later_mask &&
 	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
-		return 1;
+		int cpu;
+
+		if (!static_branch_unlikely(&sched_asym_cpucapacity))
+			return 1;
+
+		/* Ensure the capacity of the CPUs fits the task. */
+		for_each_cpu(cpu, later_mask) {
+			if (!dl_task_fits_capacity(p, cpu))
+				cpumask_clear_cpu(cpu, later_mask);
+		}
+
+		if (!cpumask_empty(later_mask))
+			return 1;
 	} else {
 		int best_cpu = cpudl_maximum(cp);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9ebd0a9241ed..84e84ba0b00a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1643,6 +1643,7 @@ static int
 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
+	bool select_rq;
 	struct rq *rq;
 
 	if (sd_flag != SD_BALANCE_WAKE)
@@ -1662,10 +1663,19 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * other hand, if it has a shorter deadline, we
 	 * try to make it stay here, it might be important.
 	 */
-	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
-	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	select_rq = unlikely(dl_task(curr)) &&
+		    (curr->nr_cpus_allowed < 2 ||
+		     !dl_entity_preempt(&p->dl, &curr->dl)) &&
+		    p->nr_cpus_allowed > 1;
+
+	/*
+	 * Take the capacity of the CPU into account to
+	 * ensure it fits the requirement of the task.
+	 */
+	if (static_branch_unlikely(&sched_asym_cpucapacity))
+		select_rq |= !dl_task_fits_capacity(p, cpu);
+
+	if (select_rq) {
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 91b250f265c0..336887607b3d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -317,6 +317,21 @@ static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
 	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
 }
 
+/*
+ * Verify the fitness of task @p to run on @cpu taking into account the
+ * CPU original capacity and the runtime/deadline ratio of the task.
+ *
+ * The function will return true if the CPU original capacity of the
+ * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
+ * task and false otherwise.
+ */
+static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
+{
+	unsigned long cap = arch_scale_cpu_capacity(cpu);
+
+	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
+}
+
 extern void init_dl_bw(struct dl_bw *dl_b);
 extern int sched_dl_global_validate(void);
 extern void sched_dl_do_global(void);
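
[Editorial illustration, not part of the patch.] The effect of the new filtering in cpudl_find() can be sketched in user space with a plain bitmask standing in for later_mask: CPUs whose capacity cannot serve the task are cleared from the candidate set, and the set is only used if at least one fitting CPU remains. The capacities and names below (cpu_capacity[], task_fits()) are hypothetical and only mimic the for_each_cpu() loop added above:

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define NR_CPUS			4

/* Hypothetical per-CPU original capacities: two LITTLE CPUs, two big CPUs. */
static const unsigned long cpu_capacity[NR_CPUS] = { 512, 512, 1024, 1024 };

/* Same criterion as dl_task_fits_capacity(): capacity/1024 >= runtime/deadline. */
static bool task_fits(unsigned long long runtime, unsigned long long deadline,
		      int cpu)
{
	return ((deadline * cpu_capacity[cpu]) >> SCHED_CAPACITY_SHIFT) >= runtime;
}

int main(void)
{
	unsigned int later_mask = 0xf;	/* all four CPUs start as candidates */
	int cpu;

	/* Drop candidates whose capacity cannot serve a 70ms/100ms task. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!task_fits(70, 100, cpu))
			later_mask &= ~(1u << cpu);
	}

	/* Only CPUs 2 and 3 (the 1024-capacity ones) remain: prints 0xc. */
	printf("fitting candidates: 0x%x\n", later_mask);
	return 0;
}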