commit fd7a4bed183523275279c9addbf42fce550c2e90
tree dbc5e639a704774c0278ee3e6e95ecb682dbff58
parent 8046d6806247088de5725eaf8a2580b29e50ac5a
author Peter Zijlstra <peterz@infradead.org> 2015-06-11 15:46:41 +0300
committer Thomas Gleixner <tglx@linutronix.de> 2015-06-19 01:25:26 +0300
sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
Remove the direct {push,pull} balancing operations from
switched_{from,to}_rt() / prio_changed_rt() and use the balance
callback queue.
Again, err on the side of too many reschedules: too few is a hard bug,
while too many is merely annoying.
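
The pattern at work here can be illustrated in isolation. The sketch below
(an editorial stand-in, not kernel code: the types are pared down and
run_balance_callbacks() is an invented name for the drain step) queues
callbacks on a per-runqueue list under what would be rq->lock and runs them
later; queueing an already-queued callback is a no-op, which is exactly what
makes erring toward too many queue operations safe:

#include <stdio.h>

struct rq;

struct callback_head {
	struct callback_head *next;
	void (*func)(struct rq *rq);
};

struct rq {
	struct callback_head *balance_callback;	/* rq->lock protected in the kernel */
};

/* No-op if @head is already queued, so redundant queueing is harmless. */
static void queue_balance_callback(struct rq *rq, struct callback_head *head,
				   void (*func)(struct rq *rq))
{
	if (head->next)
		return;
	head->func = func;
	/* Terminate the list with a self-link instead of NULL, so that
	 * head->next != NULL reliably means "queued". */
	head->next = rq->balance_callback ? rq->balance_callback : head;
	rq->balance_callback = head;
}

static void run_balance_callbacks(struct rq *rq)
{
	struct callback_head *head = rq->balance_callback;

	rq->balance_callback = NULL;
	while (head) {
		struct callback_head *next = head->next;

		head->next = NULL;			/* mark dequeued */
		head->func(rq);
		head = (next == head) ? NULL : next;	/* self-link ends list */
	}
}

static void push_rt_tasks(struct rq *rq) { (void)rq; puts("push_rt_tasks"); }
static void pull_rt_task(struct rq *rq)  { (void)rq; puts("pull_rt_task"); }

int main(void)
{
	struct rq rq = { 0 };
	struct callback_head push_head = { 0 }, pull_head = { 0 };

	/* Queue the push callback twice; it still runs only once. */
	queue_balance_callback(&rq, &push_head, push_rt_tasks);
	queue_balance_callback(&rq, &push_head, push_rt_tasks);
	queue_balance_callback(&rq, &pull_head, pull_rt_task);

	run_balance_callbacks(&rq);	/* prints: pull_rt_task, push_rt_tasks */
	return 0;
}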
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched')
 kernel/sched/rt.c | 35 +++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c702b48de9f0..460f85888b74 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -354,16 +354,23 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
+static void pull_rt_task(struct rq *);
 
 static inline void queue_push_tasks(struct rq *rq)
 {
 	if (!has_pushable_tasks(rq))
 		return;
 
-	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
+}
+
+static inline void queue_pull_task(struct rq *rq)
+{
+	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
 }
 
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -2139,7 +2146,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;
 
-	pull_rt_task(rq);
+	queue_pull_task(rq);
 }
 
 void __init init_sched_rt_class(void)
@@ -2160,8 +2167,6 @@ void __init init_sched_rt_class(void)
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
-	int check_resched = 1;
-
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
@@ -2171,13 +2176,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
-		    /* Don't resched if we changed runqueues */
-		    push_rt_task(rq) && rq != task_rq(p))
-			check_resched = 0;
-#endif /* CONFIG_SMP */
-		if (check_resched && p->prio < rq->curr->prio)
+		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+			queue_push_tasks(rq);
+#else
+		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
+#endif /* CONFIG_SMP */
 	}
 }
 
@@ -2198,14 +2202,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 * may need to pull tasks to this runqueue.
 		 */
 		if (oldprio < p->prio)
-			pull_rt_task(rq);
+			queue_pull_task(rq);
+
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule. Note, the above pull_rt_task
-		 * can release the rq lock and p could migrate.
-		 * Only reschedule if p is still on the same runqueue.
+		 * then reschedule.
 		 */
-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr)
 			resched_curr(rq);
 #else
 		/* For UP simply resched on drop of prio */
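
For context: queue_push_tasks() and queue_pull_task() above are built on
queue_balance_callback(), added to kernel/sched/sched.h earlier in this same
series. That helper looks approximately like this (an approximate
reproduction for context, not part of this diff):

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	/* Already queued: nothing to do. */
	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

The queued callbacks run only after the scheduling operation that queued them
is done with rq->lock; the series invokes them from the tail of __schedule()
and after the priority-change paths. That deferral is why prio_changed_rt()
above can drop the old warning that pull_rt_task() may release the rq lock,
together with the 'rq->curr == p' recheck it forced.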