Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  26
1 file changed, 19 insertions, 7 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 10d018212bab..97540f0c9e47 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -185,11 +185,23 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 
 typedef struct task_group *rt_rq_iter_t;
 
-#define for_each_rt_rq(rt_rq, iter, rq) \
-	for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
-	     (&iter->list != &task_groups) && \
-	     (rt_rq = iter->rt_rq[cpu_of(rq)]); \
-	     iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	do {
+		tg = list_entry_rcu(tg->list.next,
+			typeof(struct task_group), list);
+	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
+
+	if (&tg->list == &task_groups)
+		tg = NULL;
+
+	return tg;
+}
+
+#define for_each_rt_rq(rt_rq, iter, rq)					\
+	for (iter = container_of(&task_groups, typeof(*iter), list);	\
+		(iter = next_task_group(iter)) &&			\
+		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
 
 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
 {
@@ -1126,7 +1138,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 
 	rt_rq = &rq->rt;
 
-	if (unlikely(!rt_rq->rt_nr_running))
+	if (!rt_rq->rt_nr_running)
 		return NULL;
 
 	if (rt_rq_throttled(rt_rq))
@@ -1548,7 +1560,7 @@ skip:
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+	if (rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }
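
The new iterator replaces the open-coded RCU list walk with a helper, next_task_group(), that skips autogroup task groups and returns NULL once the walk wraps back to the task_groups list head, so for_each_rt_rq() no longer needs its own termination test. Below is a minimal, self-contained userspace sketch of that pattern; the names (struct group, next_group, for_each_group, is_auto) are illustrative only, and the kernel's RCU accessors and per-CPU rt_rq lookup are deliberately left out.

/*
 * Minimal sketch of the "filtering next helper" iteration pattern:
 * next_group() walks a circular list, skips hidden entries, and
 * returns NULL when the walk wraps back to the list head.
 * Illustrative names only; not the kernel's data structures.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct group {
	const char *name;
	int is_auto;			/* entries to skip during iteration */
	struct list_head list;
};

static struct list_head groups = { &groups, &groups };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Advance to the next visible group, or NULL when the walk is complete. */
static struct group *next_group(struct group *g)
{
	do {
		g = container_of(g->list.next, struct group, list);
	} while (&g->list != &groups && g->is_auto);

	if (&g->list == &groups)
		return NULL;

	return g;
}

/* Prime the walk from the list head itself, as the new macro does. */
#define for_each_group(iter)						\
	for (iter = container_of(&groups, struct group, list);		\
	     (iter = next_group(iter));)

int main(void)
{
	struct group a = { "root", 0 }, b = { "autogroup-5", 1 }, c = { "fg", 0 };
	struct group *g;

	list_add_tail(&a.list, &groups);
	list_add_tail(&b.list, &groups);
	list_add_tail(&c.list, &groups);

	for_each_group(g)
		printf("%s\n", g->name);	/* prints "root" and "fg" only */

	return 0;
}

Starting the iterator at container_of(&groups, ...) is safe here because nothing but iter->list.next is ever read before next_group() has advanced to a real entry, which mirrors how the new for_each_rt_rq() primes its walk from &task_groups itself.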
