Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	36
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 01970c8e64df..1c4065575fa2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
 	 * if we should look at the mask. It would be a shame
 	 * if we looked at the mask, but the mask was not
 	 * updated yet.
+	 *
+	 * Matched by the barrier in pull_rt_task().
 	 */
-	wmb();
+	smp_wmb();
 	atomic_inc(&rq->rd->rto_count);
 }
 
@@ -899,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Change rq's cpupri only if rt_rq is the top queue.
+	 */
+	if (&rq->rt != rt_rq)
+		return;
+#endif
 	if (rq->online && prio < prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
@@ -908,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Change rq's cpupri only if rt_rq is the top queue.
+	 */
+	if (&rq->rt != rt_rq)
+		return;
+#endif
 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
@@ -1169,13 +1185,10 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
-	int cpu;
-
-	cpu = task_cpu(p);
 
 	if (p->nr_cpus_allowed == 1)
 		goto out;
@@ -1213,8 +1226,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->nr_cpus_allowed < 2 ||
-	     curr->prio <= p->prio) &&
-	    (p->nr_cpus_allowed > 1)) {
+	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
 		if (target != -1)
@@ -1630,6 +1642,12 @@ static int pull_rt_task(struct rq *this_rq)
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
+	/*
+	 * Match the barrier from rt_set_overloaded; this guarantees that if we
+	 * see overloaded we must also see the rto_mask bit.
+	 */
+	smp_rmb();
+
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
@@ -1931,8 +1949,8 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
-	 * Requeue to the end of queue if we (and all of our ancestors) are the
-	 * only element on the queue
+	 * Requeue to the end of queue if we (and all of our ancestors) are not
+	 * the only element on the queue
 	 */
 	for_each_sched_rt_entity(rt_se) {
 		if (rt_se->run_list.prev != rt_se->run_list.next) {
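
For context on the barrier pairing this diff documents: rt_set_overload() publishes the CPU's bit in rd->rto_mask and only then increments rd->rto_count, so pull_rt_task() needs a matching read barrier to guarantee that a reader who observes a non-zero count also observes the mask bit. The sketch below is a minimal userspace analogue of that publish/consume pattern, not kernel code; C11 release/acquire fences stand in for smp_wmb()/smp_rmb(), and the names (publish_overload, check_overload, mask_bit, count) are illustrative only.

/*
 * Userspace illustration of the smp_wmb()/smp_rmb() pairing documented in
 * the patch above. NOT kernel code: C11 fences model the kernel barriers,
 * and all identifiers here are made up for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int mask_bit;	/* plays the role of this CPU's bit in rd->rto_mask */
static atomic_int count;	/* plays the role of rd->rto_count */

static void *publish_overload(void *arg)
{
	(void)arg;
	/* Set the "mask" first ... */
	atomic_store_explicit(&mask_bit, 1, memory_order_relaxed);
	/* ... and make it visible before bumping the count (smp_wmb() analogue). */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&count, 1, memory_order_relaxed);
	return NULL;
}

static void *check_overload(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&count, memory_order_relaxed)) {
		/* Matching smp_rmb() analogue: order the count load before the mask load. */
		atomic_thread_fence(memory_order_acquire);
		/* Having seen count != 0, we are guaranteed to see the mask bit as well. */
		printf("mask_bit = %d\n",
		       atomic_load_explicit(&mask_bit, memory_order_relaxed));
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, publish_overload, NULL);
	pthread_create(&r, NULL, check_overload, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

If the reader's first load returns 0 it learns nothing about mask_bit, which mirrors the early "return 0" in pull_rt_task() when rt_overloaded() is not set.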
