Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/core.c     | 13
-rw-r--r--   kernel/sched/deadline.c | 25
-rw-r--r--   kernel/sched/fair.c     |  6
3 files changed, 14 insertions, 30 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..c0accc00566e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	alloc_size += num_possible_cpus() * cpumask_size();
-#endif
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+	}
 #ifdef CONFIG_CPUMASK_OFFSTACK
-		for_each_possible_cpu(i) {
-			per_cpu(load_balance_mask, i) = (void *)ptr;
-			ptr += cpumask_size();
-		}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+	for_each_possible_cpu(i) {
+		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
 	init_rt_bandwidth(&def_rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-	int rorun = dl_se->runtime <= 0;
-
-	if (!rorun && !dmiss)
-		return 0;
-
-	/*
-	 * If we are beyond our current deadline and we are still
-	 * executing, then we have already used some of the runtime of
-	 * the next instance. Thus, if we do not account that, we are
-	 * stealing bandwidth from the system at each deadline miss!
-	 */
-	if (dmiss) {
-		dl_se->runtime = rorun ? dl_se->runtime : 0;
-		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-	}
-
-	return 1;
+	return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-		replenish_dl_entity(dl_se, pi_se);
-	else
+	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
 		update_dl_entity(dl_se, pi_se);
+	else if (flags & ENQUEUE_REPLENISH)
+		replenish_dl_entity(dl_se, pi_se);
 
 	__enqueue_dl_entity(dl_se);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+	/* init_cfs_bandwidth() was not called */
+	if (!cfs_b->throttled_cfs_rq.next)
+		return;
+
 	hrtimer_cancel(&cfs_b->period_timer);
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		 * wl = S * s'_i; see (2)
 		 */
 		if (W > 0 && w < W)
-			wl = (w * tg->shares) / W;
+			wl = (w * (long)tg->shares) / W;
 		else
 			wl = tg->shares;
 
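The destroy_cfs_bandwidth() hunk guards teardown of a cfs_bandwidth whose init routine never ran: a zeroed structure leaves throttled_cfs_rq.next == NULL, so a NULL check is enough to skip cancelling hrtimers that were never initialised. Below is a minimal user-space sketch of that guard pattern; the types and names (bw_init, bw_destroy, timer_armed) are illustrative stand-ins, not kernel API.

#include <stdio.h>

/* Stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

struct bandwidth {
	struct list_head throttled;	/* next == NULL => init never ran */
	int timer_armed;		/* stands in for the hrtimers */
};

static void bw_init(struct bandwidth *b)
{
	b->throttled.next = &b->throttled;	/* INIT_LIST_HEAD equivalent */
	b->throttled.prev = &b->throttled;
	b->timer_armed = 1;
}

static void bw_destroy(struct bandwidth *b)
{
	/* bw_init() was never called: nothing to tear down. */
	if (!b->throttled.next)
		return;

	b->timer_armed = 0;	/* stands in for hrtimer_cancel() */
}

int main(void)
{
	struct bandwidth never_inited = { 0 };
	struct bandwidth inited = { 0 };

	bw_init(&inited);

	bw_destroy(&never_inited);	/* safe: the guard returns early */
	bw_destroy(&inited);		/* normal teardown path */

	printf("never_inited armed: %d, inited armed: %d\n",
	       never_inited.timer_armed, inited.timer_armed);
	return 0;
}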
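The (long) cast added in the effective_load() hunk matters because tg->shares is an unsigned quantity (which the cast implies): multiplying it by a negative signed weight silently switches the whole expression to unsigned arithmetic, producing a huge positive result instead of a small negative one. A minimal user-space sketch of the effect; variable names are illustrative, not kernel code.

#include <stdio.h>

int main(void)
{
	unsigned long shares = 1024;	/* stands in for tg->shares */
	long w = -512;			/* weight delta; can be negative */
	long W = 2048;

	/*
	 * Mixed signed/unsigned arithmetic: w is converted to unsigned long,
	 * so the negative weight becomes a huge positive value before the divide.
	 */
	long bad = (long)((w * shares) / W);

	/* With the cast the multiplication stays signed, as in the fair.c hunk. */
	long good = (w * (long)shares) / W;

	printf("without cast: %ld\n", bad);
	printf("with cast:    %ld\n", good);
	return 0;
}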
