author		Peter Zijlstra <a.p.zijlstra@chello.nl>		2009-12-24 16:18:21 +0300
committer	Ingo Molnar <mingo@elte.hu>			2010-01-21 15:40:15 +0300
commit		8f190fb3f7a405682666d3723f6ec370b5afe4da (patch)
tree		4494079705c3c18e5e4f48c5a77877677b244d5d /kernel/sched_fair.c
parent		f492e12ef050e02bf0185b6b57874992591b9be1 (diff)
download	linux-8f190fb3f7a405682666d3723f6ec370b5afe4da.tar.xz
sched: Assume *balance is valid
Since all load_balance() callers will have !NULL balance parameters we can now assume so and remove a few checks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
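For context, the sketch below is a minimal standalone illustration of the pattern this patch applies; it is plain userspace C with hypothetical names (pick_busiest_cpu_old/new, should_balance), not code from this patch or the kernel. The "old" variant guards every dereference with "balance &&" because the pointer might be NULL; once every caller is guaranteed to pass a valid pointer, the guard can be dropped and the pointer dereferenced unconditionally.

/*
 * Standalone sketch (not kernel code) of the simplification: move the
 * "pointer is valid" contract from the callee's NULL checks to a
 * guarantee provided by all callers.
 */
#include <stdio.h>

/* Old style: 'balance' may be NULL, so each use is guarded. */
static int pick_busiest_cpu_old(int this_cpu, int balance_cpu, int *balance)
{
	if (balance_cpu != this_cpu && balance) {
		*balance = 0;	/* tell the caller not to balance here */
		return -1;
	}
	return balance_cpu;
}

/* New style: every caller passes &should_balance, so no NULL check. */
static int pick_busiest_cpu_new(int this_cpu, int balance_cpu, int *balance)
{
	if (balance_cpu != this_cpu) {
		*balance = 0;	/* safe: the pointer is always valid */
		return -1;
	}
	return balance_cpu;
}

int main(void)
{
	int should_balance = 1;

	pick_busiest_cpu_old(0, 1, &should_balance);
	printf("old: should_balance = %d\n", should_balance);

	should_balance = 1;
	pick_busiest_cpu_new(0, 1, &should_balance);
	printf("new: should_balance = %d\n", should_balance);

	return 0;
}

Given the caller guarantee, the change is behavior-preserving; the gain is a few fewer checks in the load-balancing paths and a clearer contract.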
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de5ab1239e04..0b482f5b5b3b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2465,7 +2465,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * to do the newly idle load balance.
 	 */
 	if (idle != CPU_NEWLY_IDLE && local_group &&
-	    balance_cpu != this_cpu && balance) {
+	    balance_cpu != this_cpu) {
 		*balance = 0;
 		return;
 	}
@@ -2528,7 +2528,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
 				local_group, cpus, balance, &sgs);
 
-		if (local_group && balance && !(*balance))
+		if (local_group && !(*balance))
 			return;
 
 		sds->total_load += sgs.group_load;
@@ -2720,7 +2720,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * 5) The imbalance is within the specified limit.
 	 * 6) Any rebalance would lead to ping-pong
 	 */
-	if (balance && !(*balance))
+	if (!(*balance))
 		goto ret;
 
 	if (!sds.busiest || sds.busiest_nr_running == 0)