summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-09-03 15:16:51 +0400
committerIngo Molnar <mingo@elte.hu>2009-09-15 18:01:07 +0400
commit78e7ed53c9f42f04f9401ada6f7047db60781676 (patch)
tree24f45333ce4479b27c96b425c7d09c080a26609f /kernel
parentd7c33c4930f569caf6b2ece597432853c4151a45 (diff)
downloadlinux-78e7ed53c9f42f04f9401ada6f7047db60781676.tar.xz
sched: Tweak wake_idx
When merging select_task_rq_fair() and sched_balance_self() we lost the use of wake_idx; restore that and set them to 0 to make wake balancing more aggressive.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_fair.c21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8b3eddbcf9a4..19593568031a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1232,12 +1232,27 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* domain.
*/
static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int flag)
{
struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
- int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
+ int load_idx = 0;
+
+ switch (flag) {
+ case SD_BALANCE_FORK:
+ case SD_BALANCE_EXEC:
+ load_idx = sd->forkexec_idx;
+ break;
+
+ case SD_BALANCE_WAKE:
+ load_idx = sd->wake_idx;
+ break;
+
+ default:
+ break;
+ }
do {
unsigned long load, avg_load;
@@ -1392,7 +1407,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
continue;
}
- group = find_idlest_group(sd, p, cpu);
+ group = find_idlest_group(sd, p, cpu, flag);
if (!group) {
sd = sd->child;
continue;