author     Ingo Molnar <mingo@elte.hu>    2008-03-16 22:36:10 +0300
committer  Ingo Molnar <mingo@elte.hu>    2008-03-19 06:27:52 +0300
commit     098fb9db2c74cfd6ffdbf61eb026a0c21abc5f75 (patch)
tree       1b7d4d35ce9d375f7078937518f92b60f4fda000 /kernel
parent     f920bb6f5fe21047e669381fe4dd346f6a9d3562 (diff)
download   linux-098fb9db2c74cfd6ffdbf61eb026a0c21abc5f75.tar.xz
sched: clean up wakeup balancing, move wake_affine()
split out the affine-wakeup bits.

No code changed:

kernel/sched.o:

   text    data     bss     dec     hex filename
  42521    2858     232   45611    b22b sched.o.before
  42521    2858     232   45611    b22b sched.o.after

md5:
   9d76738f1272aa82f0b7affd2f51df6b  sched.o.before.asm
   09b31c44e9aff8666f72773dc433e2df  sched.o.after.asm

(the md5's changed because stack slots changed and some registers get
scheduled by gcc in a different order - but otherwise the before and
after assembly is instruction for instruction equivalent.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
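The commit message does not say how the .asm listings above were produced; the snippet below is a minimal sketch (not part of the patch) of one way to reproduce the "no code changed" check, assuming sched.o.before and sched.o.after are saved copies of kernel/sched.o from before and after the patch, and that the size, objdump and md5sum tools are available:

	/*
	 * Minimal sketch (not from this commit): compare section sizes and
	 * hash the disassembly of the two saved object files.  File names
	 * and the use of objdump -d are assumptions, not taken from the
	 * commit itself.
	 */
	#include <stdlib.h>

	int main(void)
	{
		/* section sizes should match exactly */
		system("size sched.o.before sched.o.after");

		/* disassemble both objects, then hash the listings */
		system("objdump -d sched.o.before > sched.o.before.asm");
		system("objdump -d sched.o.after  > sched.o.after.asm");
		system("md5sum sched.o.before.asm sched.o.after.asm");

		/* differing md5s (as here) still call for a manual diff of the .asm files */
		return 0;
	}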
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c | 134
1 file changed, 75 insertions(+), 59 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2cc59080efa..70679b266693 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -980,12 +980,59 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 #endif

 #ifdef CONFIG_SMP
+
+static int
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
+	    int cpu, int this_cpu, int sync, int idx,
+	    unsigned long load, unsigned long this_load,
+	    unsigned int imbalance)
+{
+	unsigned long tl = this_load;
+	unsigned long tl_per_task;
+
+	if (!(this_sd->flags & SD_WAKE_AFFINE))
+		return 0;
+
+	/*
+	 * Attract cache-cold tasks on sync wakeups:
+	 */
+	if (sync && !task_hot(p, rq->clock, this_sd))
+		return 1;
+
+	schedstat_inc(p, se.nr_wakeups_affine_attempts);
+	tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync)
+		tl -= current->se.load.weight;
+
+	if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) ||
+			100*(tl + p->se.load.weight) <= imbalance*load) {
+		/*
+		 * This domain has SD_WAKE_AFFINE and
+		 * p is cache cold in this domain, and
+		 * there is no bad imbalance.
+		 */
+		schedstat_inc(this_sd, ttwu_move_affine);
+		schedstat_inc(p, se.nr_wakeups_affine);
+
+		return 1;
+	}
+	return 0;
+}
+
 static int select_task_rq_fair(struct task_struct *p, int sync)
 {
-	int cpu, this_cpu;
-	struct rq *rq;
 	struct sched_domain *sd, *this_sd = NULL;
-	int new_cpu;
+	unsigned long load, this_load;
+	int cpu, this_cpu, new_cpu;
+	unsigned int imbalance;
+	struct rq *rq;
+	int idx;

 	cpu = task_cpu(p);
 	rq = task_rq(p);
@@ -1008,66 +1055,35 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	/*
 	 * Check for affine wakeup and passive balancing possibilities.
 	 */
-	if (this_sd) {
-		int idx = this_sd->wake_idx;
-		unsigned int imbalance;
-		unsigned long load, this_load;
-
-		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
-		load = source_load(cpu, idx);
-		this_load = target_load(this_cpu, idx);
-
-		new_cpu = this_cpu; /* Wake to this CPU if we can */
-
-		if (this_sd->flags & SD_WAKE_AFFINE) {
-			unsigned long tl = this_load;
-			unsigned long tl_per_task;
-
-			/*
-			 * Attract cache-cold tasks on sync wakeups:
-			 */
-			if (sync && !task_hot(p, rq->clock, this_sd))
-				goto out_set_cpu;
-
-			schedstat_inc(p, se.nr_wakeups_affine_attempts);
-			tl_per_task = cpu_avg_load_per_task(this_cpu);
-
-			/*
-			 * If sync wakeup then subtract the (maximum possible)
-			 * effect of the currently running task from the load
-			 * of the current CPU:
-			 */
-			if (sync)
-				tl -= current->se.load.weight;
-
-			if ((tl <= load &&
-				tl + target_load(cpu, idx) <= tl_per_task) ||
-				100*(tl + p->se.load.weight) <= imbalance*load) {
-				/*
-				 * This domain has SD_WAKE_AFFINE and
-				 * p is cache cold in this domain, and
-				 * there is no bad imbalance.
-				 */
-				schedstat_inc(this_sd, ttwu_move_affine);
-				schedstat_inc(p, se.nr_wakeups_affine);
-				goto out_set_cpu;
-			}
-		}
+	if (!this_sd)
+		goto out_keep_cpu;

-		/*
-		 * Start passive balancing when half the imbalance_pct
-		 * limit is reached.
-		 */
-		if (this_sd->flags & SD_WAKE_BALANCE) {
-			if (imbalance*this_load <= 100*load) {
-				schedstat_inc(this_sd, ttwu_move_balance);
-				schedstat_inc(p, se.nr_wakeups_passive);
-				goto out_set_cpu;
-			}
+	idx = this_sd->wake_idx;
+
+	imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+	load = source_load(cpu, idx);
+	this_load = target_load(this_cpu, idx);
+
+	new_cpu = this_cpu; /* Wake to this CPU if we can */
+
+	if (wake_affine(rq, this_sd, p, cpu, this_cpu, sync, idx,
+			load, this_load, imbalance))
+		goto out_set_cpu;
+
+	/*
+	 * Start passive balancing when half the imbalance_pct
+	 * limit is reached.
+	 */
+	if (this_sd->flags & SD_WAKE_BALANCE) {
+		if (imbalance*this_load <= 100*load) {
+			schedstat_inc(this_sd, ttwu_move_balance);
+			schedstat_inc(p, se.nr_wakeups_passive);
+			goto out_set_cpu;
 		}
 	}

+out_keep_cpu:
 	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
 out_set_cpu:
 	return wake_idle(new_cpu, p);