author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-09-01 12:34:35 +0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-04 12:09:54 +0400
commit | a52bfd73589eaf88d9c95ad2c1de0b38a6b27972 (patch) |
tree | 33cee609a46624525abf1643d71d0fbc79e25f87 |
parent | cc9fba7d7672fa3ed58d9d9ecb6c45b1351c29a6 (diff) |
download | linux-a52bfd73589eaf88d9c95ad2c1de0b38a6b27972.tar.xz |
sched: Add smt_gain
The idea is that multi-threading a core yields more work
capacity than a single thread does; provide a way to express
this as a static gain for the SMT threads.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083826.073345955@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
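A rough reading of the numbers, assuming SCHED_LOAD_SCALE = 1024 (1 << SCHED_LOAD_SHIFT) as in this kernel: the default smt_gain of 1178 set in the topology.h hunk below works out to 1178 / 1024 ≈ 1.15, i.e. the ~15% static capacity gain the comment refers to. A worked example follows the diff.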
-rw-r--r-- | include/linux/sched.h | 1 |
-rw-r--r-- | include/linux/topology.h | 1 |
-rw-r--r-- | kernel/sched.c | 8 |
3 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 651dded25720..9c81c921acb3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -921,6 +921,7 @@ struct sched_domain {
 	unsigned int newidle_idx;
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
+	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	enum sched_domain_level level;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..6203ae5067ce 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,6 +99,7 @@ int arch_update_cpu_topology(void);
 				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
+	.smt_gain		= 1178,	/* 15% */	\
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
diff --git a/kernel/sched.c b/kernel/sched.c
index ecb4a47d4214..55112261027b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8523,9 +8523,15 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 		weight = cpumask_weight(sched_domain_span(sd));
 		/*
 		 * SMT siblings share the power of a single core.
+		 * Usually multiple threads get a better yield out of
+		 * that one core than a single thread would have,
+		 * reflect that in sd->smt_gain.
 		 */
-		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+			power *= sd->smt_gain;
 			power /= weight;
+			power >>= SCHED_LOAD_SHIFT;
+		}
 		sg_inc_cpu_power(sd->groups, power);
 		return;
 	}
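A minimal, self-contained sketch of the arithmetic the kernel/sched.c hunk performs (ordinary userspace C, not kernel code; sibling_power() is a hypothetical helper, and the constants assume SCHED_LOAD_SHIFT = 10 as in this kernel version):

```c
#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

/*
 * cpu_power credited to one SMT sibling: the core's power scaled by
 * smt_gain, then split across 'weight' hardware threads, mirroring the
 * new branch in init_sched_groups_power().
 */
static unsigned long sibling_power(unsigned long power,
				   unsigned long smt_gain,
				   unsigned int weight)
{
	power *= smt_gain;		/* apply the static SMT gain        */
	power /= weight;		/* share it among the siblings      */
	power >>= SCHED_LOAD_SHIFT;	/* scale back into SCHED_LOAD units */
	return power;
}

int main(void)
{
	/* Two siblings, default smt_gain = 1178: prints 589. */
	printf("%lu\n", sibling_power(SCHED_LOAD_SCALE, 1178, 2));
	return 0;
}
```

With the default smt_gain of 1178 and two hardware threads per core, each sibling is credited with a cpu_power of 589 rather than a plain 1024 / 2 = 512, so the pair together counts for roughly 15% more than a single-threaded core.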