author		Paul Turner <pjt@google.com>		2012-10-04 15:18:31 +0400
committer	Ingo Molnar <mingo@kernel.org>		2012-10-24 12:27:25 +0400
commit		8165e145ceb62fc338e099c9b12b3239c83d2f8e (patch)
tree		be494c8d97adec0615f776515875e4961a40d920 /kernel
parent		c566e8e9e44b72b53091da20e2dedefc730f2ee2 (diff)
download	linux-8165e145ceb62fc338e099c9b12b3239c83d2f8e.tar.xz
sched: Compute load contribution by a group entity
Unlike task entities, which have a fixed weight, group entities instead own a
fraction of their parenting task_group's shares as their contributed weight.
Compute this fraction so that we can correctly account for hierarchies and
shared entity nodes.
Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.855074415@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
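
The fraction described above works out to tg_load_contrib * tg->shares /
(tg->load_avg + 1), as implemented by __update_group_entity_contrib() in the
diff below. As a minimal userspace sketch of that arithmetic, with
hypothetical sample values and div64_u64() modeled as a plain 64-bit
division (which is what it reduces to on 64-bit hosts):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical sample values for one cfs_rq of a task_group. */
	uint64_t tg_load_contrib = 3072; /* this cfs_rq's contribution to tg load */
	uint64_t tg_shares = 1024;       /* the task_group's configured shares */
	uint64_t tg_load_avg = 9216;     /* tg load summed over all CPUs */

	/* contrib = tg_load_contrib * shares / (tg load + 1);
	 * the +1 guards against dividing by zero for an idle group. */
	uint64_t contrib = tg_load_contrib * tg_shares / (tg_load_avg + 1);

	/* This cfs_rq carries ~1/3 of the group load, so the group
	 * entity contributes ~1/3 of the shares: prints 341. */
	printf("group entity load_avg_contrib = %llu\n",
	       (unsigned long long)contrib);
	return 0;
}

(In the kernel, div64_u64() exists because a plain 64-by-64 division is not
available on all 32-bit architectures; the sketch sidesteps that detail.)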
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	33
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db788222f198..e20cb2693ef7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1117,22 +1117,43 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 		cfs_rq->tg_load_contrib += tg_contrib;
 	}
 }
+
+static inline void __update_group_entity_contrib(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = group_cfs_rq(se);
+	struct task_group *tg = cfs_rq->tg;
+	u64 contrib;
+
+	contrib = cfs_rq->tg_load_contrib * tg->shares;
+	se->avg.load_avg_contrib = div64_u64(contrib,
+				     atomic64_read(&tg->load_avg) + 1);
+}
 #else
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
+static inline void __update_group_entity_contrib(struct sched_entity *se) {}
 #endif
 
+static inline void __update_task_entity_contrib(struct sched_entity *se)
+{
+	u32 contrib;
+
+	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
+	contrib /= (se->avg.runnable_avg_period + 1);
+	se->avg.load_avg_contrib = scale_load(contrib);
+}
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
 	long old_contrib = se->avg.load_avg_contrib;
 
-	if (!entity_is_task(se))
-		return 0;
-
-	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
-					     se->load.weight,
-					     se->avg.runnable_avg_period + 1);
+	if (entity_is_task(se)) {
+		__update_task_entity_contrib(se);
+	} else {
+		__update_group_entity_contrib(se);
+	}
 
 	return se->avg.load_avg_contrib - old_contrib;
 }
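
For the task-entity side, __update_task_entity_contrib() scales the task's
weight by the fraction of time it was runnable. A matching userspace sketch,
again with hypothetical values; scale_load()/scale_load_down() are treated as
identity functions here, as on kernels built without increased load
resolution:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical decayed averages from per-entity load tracking. */
	uint32_t runnable_avg_sum = 23000;    /* runnable time, geometric series */
	uint32_t runnable_avg_period = 46000; /* total time, geometric series */
	uint32_t weight = 1024;               /* load weight of a nice-0 task */

	/* contrib = weight * runnable_sum / (period + 1); this fits in
	 * 32 bits because the series saturate well below 2^32 / 1024,
	 * which is what the patch's overflow comment relies on. */
	uint32_t contrib = runnable_avg_sum * weight / (runnable_avg_period + 1);

	/* Runnable ~50% of the time, so roughly half the weight: prints 511. */
	printf("task load_avg_contrib = %u\n", contrib);
	return 0;
}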