author | Chengming Zhou <zhouchengming@bytedance.com> | 2022-08-25 19:41:10 +0300
committer | Peter Zijlstra <peterz@infradead.org> | 2022-09-09 12:08:33 +0300
commit | dc86aba751e2867244411adda1562f6664747019 (patch)
tree | a3da212be6c190b5585ee4f9761c4177ad4f19c6 /kernel/sched
parent | 57899a6610e67ba26fa3251ebbef4a5ed21efc5d (diff)
download | linux-dc86aba751e2867244411adda1562f6664747019.tar.xz
sched/psi: Cache parent psi_group to speed up group iteration
We use iterate_groups() to walk each level's psi_group when updating
PSI stats, which is a very hot path.

In the current code, iterate_groups() has to go through multiple
branches and call cgroup_parent() to find the parent psi_group at each
level, which is not very efficient.

This patch caches the parent psi_group in struct psi_group: we only
need to look up the psi_group of the task itself once, then follow
group->parent to iterate up the hierarchy.
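For illustration, here is a minimal userspace model of the resulting pattern. The group_alloc() helper and the A / A/B hierarchy are made up for this sketch; the actual parent member is added to struct psi_group in include/linux/psi_types.h, which this kernel/sched-limited diff does not show:

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-in for the kernel's struct psi_group. */
	struct psi_group {
		struct psi_group *parent;	/* cached once at allocation */
		const char *name;
	};

	/* Top-level group, analogous to the kernel's psi_system. */
	static struct psi_group psi_system = { NULL, "system" };

	/* Mirrors what psi_cgroup_alloc() now does: wire the parent once. */
	static struct psi_group *group_alloc(const char *name,
					     struct psi_group *parent)
	{
		struct psi_group *g = malloc(sizeof(*g));

		if (!g)
			exit(1);
		g->name = name;
		g->parent = parent;
		return g;
	}

	int main(void)
	{
		struct psi_group *a = group_alloc("A", &psi_system);
		struct psi_group *b = group_alloc("A/B", a);
		struct psi_group *group = b;	/* task_psi_group() analogue */

		/* The new hot-path shape: one pointer chase per level,
		 * no branches, no cgroup_parent() calls. */
		do {
			printf("update %s\n", group->name);
		} while ((group = group->parent));

		free(b);
		free(a);
		return 0;
	}

Wiring the parent once at allocation time, as psi_cgroup_alloc() does in the diff below, is what lets every per-event update loop collapse into this plain do/while.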
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lore.kernel.org/r/20220825164111.29534-10-zhouchengming@bytedance.com
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/psi.c | 49
1 file changed, 19 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 2545a78f82d8..9a8aee80a087 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -772,27 +772,12 @@ static void psi_group_change(struct psi_group *group, int cpu,
 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 }
 
-static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
+static inline struct psi_group *task_psi_group(struct task_struct *task)
 {
-	if (*iter == &psi_system)
-		return NULL;
-
 #ifdef CONFIG_CGROUPS
-	if (static_branch_likely(&psi_cgroups_enabled)) {
-		struct cgroup *cgroup = NULL;
-
-		if (!*iter)
-			cgroup = task->cgroups->dfl_cgrp;
-		else
-			cgroup = cgroup_parent(*iter);
-
-		if (cgroup && cgroup_parent(cgroup)) {
-			*iter = cgroup;
-			return cgroup_psi(cgroup);
-		}
-	}
+	if (static_branch_likely(&psi_cgroups_enabled))
+		return cgroup_psi(task_dfl_cgroup(task));
 #endif
-	*iter = &psi_system;
 	return &psi_system;
 }
 
@@ -815,7 +800,6 @@ void psi_task_change(struct task_struct *task, int clear, int set)
 {
 	int cpu = task_cpu(task);
 	struct psi_group *group;
-	void *iter = NULL;
 	u64 now;
 
 	if (!task->pid)
@@ -825,8 +809,10 @@ void psi_task_change(struct task_struct *task, int clear, int set)
 
 	now = cpu_clock(cpu);
 
-	while ((group = iterate_groups(task, &iter)))
+	group = task_psi_group(task);
+	do {
 		psi_group_change(group, cpu, clear, set, now, true);
+	} while ((group = group->parent));
 }
 
 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
@@ -834,7 +820,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 {
 	struct psi_group *group, *common = NULL;
 	int cpu = task_cpu(prev);
-	void *iter;
 	u64 now = cpu_clock(cpu);
 
 	if (next->pid) {
@@ -844,8 +829,8 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 		 * ancestors with @prev, those will already have @prev's
 		 * TSK_ONCPU bit set, and we can stop the iteration there.
 		 */
-		iter = NULL;
-		while ((group = iterate_groups(next, &iter))) {
+		group = task_psi_group(next);
+		do {
 			if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
 			    PSI_ONCPU) {
 				common = group;
@@ -853,7 +838,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 			}
 
 			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
-		}
+		} while ((group = group->parent));
 	}
 
 	if (prev->pid) {
@@ -886,9 +871,12 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 
 		psi_flags_change(prev, clear, set);
 
-		iter = NULL;
-		while ((group = iterate_groups(prev, &iter)) && group != common)
+		group = task_psi_group(prev);
+		do {
+			if (group == common)
+				break;
 			psi_group_change(group, cpu, clear, set, now, wake_clock);
+		} while ((group = group->parent));
 
 		/*
 		 * TSK_ONCPU is handled up to the common ancestor. If there are
@@ -898,7 +886,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 		 */
 		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
 			clear &= ~TSK_ONCPU;
-			for (; group; group = iterate_groups(prev, &iter))
+			for (; group; group = group->parent)
 				psi_group_change(group, cpu, clear, set, now, wake_clock);
 		}
 	}
@@ -908,7 +896,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 void psi_account_irqtime(struct task_struct *task, u32 delta)
 {
 	int cpu = task_cpu(task);
-	void *iter = NULL;
 	struct psi_group *group;
 	struct psi_group_cpu *groupc;
 	u64 now;
@@ -918,7 +905,8 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
 
 	now = cpu_clock(cpu);
 
-	while ((group = iterate_groups(task, &iter))) {
+	group = task_psi_group(task);
+	do {
 		groupc = per_cpu_ptr(group->pcpu, cpu);
 
 		write_seqcount_begin(&groupc->seq);
@@ -930,7 +918,7 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
 
 		if (group->poll_states & (1 << PSI_IRQ_FULL))
 			psi_schedule_poll_work(group, 1);
-	}
+	} while ((group = group->parent));
 }
 #endif
 
@@ -1010,6 +998,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 		return -ENOMEM;
 	}
 	group_init(cgroup->psi);
+	cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
 	return 0;
 }
 
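One detail worth spelling out from the psi_task_switch() hunks above: the old loop stopped via the "group != common" test in its while condition, and the new do/while keeps the same semantics with an explicit break before touching the common ancestor, whose levels were already handled during @next's walk. A minimal userspace model of that cut-off (the hierarchy and names are hypothetical; the kernel's per-CPU state update is reduced to a printf):

	#include <stdio.h>

	/* Same simplified group shape as the sketch above. */
	struct psi_group {
		struct psi_group *parent;
		const char *name;
	};

	static struct psi_group sys_grp = { NULL, "system" };
	static struct psi_group a       = { &sys_grp, "A" };
	static struct psi_group b       = { &a, "A/B" };

	int main(void)
	{
		/* Pretend next's walk found PSI_ONCPU already set at "A",
		 * making it the common ancestor where prev's walk stops. */
		struct psi_group *common = &a;
		struct psi_group *group = &b;	/* prev's task_psi_group() */

		do {
			if (group == common)
				break;	/* this level and up were handled */
			printf("update %s for prev\n", group->name);
		} while ((group = group->parent));

		return 0;
	}

Here only "A/B" is updated for prev; "A" and "system" were covered when next's TSK_ONCPU walk reached them.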