author      Tejun Heo <tj@kernel.org>   2018-04-27 00:29:04 +0300
committer   Tejun Heo <tj@kernel.org>   2018-04-27 00:29:04 +0300
commit      c58632b3631cb222da41d9dc0dd39e106c1eafd0 (patch)
tree        bd8be17287250d1e79e7076ca310ca7912c854a7 /include/linux/cgroup-defs.h
parent      a5c2b93f79ef7d746a3cd2c1bd66833286f9be70 (diff)
cgroup: Rename stat to rstat
stat is too generic a name and ends up causing subtle confusion.
It is also about to be made generic so that controllers can plug into
it, which will only make the problem worse. Let's rename it to
something more specific - cgroup_rstat, for cgroup recursive stat.
This patch does the following renames, with no other changes.
* cpu_stat -> rstat_cpu
* stat -> rstat
* ?cstat -> ?rstatc
Note that the renames are selective: the names left untouched are the
ones which implement basic resource statistics on top of rstat. They
will be cleaned up further in the following patches. A hypothetical
before/after fragment illustrating the rename pattern follows the tag
below.
Signed-off-by: Tejun Heo <tj@kernel.org>
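To make the "?cstat -> ?rstatc" wildcard concrete, here is a hypothetical
before/after fragment in the style of a typical per-cpu accessor. It is a
sketch built only from the struct and field renames visible in the diff
below, not a hunk from this patch:

/* before: a local pointing at the per-cpu stat struct */
struct cgroup_cpu_stat *cstat = per_cpu_ptr(cgrp->cpu_stat, cpu);

/* after: the same access with the rstat naming */
struct cgroup_rstat_cpu *rstatc = per_cpu_ptr(cgrp->rstat_cpu, cpu);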
Diffstat (limited to 'include/linux/cgroup-defs.h')
 include/linux/cgroup-defs.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 133531fcfb33..04cb42419310 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -259,11 +259,11 @@ struct css_set {
 };
 
 /*
- * cgroup basic resource usage statistics.  Accounting is done per-cpu in
- * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
- * reads.
+ * rstat - cgroup scalable recursive statistics.  Accounting is done
+ * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * hierarchy on reads.
  *
- * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
+ * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
  * linked into the updated tree.  On the following read, propagation only
  * considers and consumes the updated tree.  This makes reading O(the
  * number of descendants which have been active since last read) instead of
@@ -274,7 +274,7 @@ struct css_set {
  * become very expensive.  By propagating selectively, increasing reading
  * frequency decreases the cost of each read.
  */
-struct cgroup_cpu_stat {
+struct cgroup_rstat_cpu {
 	/*
 	 * ->sync protects all the current counters.  These are the only
 	 * fields which get updated in the hot path.
@@ -297,7 +297,7 @@ struct cgroup_cpu_stat {
 	 * to the cgroup makes it unnecessary for each per-cpu struct to
 	 * point back to the associated cgroup.
 	 *
-	 * Protected by per-cpu cgroup_cpu_stat_lock.
+	 * Protected by per-cpu cgroup_rstat_cpu_lock.
	 */
 	struct cgroup *updated_children;	/* terminated by self cgroup */
 	struct cgroup *updated_next;		/* NULL iff not on the list */
@@ -408,8 +408,10 @@ struct cgroup {
 	 */
 	struct cgroup *dom_cgrp;
 
+	/* per-cpu recursive resource statistics */
+	struct cgroup_rstat_cpu __percpu *rstat_cpu;
+
 	/* cgroup basic resource statistics */
-	struct cgroup_cpu_stat __percpu *cpu_stat;
 	struct cgroup_stat pending_stat;	/* pending from children */
 	struct cgroup_stat stat;
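To make the comment's scalability claim concrete, below is a minimal
user-space sketch of the updated tree: one implied CPU, no locking, and
illustrative names (node, mark_updated, flush) that are assumptions of
this sketch rather than kernel identifiers. It models only how the
updated_children/updated_next links above let a read visit just the
subtrees that actually changed.

#include <stdio.h>
#include <stddef.h>

/* Single-CPU stand-in for a cgroup plus its cgroup_rstat_cpu fields. */
struct node {
	const char *name;
	struct node *parent;
	struct node *updated_children;	/* terminated by self, as in the kernel */
	struct node *updated_next;	/* NULL iff not on a list */
};

static void node_init(struct node *n, const char *name, struct node *parent)
{
	n->name = name;
	n->parent = parent;
	n->updated_children = n;	/* empty list points back at the owner */
	n->updated_next = NULL;
}

/*
 * On an update, link @n and any not-yet-linked ancestors into their
 * parents' updated lists.  Stops early: if @n is already linked, all
 * of its ancestors must be linked too.
 */
static void mark_updated(struct node *n)
{
	struct node *parent;

	for (parent = n->parent; parent; n = parent, parent = n->parent) {
		if (n->updated_next)
			break;
		n->updated_next = parent->updated_children;
		parent->updated_children = n;
	}
}

/*
 * A read consumes the updated tree: it visits (and unlinks) only the
 * subtrees that saw updates since the last read.
 */
static void flush(struct node *n)
{
	struct node *child = n->updated_children;

	printf("flushing %s\n", n->name);
	while (child != n) {		/* the owner terminates its own list */
		struct node *next = child->updated_next;

		child->updated_next = NULL;
		flush(child);
		child = next;
	}
	n->updated_children = n;
}

int main(void)
{
	struct node root, a, b, a1;

	node_init(&root, "root", NULL);
	node_init(&a, "a", &root);
	node_init(&b, "b", &root);
	node_init(&a1, "a1", &a);

	mark_updated(&a1);	/* links a1 under a, and a under root */
	flush(&root);		/* prints root, a, a1 -- never visits b */
	return 0;
}

Note how flush(&root) never touches b: a read costs O(number of
descendants active since the last read), which is exactly the property
the comment in the hunk above describes.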