| field | value | date |
|---|---|---|
| author | Shakeel Butt <shakeel.butt@linux.dev> | 2025-06-17 22:57:24 +0300 |
| committer | Tejun Heo <tj@kernel.org> | 2025-06-17 23:01:18 +0300 |
| commit | 6af89c6ca71742e9227e6f8172a86ce1ee16aa85 (patch) | |
| tree | 43c277395e33b31d96b681dd3ec4cfda0165c623 /include/linux | |
| parent | 36df6e3dbd7e7b074e55fec080012184e2fa3a46 (diff) | |
| download | linux-6af89c6ca71742e9227e6f8172a86ce1ee16aa85.tar.xz | |
cgroup: remove per-cpu per-subsystem locks
The rstat update side used to insert the cgroup whose stats are updated into
the update tree, and the read side would flush the update tree to get the
latest stats. The per-cpu per-subsystem locks were used to synchronize the
update and flush sides. However, the update side no longer accesses the
update tree; it uses per-cpu lockless lists instead, so there is no need for
locks to synchronize the update and flush sides. Let's remove them.
Suggested-by: JP Kobryn <inwardvessel@gmail.com>
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Tested-by: JP Kobryn <inwardvessel@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
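
As a rough illustration of the synchronization argument above (not the kernel's actual rstat code), the sketch below uses C11 atomics to mimic the lockless-list pattern: each updater pushes a node with a single compare-and-swap, much like llist_add(), and the flusher detaches the whole list with one atomic exchange, much like llist_del_all(), so no lock is needed between the two sides. All names in the sketch are hypothetical.

```c
/* Hypothetical user-space sketch of the lockless-list pattern; it is not the
 * kernel's rstat code, only an illustration of why the per-cpu locks can go.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int stat_delta;		/* stand-in for the per-cpu stats being reported */
};

struct lockless_head {
	_Atomic(struct node *) first;
};

/* Update side: lock-free push, safe to call concurrently (cf. llist_add()). */
static void lockless_push(struct lockless_head *head, struct node *n)
{
	struct node *old = atomic_load(&head->first);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&head->first, &old, n));
}

/* Flush side: detach the whole list in one atomic step (cf. llist_del_all())
 * and walk it privately; no lock against concurrent pushers is required.
 */
static struct node *lockless_del_all(struct lockless_head *head)
{
	return atomic_exchange(&head->first, NULL);
}

int main(void)
{
	struct lockless_head head = { NULL };
	int total = 0;

	for (int i = 1; i <= 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->stat_delta = i;
		lockless_push(&head, n);
	}

	for (struct node *n = lockless_del_all(&head); n; ) {
		struct node *next = n->next;

		total += n->stat_delta;
		free(n);
		n = next;
	}
	printf("flushed total = %d\n", total);
	return 0;
}
```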
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/cgroup-defs.h | 7 |
1 file changed, 0 insertions, 7 deletions
```diff
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 04191d99228c..6b93a64115fe 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -375,12 +375,6 @@ struct css_rstat_cpu {
 	 * Child cgroups with stat updates on this cpu since the last read
 	 * are linked on the parent's ->updated_children through
 	 * ->updated_next. updated_children is terminated by its container css.
-	 *
-	 * In addition to being more compact, singly-linked list pointing to
-	 * the css makes it unnecessary for each per-cpu struct to point back
-	 * to the associated css.
-	 *
-	 * Protected by per-cpu css->ss->rstat_ss_cpu_lock.
 	 */
 	struct cgroup_subsys_state *updated_children;
 	struct cgroup_subsys_state *updated_next;	/* NULL if not on the list */
@@ -824,7 +818,6 @@ struct cgroup_subsys {
 	unsigned int depends_on;
 
 	spinlock_t rstat_ss_lock;
-	raw_spinlock_t __percpu *rstat_ss_cpu_lock;
 	struct llist_head __percpu *lhead;	/* lockless update list head */
 };
```
