author     Ingo Molnar <mingo@kernel.org>  2020-02-24 13:36:09 +0300
committer  Ingo Molnar <mingo@kernel.org>  2020-02-24 13:36:09 +0300
commit     546121b65f47384e11ec1fa2e55449fc9f4846b2 (patch)
tree       8f18470ec7c0c77b0f48eb1b2338e591b0b0aaff /arch/mips/kernel/cacheinfo.c
parent     000619680c3714020ce9db17eef6a4a7ce2dc28b (diff)
parent     f8788d86ab28f61f7b46eb6be375f8a726783636 (diff)
Merge tag 'v5.6-rc3' into sched/core, to pick up fixes and dependent patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/mips/kernel/cacheinfo.c')
-rw-r--r--  arch/mips/kernel/cacheinfo.c  27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index f777e44653d5..47312c529410 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }
 
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+
+	for_each_possible_cpu(cpu1)
+		if (cpus_are_siblings(cpu, cpu1))
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+	int cluster = cpu_cluster(&cpu_data[cpu]);
+
+	for_each_possible_cpu(cpu1)
+		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
 	if (c->icache.waysize) {
+		/* L1 caches are per core */
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
 	} else {
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
 	}
 
-	if (c->scache.waysize)
+	if (c->scache.waysize) {
+		/* L2 cache is per cluster */
+		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+	}
 
 	if (c->tcache.waysize)
 		populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
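For context, and not part of the commit itself: the shared_cpu_map that these helpers fill is the per-leaf mask the generic cacheinfo driver exports through sysfs, so after this change a MIPS system should report which CPUs share each cache level under /sys/devices/system/cpu/cpuN/cache/indexM/shared_cpu_map. The following is a minimal userspace sketch that reads one such mask; the cpu0/index2 default path is only an example and the available index numbers depend on the machine.

/*
 * Hypothetical userspace example, not part of the kernel patch: read the
 * shared_cpu_map attribute that the cacheinfo core exposes from the mask
 * populated above.  The default path is an assumption and may not exist
 * on every system; pass another path as argv[1] if needed.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] :
		"/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_map";
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);	/* hex CPU mask, one bit per CPU */
	fclose(f);
	return EXIT_SUCCESS;
}

On a system covered by this patch, index2 would typically be the unified L2 leaf, whose mask should now span the whole cluster rather than just the reading CPU.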