author      Ahmed S. Darwish <darwi@linutronix.de>    2025-03-24 16:33:19 +0300
committer   Ingo Molnar <mingo@kernel.org>            2025-03-25 12:23:18 +0300
commit      66122616e212f1b9fee7d03582a5fdab2e8ed0e4 (patch)
tree        2d65677b96a5b0439af2da37df5cb3ebc3268105
parent      5adfd367589cf9bb984aabfab74107bcf4402fde (diff)
download    linux-66122616e212f1b9fee7d03582a5fdab2e8ed0e4.tar.xz
x86/cacheinfo: Separate Intel CPUID leaf 0x4 handling
init_intel_cacheinfo() was overly complex. It parsed leaf 0x4 data,
leaf 0x2 data, and performed post-processing, all within one function.
Parent commits moved leaf 0x2 parsing and the post-processing logic
into their own functions.

Continue the refactoring by extracting leaf 0x4 parsing into its own
function. Initialize the local L2/L3 topology ID variables to BAD_APICID
by default, thus ensuring they can be used unconditionally.
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250324133324.23458-25-darwi@linutronix.de
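
The BAD_APICID initialization mentioned above is a sentinel pattern: the L2/L3
topology IDs start out invalid, the leaf 0x4 walk may overwrite them, and the
post-processing step can then assign both unconditionally instead of guarding
each assignment. A minimal user-space sketch of that pattern follows (an
approximation, not kernel code: the struct and the ID values are hypothetical,
and BAD_APICID mirrors the kernel's 0xFFFFFFFFu definition):

#include <stdio.h>

#define BAD_APICID 0xffffffffu

struct topo {
	unsigned int l2c_id;
	unsigned int llc_id;
};

/*
 * Post-processing in the style of intel_cacheinfo_0x4(): because the IDs
 * default to BAD_APICID, no "if (l2) ... if (l3) ..." guards are needed;
 * the sentinel makes both assignments safe even when a cache level was
 * never enumerated.
 */
static void set_cache_topo(struct topo *t, unsigned int l2_id, unsigned int l3_id)
{
	t->l2c_id = l2_id;
	t->llc_id = (l3_id == BAD_APICID) ? l2_id : l3_id;
}

int main(void)
{
	struct topo t;

	/* Enumerated L2 but no L3: the LLC ID falls back to the L2 ID. */
	set_cache_topo(&t, 0x10, BAD_APICID);
	printf("l2c_id=%#x llc_id=%#x\n", t.l2c_id, t.llc_id);

	/* Both levels enumerated: the LLC ID is the L3 ID. */
	set_cache_topo(&t, 0x10, 0x20);
	printf("l2c_id=%#x llc_id=%#x\n", t.l2c_id, t.llc_id);
	return 0;
}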
-rw-r--r--   arch/x86/kernel/cpu/cacheinfo.c   110
1 file changed, 54 insertions(+), 56 deletions(-)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index b39aad1ecf9c..72cc32d22c4d 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -400,73 +400,71 @@ static void intel_cacheinfo_0x2(struct cpuinfo_x86 *c)
 	intel_cacheinfo_done(c, l3, l2, l1i, l1d);
 }
 
-void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+static bool intel_cacheinfo_0x4(struct cpuinfo_x86 *c)
 {
 	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
-	unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
-	unsigned int l2_id = 0, l3_id = 0;
-
-	if (c->cpuid_level > 3) {
-		/*
-		 * There should be at least one leaf. A non-zero value means
-		 * that the number of leaves has been initialized.
-		 */
-		if (!ci->num_leaves)
-			ci->num_leaves = find_num_cache_leaves(c);
+	unsigned int l2_id = BAD_APICID, l3_id = BAD_APICID;
+	unsigned int l1d = 0, l1i = 0, l2 = 0, l3 = 0;
 
-		/*
-		 * Whenever possible use cpuid(4), deterministic cache
-		 * parameters cpuid leaf to find the cache details
-		 */
-		for (int i = 0; i < ci->num_leaves; i++) {
-			unsigned int num_threads_sharing, index_msb;
-			struct _cpuid4_info id4 = {};
-			int retval;
+	if (c->cpuid_level < 4)
+		return false;
 
-			retval = intel_fill_cpuid4_info(i, &id4);
-			if (retval < 0)
-				continue;
+	/*
+	 * There should be at least one leaf. A non-zero value means
+	 * that the number of leaves has been previously initialized.
+	 */
+	if (!ci->num_leaves)
+		ci->num_leaves = find_num_cache_leaves(c);
 
-			switch (id4.eax.split.level) {
-			case 1:
-				if (id4.eax.split.type == CTYPE_DATA)
-					l1d = id4.size / 1024;
-				else if (id4.eax.split.type == CTYPE_INST)
-					l1i = id4.size / 1024;
-				break;
-			case 2:
-				l2 = id4.size / 1024;
-				num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
-				index_msb = get_count_order(num_threads_sharing);
-				l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
-				break;
-			case 3:
-				l3 = id4.size / 1024;
-				num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
-				index_msb = get_count_order(num_threads_sharing);
-				l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
-				break;
-			default:
-				break;
-			}
+	if (!ci->num_leaves)
+		return false;
+
+	for (int i = 0; i < ci->num_leaves; i++) {
+		unsigned int num_threads_sharing, index_msb;
+		struct _cpuid4_info id4 = {};
+		int ret;
+
+		ret = intel_fill_cpuid4_info(i, &id4);
+		if (ret < 0)
+			continue;
+
+		switch (id4.eax.split.level) {
+		case 1:
+			if (id4.eax.split.type == CTYPE_DATA)
+				l1d = id4.size / 1024;
+			else if (id4.eax.split.type == CTYPE_INST)
+				l1i = id4.size / 1024;
+			break;
+		case 2:
+			l2 = id4.size / 1024;
+			num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
+			index_msb = get_count_order(num_threads_sharing);
+			l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
+			break;
+		case 3:
+			l3 = id4.size / 1024;
+			num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
+			index_msb = get_count_order(num_threads_sharing);
+			l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
+			break;
+		default:
+			break;
 		}
 	}
 
+	c->topo.l2c_id = l2_id;
+	c->topo.llc_id = (l3_id == BAD_APICID) ? l2_id : l3_id;
+	intel_cacheinfo_done(c, l3, l2, l1i, l1d);
+	return true;
+}
+
+void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+{
 	/* Don't use CPUID(2) if CPUID(4) is supported. */
-	if (!ci->num_leaves && c->cpuid_level > 1) {
-		intel_cacheinfo_0x2(c);
+	if (intel_cacheinfo_0x4(c))
 		return;
-	}
-
-	if (l2) {
-		c->topo.llc_id = l2_id;
-		c->topo.l2c_id = l2_id;
-	}
-
-	if (l3)
-		c->topo.llc_id = l3_id;
 
-	intel_cacheinfo_done(c, l3, l2, l1i, l1d);
+	intel_cacheinfo_0x2(c);
 }
 
 static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
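
For readers unfamiliar with leaf 0x4, the new intel_cacheinfo_0x4() boils down
to the walk sketched below: enumerate CPUID(4) subleaves until the cache-type
field reads zero, size each cache from its ways/partitions/line-size/sets
fields, and derive a shared-cache ID by masking off the low APIC-ID bits that
vary between threads sharing the cache. This is a user-space approximation
built on the GCC/Clang <cpuid.h> helper; get_count_order() is re-implemented
locally, and the APIC ID is a made-up example rather than the running CPU's:

#include <stdio.h>
#include <cpuid.h>

/* Smallest order with (1 << order) >= n, mirroring the kernel helper. */
static int get_count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid = 0x13;	/* hypothetical APIC ID, not queried */
	unsigned int eax, ebx, ecx, edx;

	for (unsigned int i = 0; ; i++) {
		__cpuid_count(4, i, eax, ebx, ecx, edx);

		unsigned int type = eax & 0x1f;		/* 0: no more caches */
		if (!type)
			break;

		unsigned int level      = (eax >> 5) & 0x7;
		unsigned int sharing    = ((eax >> 14) & 0xfff) + 1;
		unsigned int line_size  = (ebx & 0xfff) + 1;
		unsigned int partitions = ((ebx >> 12) & 0x3ff) + 1;
		unsigned int ways       = ((ebx >> 22) & 0x3ff) + 1;
		unsigned int size_kb    = ways * partitions * line_size * (ecx + 1) / 1024;

		/* Same masking as the l2_id/l3_id computation above. */
		int index_msb = get_count_order(sharing);
		unsigned int cache_id = apicid & ~((1u << index_msb) - 1);

		printf("index %u: L%u, %u KB, %u threads sharing -> id %#x\n",
		       i, level, size_kb, sharing, cache_id);
	}
	return 0;
}

Threads whose APIC IDs differ only in the masked low bits report the same ID;
that shared value is what the kernel stores in c->topo.l2c_id and
c->topo.llc_id.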