Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/Makefile                |   3
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |   2
-rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c            |   9
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                  | 141
-rw-r--r--  arch/x86/kernel/cpu/cacheinfo.c             | 179
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  99
-rw-r--r--  arch/x86/kernel/cpu/hygon.c                 |   2
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 | 144
-rw-r--r--  arch/x86/kernel/cpu/intel_epb.c             |   7
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c         |   5
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c        | 205
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c       | 165
-rw-r--r--  arch/x86/kernel/cpu/mtrr/amd.c              |   8
-rw-r--r--  arch/x86/kernel/cpu/mtrr/centaur.c          |   8
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c            |  42
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c          | 107
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.c             | 173
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.h             |  15
-rw-r--r--  arch/x86/kernel/cpu/resctrl/monitor.c       |  49
-rw-r--r--  arch/x86/kernel/cpu/resctrl/pseudo_lock.c   |   4
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c      |  12
-rw-r--r--  arch/x86/kernel/cpu/sgx/encl.c              |   4
22 files changed, 609 insertions(+), 774 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index f10a921ee756..d7e3ceaf75c1 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -17,9 +17,6 @@ KMSAN_SANITIZE_common.o := n # As above, instrumenting secondary CPU boot code causes boot hangs. KCSAN_SANITIZE_common.o := n -# Make sure load_percpu_segment has no stackprotector -CFLAGS_common.o := -fno-stack-protector - obj-y := cacheinfo.o scattered.o topology.o obj-y += common.o obj-y += rdrand.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c75d75b9f11a..f769d6d08b43 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -983,7 +983,7 @@ static void init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH); /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */ - if (!cpu_has(c, X86_FEATURE_XENPV)) + if (!cpu_feature_enabled(X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); /* diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 1f60a2b27936..fdbb5f07448f 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void) static void disable_freq_invariance_workfn(struct work_struct *work) { + int cpu; + static_branch_disable(&arch_scale_freq_key); + + /* + * Set arch_freq_scale to a default value on all cpus + * This negates the effect of scaling + */ + for_each_possible_cpu(cpu) + per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE; } static DECLARE_WORK(disable_freq_invariance_work, diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 6daf84229548..bca0bd8f4846 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -787,6 +787,7 @@ enum retbleed_mitigation { RETBLEED_MITIGATION_IBPB, RETBLEED_MITIGATION_IBRS, RETBLEED_MITIGATION_EIBRS, + RETBLEED_MITIGATION_STUFF, }; enum retbleed_mitigation_cmd { @@ -794,6 +795,7 @@ enum retbleed_mitigation_cmd { RETBLEED_CMD_AUTO, RETBLEED_CMD_UNRET, RETBLEED_CMD_IBPB, + RETBLEED_CMD_STUFF, }; static const char * const retbleed_strings[] = { @@ -802,6 +804,7 @@ static const char * const retbleed_strings[] = { [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", + [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", }; static enum retbleed_mitigation retbleed_mitigation __ro_after_init = @@ -831,8 +834,12 @@ static int __init retbleed_parse_cmdline(char *str) retbleed_cmd = RETBLEED_CMD_UNRET; } else if (!strcmp(str, "ibpb")) { retbleed_cmd = RETBLEED_CMD_IBPB; + } else if (!strcmp(str, "stuff")) { + retbleed_cmd = RETBLEED_CMD_STUFF; } else if (!strcmp(str, "nosmt")) { retbleed_nosmt = true; + } else if (!strcmp(str, "force")) { + setup_force_cpu_bug(X86_BUG_RETBLEED); } else { pr_err("Ignoring unknown retbleed option (%s).", str); } @@ -879,6 +886,21 @@ static void __init retbleed_select_mitigation(void) } break; + case RETBLEED_CMD_STUFF: + if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) && + spectre_v2_enabled == SPECTRE_V2_RETPOLINE) { + retbleed_mitigation = RETBLEED_MITIGATION_STUFF; + + } else { + if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING)) + pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); + else + pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n"); + + goto do_cmd_auto; + } + break; + do_cmd_auto: case RETBLEED_CMD_AUTO: default: @@ -916,6 +938,12 @@ do_cmd_auto: 
mitigate_smt = true; break; + case RETBLEED_MITIGATION_STUFF: + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); + x86_set_skl_return_thunk(); + break; + default: break; } @@ -926,7 +954,7 @@ do_cmd_auto: /* * Let IBRS trump all on Intel without affecting the effects of the - * retbleed= cmdline option. + * retbleed= cmdline option except for call depth based stuffing */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { switch (spectre_v2_enabled) { @@ -939,7 +967,8 @@ do_cmd_auto: retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; break; default: - pr_err(RETBLEED_INTEL_MSG); + if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) + pr_err(RETBLEED_INTEL_MSG); } } @@ -1302,7 +1331,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return SPECTRE_V2_CMD_AUTO; } - if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { + if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; @@ -1413,6 +1442,7 @@ static void __init spectre_v2_select_mitigation(void) if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && boot_cpu_has_bug(X86_BUG_RETBLEED) && retbleed_cmd != RETBLEED_CMD_OFF && + retbleed_cmd != RETBLEED_CMD_STUFF && boot_cpu_has(X86_FEATURE_IBRS) && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { mode = SPECTRE_V2_IBRS; @@ -1951,6 +1981,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); task_update_spec_tif(task); + if (task == current) + indirect_branch_prediction_barrier(); break; default: return -ERANGE; @@ -2206,74 +2238,74 @@ static const char * const l1tf_vmx_states[] = { static ssize_t l1tf_show_state(char *buf) { if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) - return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); + return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && sched_smt_active())) { - return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, - l1tf_vmx_states[l1tf_vmx_mitigation]); + return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation]); } - return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, - l1tf_vmx_states[l1tf_vmx_mitigation], - sched_smt_active() ? "vulnerable" : "disabled"); + return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t itlb_multihit_show_state(char *buf) { if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || !boot_cpu_has(X86_FEATURE_VMX)) - return sprintf(buf, "KVM: Mitigation: VMX unsupported\n"); + return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); else if (!(cr4_read_shadow() & X86_CR4_VMXE)) - return sprintf(buf, "KVM: Mitigation: VMX disabled\n"); + return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); else if (itlb_multihit_kvm_mitigation) - return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); + return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); else - return sprintf(buf, "KVM: Vulnerable\n"); + return sysfs_emit(buf, "KVM: Vulnerable\n"); } #else static ssize_t l1tf_show_state(char *buf) { - return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); + return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); } static ssize_t itlb_multihit_show_state(char *buf) { - return sprintf(buf, "Processor vulnerable\n"); + return sysfs_emit(buf, "Processor vulnerable\n"); } #endif static ssize_t mds_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { - return sprintf(buf, "%s; SMT Host state unknown\n", - mds_strings[mds_mitigation]); + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mds_strings[mds_mitigation]); } if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { - return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], - (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : - sched_smt_active() ? "mitigated" : "disabled")); + return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : + sched_smt_active() ? "mitigated" : "disabled")); } - return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], - sched_smt_active() ? "vulnerable" : "disabled"); + return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); } static ssize_t tsx_async_abort_show_state(char *buf) { if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || (taa_mitigation == TAA_MITIGATION_OFF)) - return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); + return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { - return sprintf(buf, "%s; SMT Host state unknown\n", - taa_strings[taa_mitigation]); + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + taa_strings[taa_mitigation]); } - return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], - sched_smt_active() ? "vulnerable" : "disabled"); + return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t mmio_stale_data_show_state(char *buf) @@ -2341,73 +2373,72 @@ static char *pbrsb_eibrs_state(void) static ssize_t spectre_v2_show_state(char *buf) { if (spectre_v2_enabled == SPECTRE_V2_LFENCE) - return sprintf(buf, "Vulnerable: LFENCE\n"); + return sysfs_emit(buf, "Vulnerable: LFENCE\n"); if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) - return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); + return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); if (sched_smt_active() && unprivileged_ebpf_enabled() && spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) - return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); + return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); - return sprintf(buf, "%s%s%s%s%s%s%s\n", - spectre_v2_strings[spectre_v2_enabled], - ibpb_state(), - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", - stibp_state(), - boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", - pbrsb_eibrs_state(), - spectre_v2_module_string()); + return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", + spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", + pbrsb_eibrs_state(), + spectre_v2_module_string()); } static ssize_t srbds_show_state(char *buf) { - return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); + return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); } static ssize_t retbleed_show_state(char *buf) { if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && - boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) - return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); - return sprintf(buf, "%s; SMT %s\n", - retbleed_strings[retbleed_mitigation], - !sched_smt_active() ? "disabled" : - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? - "enabled with STIBP protection" : "vulnerable"); + return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], + !sched_smt_active() ? "disabled" : + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
+ "enabled with STIBP protection" : "vulnerable"); } - return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); + return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { if (!boot_cpu_has_bug(bug)) - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); switch (bug) { case X86_BUG_CPU_MELTDOWN: if (boot_cpu_has(X86_FEATURE_PTI)) - return sprintf(buf, "Mitigation: PTI\n"); + return sysfs_emit(buf, "Mitigation: PTI\n"); if (hypervisor_is_type(X86_HYPER_XEN_PV)) - return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); + return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); break; case X86_BUG_SPECTRE_V1: - return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); + return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: - return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); + return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); case X86_BUG_L1TF: if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) @@ -2437,7 +2468,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr break; } - return sprintf(buf, "Vulnerable\n"); + return sysfs_emit(buf, "Vulnerable\n"); } ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 66556833d7af..f4e5aa27eec6 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -11,15 +11,19 @@ #include <linux/slab.h> #include <linux/cacheinfo.h> #include <linux/cpu.h> +#include <linux/cpuhotplug.h> #include <linux/sched.h> #include <linux/capability.h> #include <linux/sysfs.h> #include <linux/pci.h> +#include <linux/stop_machine.h> #include <asm/cpufeature.h> #include <asm/cacheinfo.h> #include <asm/amd_nb.h> #include <asm/smp.h> +#include <asm/mtrr.h> +#include <asm/tlbflush.h> #include "cpu.h" @@ -35,6 +39,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); /* Shared L2 cache maps */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); +/* Kernel controls MTRR and/or PAT MSRs. */ +unsigned int memory_caching_control __ro_after_init; + struct _cache_table { unsigned char descriptor; char cache_type; @@ -1040,3 +1047,175 @@ int populate_cache_leaves(unsigned int cpu) return 0; } + +/* + * Disable and enable caches. Needed for changing MTRRs and the PAT MSR. + * + * Since we are disabling the cache don't allow any interrupts, + * they would run extremely slow and would only increase the pain. + * + * The caller must ensure that local interrupts are disabled and + * are reenabled after cache_enable() has been called. + */ +static unsigned long saved_cr4; +static DEFINE_RAW_SPINLOCK(cache_disable_lock); + +void cache_disable(void) __acquires(cache_disable_lock) +{ + unsigned long cr0; + + /* + * Note that this is not ideal + * since the cache is only flushed/disabled for this CPU while the + * MTRRs are changed, but changing this requires more invasive + * changes to the way the kernel boots + */ + + raw_spin_lock(&cache_disable_lock); + + /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. 
*/ + cr0 = read_cr0() | X86_CR0_CD; + write_cr0(cr0); + + /* + * Cache flushing is the most time-consuming step when programming + * the MTRRs. Fortunately, as per the Intel Software Development + * Manual, we can skip it if the processor supports cache self- + * snooping. + */ + if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) + wbinvd(); + + /* Save value of CR4 and clear Page Global Enable (bit 7) */ + if (cpu_feature_enabled(X86_FEATURE_PGE)) { + saved_cr4 = __read_cr4(); + __write_cr4(saved_cr4 & ~X86_CR4_PGE); + } + + /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); + flush_tlb_local(); + + if (cpu_feature_enabled(X86_FEATURE_MTRR)) + mtrr_disable(); + + /* Again, only flush caches if we have to. */ + if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) + wbinvd(); +} + +void cache_enable(void) __releases(cache_disable_lock) +{ + /* Flush TLBs (no need to flush caches - they are disabled) */ + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); + flush_tlb_local(); + + if (cpu_feature_enabled(X86_FEATURE_MTRR)) + mtrr_enable(); + + /* Enable caches */ + write_cr0(read_cr0() & ~X86_CR0_CD); + + /* Restore value of CR4 */ + if (cpu_feature_enabled(X86_FEATURE_PGE)) + __write_cr4(saved_cr4); + + raw_spin_unlock(&cache_disable_lock); +} + +static void cache_cpu_init(void) +{ + unsigned long flags; + + local_irq_save(flags); + cache_disable(); + + if (memory_caching_control & CACHE_MTRR) + mtrr_generic_set_state(); + + if (memory_caching_control & CACHE_PAT) + pat_cpu_init(); + + cache_enable(); + local_irq_restore(flags); +} + +static bool cache_aps_delayed_init = true; + +void set_cache_aps_delayed_init(bool val) +{ + cache_aps_delayed_init = val; +} + +bool get_cache_aps_delayed_init(void) +{ + return cache_aps_delayed_init; +} + +static int cache_rendezvous_handler(void *unused) +{ + if (get_cache_aps_delayed_init() || !cpu_online(smp_processor_id())) + cache_cpu_init(); + + return 0; +} + +void __init cache_bp_init(void) +{ + mtrr_bp_init(); + pat_bp_init(); + + if (memory_caching_control) + cache_cpu_init(); +} + +void cache_bp_restore(void) +{ + if (memory_caching_control) + cache_cpu_init(); +} + +static int cache_ap_init(unsigned int cpu) +{ + if (!memory_caching_control || get_cache_aps_delayed_init()) + return 0; + + /* + * Ideally we should hold mtrr_mutex here to avoid MTRR entries + * changed, but this routine will be called in CPU boot time, + * holding the lock breaks it. + * + * This routine is called in two cases: + * + * 1. very early time of software resume, when there absolutely + * isn't MTRR entry changes; + * + * 2. CPU hotadd time. 
We let mtrr_add/del_page hold cpuhotplug + * lock to prevent MTRR entry changes + */ + stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL, + cpu_callout_mask); + + return 0; +} + +/* + * Delayed cache initialization for all AP's + */ +void cache_aps_init(void) +{ + if (!memory_caching_control || !get_cache_aps_delayed_init()) + return; + + stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask); + set_cache_aps_delayed_init(false); +} + +static int __init cache_ap_register(void) +{ + cpuhp_setup_state_nocalls(CPUHP_AP_CACHECTRL_STARTING, + "x86/cachectrl:starting", + cache_ap_init, NULL); + return 0; +} +core_initcall(cache_ap_register); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3f66dd03c091..9cfca3d7d0e2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -52,6 +52,7 @@ #include <asm/cpu.h> #include <asm/mce.h> #include <asm/msr.h> +#include <asm/cacheinfo.h> #include <asm/memtype.h> #include <asm/microcode.h> #include <asm/microcode_intel.h> @@ -609,6 +610,7 @@ static __always_inline void setup_cet(struct cpuinfo_x86 *c) if (!ibt_selftest()) { pr_err("IBT selftest: Failed!\n"); + wrmsrl(MSR_IA32_S_CET, 0); setup_clear_cpu_cap(X86_FEATURE_IBT); return; } @@ -701,16 +703,6 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c) __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long)); __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long)); -void load_percpu_segment(int cpu) -{ -#ifdef CONFIG_X86_32 - loadsegment(fs, __KERNEL_PERCPU); -#else - __loadsegment_simple(gs, 0); - wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); -#endif -} - #ifdef CONFIG_X86_32 /* The 32-bit entry code needs to find cpu_entry_area. */ DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); @@ -738,16 +730,45 @@ void load_fixmap_gdt(int cpu) } EXPORT_SYMBOL_GPL(load_fixmap_gdt); -/* - * Current gdt points %fs at the "master" per-cpu area: after this, - * it's on the real one. +/** + * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base + * @cpu: The CPU number for which this is invoked + * + * Invoked during early boot to switch from early GDT and early per CPU to + * the direct GDT and the runtime per CPU area. On 32-bit the percpu base + * switch is implicit by loading the direct GDT. On 64bit this requires + * to update GSBASE. */ -void switch_to_new_gdt(int cpu) +void __init switch_gdt_and_percpu_base(int cpu) { - /* Load the original GDT */ load_direct_gdt(cpu); - /* Reload the per-cpu base */ - load_percpu_segment(cpu); + +#ifdef CONFIG_X86_64 + /* + * No need to load %gs. It is already correct. + * + * Writing %gs on 64bit would zero GSBASE which would make any per + * CPU operation up to the point of the wrmsrl() fault. + * + * Set GSBASE to the new offset. Until the wrmsrl() happens the + * early mapping is still valid. That means the GSBASE update will + * lose any prior per CPU data which was not copied over in + * setup_per_cpu_areas(). + * + * This works even with stackprotector enabled because the + * per CPU stack canary is 0 in both per CPU areas. + */ + wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); +#else + /* + * %fs is already set to __KERNEL_PERCPU, but after switching GDT + * it is required to load FS again so that the 'hidden' part is + * updated from the new GDT. Up to this point the early per CPU + * translation is active. Any content of the early per CPU data + * which was not copied over in setup_per_cpu_areas() is lost. 
+ */ + loadsegment(fs, __KERNEL_PERCPU); +#endif } static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; @@ -1948,7 +1969,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_32 enable_sep_cpu(); #endif - mtrr_ap_init(); validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); update_srbds_msr(); @@ -1993,27 +2013,18 @@ static __init int setup_clearcpuid(char *arg) } __setup("clearcpuid=", setup_clearcpuid); +DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = { + .current_task = &init_task, + .preempt_count = INIT_PREEMPT_COUNT, + .top_of_stack = TOP_OF_INIT_STACK, +}; +EXPORT_PER_CPU_SYMBOL(pcpu_hot); + #ifdef CONFIG_X86_64 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __aligned(PAGE_SIZE) __visible; EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); -/* - * The following percpu variables are hot. Align current_task to - * cacheline size such that they fall in the same cacheline. - */ -DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = - &init_task; -EXPORT_PER_CPU_SYMBOL(current_task); - -DEFINE_PER_CPU(void *, hardirq_stack_ptr); -DEFINE_PER_CPU(bool, hardirq_stack_inuse); - -DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; -EXPORT_PER_CPU_SYMBOL(__preempt_count); - -DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK; - static void wrmsrl_cstar(unsigned long val) { /* @@ -2064,20 +2075,6 @@ void syscall_init(void) #else /* CONFIG_X86_64 */ -DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; -EXPORT_PER_CPU_SYMBOL(current_task); -DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; -EXPORT_PER_CPU_SYMBOL(__preempt_count); - -/* - * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find - * the top of the kernel stack. Use an extra percpu variable to track the - * top of the kernel stack directly. - */ -DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = - (unsigned long)&init_thread_union + THREAD_SIZE; -EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); - #ifdef CONFIG_STACKPROTECTOR DEFINE_PER_CPU(unsigned long, __stack_chk_guard); EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); @@ -2248,12 +2245,6 @@ void cpu_init(void) boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE)) cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); - /* - * Initialize the per-CPU GDT with the boot GDT, - * and set up the GDT descriptor: - */ - switch_to_new_gdt(cpu); - if (IS_ENABLED(CONFIG_X86_64)) { loadsegment(fs, 0); memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index c393b8773ace..5a2962c492d3 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -339,7 +339,7 @@ static void init_hygon(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_ARAT); /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */ - if (!cpu_has(c, X86_FEATURE_XENPV)) + if (!cpu_feature_enabled(X86_FEATURE_XENPV)) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); check_null_seg_clears_base(c); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 427899650483..291d4167fab8 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -210,12 +210,154 @@ int intel_cpu_collect_info(struct ucode_cpu_info *uci) csig.rev = intel_get_microcode_revision(); uci->cpu_sig = csig; - uci->valid = 1; return 0; } EXPORT_SYMBOL_GPL(intel_cpu_collect_info); +/* + * Returns 1 if update has been found, 0 otherwise. 
+ */ +int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) +{ + struct microcode_header_intel *mc_hdr = mc; + struct extended_sigtable *ext_hdr; + struct extended_signature *ext_sig; + int i; + + if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) + return 1; + + /* Look for ext. headers: */ + if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE) + return 0; + + ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE; + ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; + + for (i = 0; i < ext_hdr->count; i++) { + if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) + return 1; + ext_sig++; + } + return 0; +} +EXPORT_SYMBOL_GPL(intel_find_matching_signature); + +/** + * intel_microcode_sanity_check() - Sanity check microcode file. + * @mc: Pointer to the microcode file contents. + * @print_err: Display failure reason if true, silent if false. + * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file. + * Validate if the microcode header type matches with the type + * specified here. + * + * Validate certain header fields and verify if computed checksum matches + * with the one specified in the header. + * + * Return: 0 if the file passes all the checks, -EINVAL if any of the checks + * fail. + */ +int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) +{ + unsigned long total_size, data_size, ext_table_size; + struct microcode_header_intel *mc_header = mc; + struct extended_sigtable *ext_header = NULL; + u32 sum, orig_sum, ext_sigcount = 0, i; + struct extended_signature *ext_sig; + + total_size = get_totalsize(mc_header); + data_size = get_datasize(mc_header); + + if (data_size + MC_HEADER_SIZE > total_size) { + if (print_err) + pr_err("Error: bad microcode data file size.\n"); + return -EINVAL; + } + + if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) { + if (print_err) + pr_err("Error: invalid/unknown microcode update format. Header type %d\n", + mc_header->hdrver); + return -EINVAL; + } + + ext_table_size = total_size - (MC_HEADER_SIZE + data_size); + if (ext_table_size) { + u32 ext_table_sum = 0; + u32 *ext_tablep; + + if (ext_table_size < EXT_HEADER_SIZE || + ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { + if (print_err) + pr_err("Error: truncated extended signature table.\n"); + return -EINVAL; + } + + ext_header = mc + MC_HEADER_SIZE + data_size; + if (ext_table_size != exttable_size(ext_header)) { + if (print_err) + pr_err("Error: extended signature table size mismatch.\n"); + return -EFAULT; + } + + ext_sigcount = ext_header->count; + + /* + * Check extended table checksum: the sum of all dwords that + * comprise a valid table must be 0. + */ + ext_tablep = (u32 *)ext_header; + + i = ext_table_size / sizeof(u32); + while (i--) + ext_table_sum += ext_tablep[i]; + + if (ext_table_sum) { + if (print_err) + pr_warn("Bad extended signature table checksum, aborting.\n"); + return -EINVAL; + } + } + + /* + * Calculate the checksum of update data and header. The checksum of + * valid update data and header including the extended signature table + * must be 0. + */ + orig_sum = 0; + i = (MC_HEADER_SIZE + data_size) / sizeof(u32); + while (i--) + orig_sum += ((u32 *)mc)[i]; + + if (orig_sum) { + if (print_err) + pr_err("Bad microcode data checksum, aborting.\n"); + return -EINVAL; + } + + if (!ext_table_size) + return 0; + + /* + * Check extended signature checksum: 0 => valid. 
+ */ + for (i = 0; i < ext_sigcount; i++) { + ext_sig = (void *)ext_header + EXT_HEADER_SIZE + + EXT_SIGNATURE_SIZE * i; + + sum = (mc_header->sig + mc_header->pf + mc_header->cksum) - + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); + if (sum) { + if (print_err) + pr_err("Bad extended signature checksum, aborting.\n"); + return -EINVAL; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); + static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c index fbaf12e43f41..3b8476158236 100644 --- a/arch/x86/kernel/cpu/intel_epb.c +++ b/arch/x86/kernel/cpu/intel_epb.c @@ -204,7 +204,12 @@ static int intel_epb_offline(unsigned int cpu) } static const struct x86_cpu_id intel_epb_normal[] = { - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 7), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, + ENERGY_PERF_BIAS_NORMAL_POWERSAVE), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, + ENERGY_PERF_BIAS_NORMAL_POWERSAVE), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, + ENERGY_PERF_BIAS_NORMAL_POWERSAVE), {} }; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 3a35dec3ec55..56471f750762 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -901,8 +901,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) * * These might be larger than 2K. */ -static enum ucode_state request_microcode_amd(int cpu, struct device *device, - bool refresh_fw) +static enum ucode_state request_microcode_amd(int cpu, struct device *device) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); @@ -911,7 +910,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, const struct firmware *fw; /* reload ucode container only on the boot cpu */ - if (!refresh_fw || !bsp) + if (!bsp) return UCODE_OK; if (c->x86 >= 0x15) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 6a41cee242f6..712aafff96e0 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -319,60 +319,6 @@ void reload_early_microcode(void) } } -static void collect_cpu_info_local(void *arg) -{ - struct cpu_info_ctx *ctx = arg; - - ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(), - ctx->cpu_sig); -} - -static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig) -{ - struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 }; - int ret; - - ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1); - if (!ret) - ret = ctx.err; - - return ret; -} - -static int collect_cpu_info(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - int ret; - - memset(uci, 0, sizeof(*uci)); - - ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig); - if (!ret) - uci->valid = 1; - - return ret; -} - -static void apply_microcode_local(void *arg) -{ - enum ucode_state *err = arg; - - *err = microcode_ops->apply_microcode(smp_processor_id()); -} - -static int apply_microcode_on_target(int cpu) -{ - enum ucode_state err; - int ret; - - ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); - if (!ret) { - if (err == UCODE_ERROR) - ret = 1; - } - return ret; -} - /* fake device for request_firmware */ static struct platform_device *microcode_pdev; @@ -458,7 +404,7 @@ static int __reload_late(void *info) * below. 
*/ if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - apply_microcode_local(&err); + err = microcode_ops->apply_microcode(cpu); else goto wait_for_siblings; @@ -480,7 +426,7 @@ wait_for_siblings: * revision. */ if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - apply_microcode_local(&err); + err = microcode_ops->apply_microcode(cpu); return ret; } @@ -531,7 +477,7 @@ static ssize_t reload_store(struct device *dev, if (ret) goto put; - tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev, true); + tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev); if (tmp_ret != UCODE_NEW) goto put; @@ -589,91 +535,17 @@ static void microcode_fini_cpu(int cpu) microcode_ops->microcode_fini_cpu(cpu); } -static enum ucode_state microcode_resume_cpu(int cpu) +static enum ucode_state microcode_init_cpu(int cpu) { - if (apply_microcode_on_target(cpu)) - return UCODE_ERROR; - - pr_debug("CPU%d updated upon resume\n", cpu); - - return UCODE_OK; -} - -static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw) -{ - enum ucode_state ustate; struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - if (uci->valid) - return UCODE_OK; - - if (collect_cpu_info(cpu)) - return UCODE_ERROR; - - /* --dimm. Trigger a delayed update? */ - if (system_state != SYSTEM_RUNNING) - return UCODE_NFOUND; - - ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev, refresh_fw); - if (ustate == UCODE_NEW) { - pr_debug("CPU%d updated upon init\n", cpu); - apply_microcode_on_target(cpu); - } - - return ustate; -} - -static enum ucode_state microcode_update_cpu(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - /* Refresh CPU microcode revision after resume. */ - collect_cpu_info(cpu); - - if (uci->valid) - return microcode_resume_cpu(cpu); - - return microcode_init_cpu(cpu, false); -} - -static int mc_device_add(struct device *dev, struct subsys_interface *sif) -{ - int err, cpu = dev->id; - - if (!cpu_online(cpu)) - return 0; - - pr_debug("CPU%d added\n", cpu); - - err = sysfs_create_group(&dev->kobj, &mc_attr_group); - if (err) - return err; + memset(uci, 0, sizeof(*uci)); - if (microcode_init_cpu(cpu, true) == UCODE_ERROR) - return -EINVAL; + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - return err; + return microcode_ops->apply_microcode(cpu); } -static void mc_device_remove(struct device *dev, struct subsys_interface *sif) -{ - int cpu = dev->id; - - if (!cpu_online(cpu)) - return; - - pr_debug("CPU%d removed\n", cpu); - microcode_fini_cpu(cpu); - sysfs_remove_group(&dev->kobj, &mc_attr_group); -} - -static struct subsys_interface mc_cpu_interface = { - .name = "microcode", - .subsys = &cpu_subsys, - .add_dev = mc_device_add, - .remove_dev = mc_device_remove, -}; - /** * microcode_bsp_resume - Update boot CPU microcode during resume. 
*/ @@ -682,21 +554,23 @@ void microcode_bsp_resume(void) int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - if (uci->valid && uci->mc) + if (uci->mc) microcode_ops->apply_microcode(cpu); - else if (!uci->mc) + else reload_early_microcode(); } static struct syscore_ops mc_syscore_ops = { - .resume = microcode_bsp_resume, + .resume = microcode_bsp_resume, }; static int mc_cpu_starting(unsigned int cpu) { - microcode_update_cpu(cpu); - pr_debug("CPU%d added\n", cpu); - return 0; + enum ucode_state err = microcode_ops->apply_microcode(cpu); + + pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); + + return err == UCODE_ERROR; } static int mc_cpu_online(unsigned int cpu) @@ -713,13 +587,30 @@ static int mc_cpu_down_prep(unsigned int cpu) struct device *dev; dev = get_cpu_device(cpu); + + microcode_fini_cpu(cpu); + /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); - pr_debug("CPU%d removed\n", cpu); + pr_debug("%s: CPU%d\n", __func__, cpu); return 0; } +static void setup_online_cpu(struct work_struct *work) +{ + int cpu = smp_processor_id(); + enum ucode_state err; + + err = microcode_init_cpu(cpu); + if (err == UCODE_ERROR) { + pr_err("Error applying microcode on CPU%d\n", cpu); + return; + } + + mc_cpu_online(cpu); +} + static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, @@ -750,28 +641,19 @@ static int __init microcode_init(void) if (!microcode_ops) return -ENODEV; - microcode_pdev = platform_device_register_simple("microcode", -1, - NULL, 0); + microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); if (IS_ERR(microcode_pdev)) return PTR_ERR(microcode_pdev); - cpus_read_lock(); - mutex_lock(µcode_mutex); - error = subsys_interface_register(&mc_cpu_interface); - mutex_unlock(µcode_mutex); - cpus_read_unlock(); - - if (error) - goto out_pdev; - - error = sysfs_create_group(&cpu_subsys.dev_root->kobj, - &cpu_root_microcode_group); - + error = sysfs_create_group(&cpu_subsys.dev_root->kobj, &cpu_root_microcode_group); if (error) { pr_err("Error creating microcode group!\n"); - goto out_driver; + goto out_pdev; } + /* Do per-CPU setup */ + schedule_on_each_cpu(setup_online_cpu); + register_syscore_ops(&mc_syscore_ops); cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", mc_cpu_starting, NULL); @@ -782,15 +664,6 @@ static int __init microcode_init(void) return 0; - out_driver: - cpus_read_lock(); - mutex_lock(µcode_mutex); - - subsys_interface_unregister(&mc_cpu_interface); - - mutex_unlock(µcode_mutex); - cpus_read_unlock(); - out_pdev: platform_device_unregister(microcode_pdev); return error; diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index fdd2c4a754ce..cac2bdb57f0b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -48,34 +48,6 @@ static int llc_size_per_core; /* * Returns 1 if update has been found, 0 otherwise. */ -static int find_matching_signature(void *mc, unsigned int csig, int cpf) -{ - struct microcode_header_intel *mc_hdr = mc; - struct extended_sigtable *ext_hdr; - struct extended_signature *ext_sig; - int i; - - if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) - return 1; - - /* Look for ext. 
headers: */ - if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE) - return 0; - - ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE; - ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; - - for (i = 0; i < ext_hdr->count; i++) { - if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) - return 1; - ext_sig++; - } - return 0; -} - -/* - * Returns 1 if update has been found, 0 otherwise. - */ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) { struct microcode_header_intel *mc_hdr = mc; @@ -83,7 +55,7 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev if (mc_hdr->rev <= new_rev) return 0; - return find_matching_signature(mc, csig, cpf); + return intel_find_matching_signature(mc, csig, cpf); } static struct ucode_patch *memdup_patch(void *data, unsigned int size) @@ -117,7 +89,7 @@ static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigne sig = mc_saved_hdr->sig; pf = mc_saved_hdr->pf; - if (find_matching_signature(data, sig, pf)) { + if (intel_find_matching_signature(data, sig, pf)) { prev_found = true; if (mc_hdr->rev <= mc_saved_hdr->rev) @@ -149,7 +121,7 @@ static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigne if (!p) return; - if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) + if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) return; /* @@ -163,104 +135,6 @@ static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigne intel_ucode_patch = p->data; } -static int microcode_sanity_check(void *mc, int print_err) -{ - unsigned long total_size, data_size, ext_table_size; - struct microcode_header_intel *mc_header = mc; - struct extended_sigtable *ext_header = NULL; - u32 sum, orig_sum, ext_sigcount = 0, i; - struct extended_signature *ext_sig; - - total_size = get_totalsize(mc_header); - data_size = get_datasize(mc_header); - - if (data_size + MC_HEADER_SIZE > total_size) { - if (print_err) - pr_err("Error: bad microcode data file size.\n"); - return -EINVAL; - } - - if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { - if (print_err) - pr_err("Error: invalid/unknown microcode update format.\n"); - return -EINVAL; - } - - ext_table_size = total_size - (MC_HEADER_SIZE + data_size); - if (ext_table_size) { - u32 ext_table_sum = 0; - u32 *ext_tablep; - - if ((ext_table_size < EXT_HEADER_SIZE) - || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { - if (print_err) - pr_err("Error: truncated extended signature table.\n"); - return -EINVAL; - } - - ext_header = mc + MC_HEADER_SIZE + data_size; - if (ext_table_size != exttable_size(ext_header)) { - if (print_err) - pr_err("Error: extended signature table size mismatch.\n"); - return -EFAULT; - } - - ext_sigcount = ext_header->count; - - /* - * Check extended table checksum: the sum of all dwords that - * comprise a valid table must be 0. - */ - ext_tablep = (u32 *)ext_header; - - i = ext_table_size / sizeof(u32); - while (i--) - ext_table_sum += ext_tablep[i]; - - if (ext_table_sum) { - if (print_err) - pr_warn("Bad extended signature table checksum, aborting.\n"); - return -EINVAL; - } - } - - /* - * Calculate the checksum of update data and header. The checksum of - * valid update data and header including the extended signature table - * must be 0. 
- */ - orig_sum = 0; - i = (MC_HEADER_SIZE + data_size) / sizeof(u32); - while (i--) - orig_sum += ((u32 *)mc)[i]; - - if (orig_sum) { - if (print_err) - pr_err("Bad microcode data checksum, aborting.\n"); - return -EINVAL; - } - - if (!ext_table_size) - return 0; - - /* - * Check extended signature checksum: 0 => valid. - */ - for (i = 0; i < ext_sigcount; i++) { - ext_sig = (void *)ext_header + EXT_HEADER_SIZE + - EXT_SIGNATURE_SIZE * i; - - sum = (mc_header->sig + mc_header->pf + mc_header->cksum) - - (ext_sig->sig + ext_sig->pf + ext_sig->cksum); - if (sum) { - if (print_err) - pr_err("Bad extended signature checksum, aborting.\n"); - return -EINVAL; - } - } - return 0; -} - /* * Get microcode matching with BSP's model. Only CPUs with the same model as * BSP can stay in the platform. @@ -281,13 +155,13 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) mc_size = get_totalsize(mc_header); if (!mc_size || mc_size > size || - microcode_sanity_check(data, 0) < 0) + intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; size -= mc_size; - if (!find_matching_signature(data, uci->cpu_sig.sig, - uci->cpu_sig.pf)) { + if (!intel_find_matching_signature(data, uci->cpu_sig.sig, + uci->cpu_sig.pf)) { data += mc_size; continue; } @@ -621,7 +495,6 @@ void load_ucode_intel_ap(void) else iup = &intel_ucode_patch; -reget: if (!*iup) { patch = __load_ucode_intel(&uci); if (!patch) @@ -632,12 +505,7 @@ reget: uci.mc = *iup; - if (apply_microcode_early(&uci, true)) { - /* Mixed-silicon system? Try to refetch the proper patch: */ - *iup = NULL; - - goto reget; - } + apply_microcode_early(&uci, true); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) @@ -652,9 +520,9 @@ static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) if (phdr->rev <= uci->cpu_sig.rev) continue; - if (!find_matching_signature(phdr, - uci->cpu_sig.sig, - uci->cpu_sig.pf)) + if (!intel_find_matching_signature(phdr, + uci->cpu_sig.sig, + uci->cpu_sig.pf)) continue; return iter->data; @@ -680,7 +548,6 @@ void reload_ucode_intel(void) static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { - static struct cpu_signature prev; struct cpuinfo_x86 *c = &cpu_data(cpu_num); unsigned int val[2]; @@ -696,13 +563,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) csig->rev = c->microcode; - /* No extra locking on prev, races are harmless. 
*/ - if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) { - pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n", - csig->sig, csig->pf, csig->rev); - prev = *csig; - } - return 0; } @@ -820,7 +680,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || - microcode_sanity_check(mc, 1) < 0) { + intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { break; } @@ -885,8 +745,7 @@ static bool is_blacklisted(unsigned int cpu) return false; } -static enum ucode_state request_microcode_fw(int cpu, struct device *device, - bool refresh_fw) +static enum ucode_state request_microcode_fw(int cpu, struct device *device) { struct cpuinfo_x86 *c = &cpu_data(cpu); const struct firmware *firmware; diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c index a65a0272096d..eff6ac62c0ff 100644 --- a/arch/x86/kernel/cpu/mtrr/amd.c +++ b/arch/x86/kernel/cpu/mtrr/amd.c @@ -109,7 +109,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) return 0; } -static const struct mtrr_ops amd_mtrr_ops = { +const struct mtrr_ops amd_mtrr_ops = { .vendor = X86_VENDOR_AMD, .set = amd_set_mtrr, .get = amd_get_mtrr, @@ -117,9 +117,3 @@ static const struct mtrr_ops amd_mtrr_ops = { .validate_add_page = amd_validate_add_page, .have_wrcomb = positive_have_wrcomb, }; - -int __init amd_init_mtrr(void) -{ - set_mtrr_ops(&amd_mtrr_ops); - return 0; -} diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c index f27177816569..b8a74eddde83 100644 --- a/arch/x86/kernel/cpu/mtrr/centaur.c +++ b/arch/x86/kernel/cpu/mtrr/centaur.c @@ -111,7 +111,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t return 0; } -static const struct mtrr_ops centaur_mtrr_ops = { +const struct mtrr_ops centaur_mtrr_ops = { .vendor = X86_VENDOR_CENTAUR, .set = centaur_set_mcr, .get = centaur_get_mcr, @@ -119,9 +119,3 @@ static const struct mtrr_ops centaur_mtrr_ops = { .validate_add_page = centaur_validate_add_page, .have_wrcomb = positive_have_wrcomb, }; - -int __init centaur_init_mtrr(void) -{ - set_mtrr_ops(¢aur_mtrr_ops); - return 0; -} diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index ca670919b561..173b9e01e623 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c @@ -234,51 +234,11 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, post_set(); } -typedef struct { - unsigned long base; - unsigned long size; - mtrr_type type; -} arr_state_t; - -static arr_state_t arr_state[8] = { - {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, - {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL} -}; - -static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 }; - -static void cyrix_set_all(void) -{ - int i; - - prepare_set(); - - /* the CCRs are not contiguous */ - for (i = 0; i < 4; i++) - setCx86(CX86_CCR0 + i, ccr_state[i]); - for (; i < 7; i++) - setCx86(CX86_CCR4 + i, ccr_state[i]); - - for (i = 0; i < 8; i++) { - cyrix_set_arr(i, arr_state[i].base, - arr_state[i].size, arr_state[i].type); - } - - post_set(); -} - -static const struct mtrr_ops cyrix_mtrr_ops = { +const struct mtrr_ops cyrix_mtrr_ops = { .vendor = X86_VENDOR_CYRIX, - .set_all = cyrix_set_all, .set = cyrix_set_arr, .get = cyrix_get_arr, .get_free_region = cyrix_get_free_region, 
.validate_add_page = generic_validate_add_page, .have_wrcomb = positive_have_wrcomb, }; - -int __init cyrix_init_mtrr(void) -{ - set_mtrr_ops(&cyrix_mtrr_ops); - return 0; -} diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 558108296f3c..ee09d359e08f 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -10,6 +10,7 @@ #include <linux/mm.h> #include <asm/processor-flags.h> +#include <asm/cacheinfo.h> #include <asm/cpufeature.h> #include <asm/tlbflush.h> #include <asm/mtrr.h> @@ -396,9 +397,6 @@ print_fixed(unsigned base, unsigned step, const mtrr_type *types) } } -static void prepare_set(void); -static void post_set(void); - static void __init print_mtrr_state(void) { unsigned int i; @@ -444,20 +442,6 @@ static void __init print_mtrr_state(void) pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); } -/* PAT setup for BP. We need to go through sync steps here */ -void __init mtrr_bp_pat_init(void) -{ - unsigned long flags; - - local_irq_save(flags); - prepare_set(); - - pat_init(); - - post_set(); - local_irq_restore(flags); -} - /* Grab all of the MTRR state for this CPU into *state */ bool __init get_mtrr_state(void) { @@ -684,7 +668,10 @@ static u32 deftype_lo, deftype_hi; /** * set_mtrr_state - Set the MTRR state for this CPU. * - * NOTE: The CPU must already be in a safe state for MTRR changes. + * NOTE: The CPU must already be in a safe state for MTRR changes, including + * measures that only a single CPU can be active in set_mtrr_state() in + * order to not be subject to races for usage of deftype_lo. This is + * accomplished by taking cache_disable_lock. * RETURNS: 0 if no changes made, else a mask indicating what was changed. */ static unsigned long set_mtrr_state(void) @@ -715,106 +702,34 @@ static unsigned long set_mtrr_state(void) return change_mask; } - -static unsigned long cr4; -static DEFINE_RAW_SPINLOCK(set_atomicity_lock); - -/* - * Since we are disabling the cache don't allow any interrupts, - * they would run extremely slow and would only increase the pain. - * - * The caller must ensure that local interrupts are disabled and - * are reenabled after post_set() has been called. - */ -static void prepare_set(void) __acquires(set_atomicity_lock) +void mtrr_disable(void) { - unsigned long cr0; - - /* - * Note that this is not ideal - * since the cache is only flushed/disabled for this CPU while the - * MTRRs are changed, but changing this requires more invasive - * changes to the way the kernel boots - */ - - raw_spin_lock(&set_atomicity_lock); - - /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ - cr0 = read_cr0() | X86_CR0_CD; - write_cr0(cr0); - - /* - * Cache flushing is the most time-consuming step when programming - * the MTRRs. Fortunately, as per the Intel Software Development - * Manual, we can skip it if the processor supports cache self- - * snooping. - */ - if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) - wbinvd(); - - /* Save value of CR4 and clear Page Global Enable (bit 7) */ - if (boot_cpu_has(X86_FEATURE_PGE)) { - cr4 = __read_cr4(); - __write_cr4(cr4 & ~X86_CR4_PGE); - } - - /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ - count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); - flush_tlb_local(); - /* Save MTRR state */ rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); /* Disable MTRRs, and set the default type to uncached */ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); - - /* Again, only flush caches if we have to. 
  */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
 }
 
-static void post_set(void) __releases(set_atomicity_lock)
+void mtrr_enable(void)
 {
-       /* Flush TLBs (no need to flush caches - they are disabled) */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       flush_tlb_local();
-
        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
-       /* Enable caches */
-       write_cr0(read_cr0() & ~X86_CR0_CD);
-
-       /* Restore value of CR4 */
-       if (boot_cpu_has(X86_FEATURE_PGE))
-               __write_cr4(cr4);
-       raw_spin_unlock(&set_atomicity_lock);
 }
 
-static void generic_set_all(void)
+void mtrr_generic_set_state(void)
 {
        unsigned long mask, count;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       prepare_set();
 
        /* Actually set the state */
        mask = set_mtrr_state();
 
-       /* also set PAT */
-       pat_init();
-
-       post_set();
-       local_irq_restore(flags);
-
        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof(mask) * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
-
 }
 
 /**
@@ -836,7 +751,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
        vr = &mtrr_state.var_ranges[reg];
 
        local_irq_save(flags);
-       prepare_set();
+       cache_disable();
 
        if (size == 0) {
                /*
@@ -855,7 +770,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
                mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
        }
 
-       post_set();
+       cache_enable();
        local_irq_restore(flags);
 }
 
@@ -914,8 +829,6 @@ int positive_have_wrcomb(void)
  * Generic structure...
  */
 const struct mtrr_ops generic_mtrr_ops = {
-       .use_intel_if           = 1,
-       .set_all                = generic_set_all,
        .get                    = generic_get_mtrr,
        .get_free_region        = generic_get_free_region,
        .set                    = generic_set_mtrr,
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2746cac9d8a9..783f3210d582 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -46,6 +46,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/rcupdate.h>
 
+#include <asm/cacheinfo.h>
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>
@@ -58,32 +59,18 @@
 #define MTRR_TO_PHYS_WC_OFFSET 1000
 
 u32 num_var_ranges;
-static bool __mtrr_enabled;
-
 static bool mtrr_enabled(void)
 {
-       return __mtrr_enabled;
+       return !!mtrr_if;
 }
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;
-
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
 
 const struct mtrr_ops *mtrr_if;
 
-static void set_mtrr(unsigned int reg, unsigned long base,
-                    unsigned long size, mtrr_type type);
-
-void __init set_mtrr_ops(const struct mtrr_ops *ops)
-{
-       if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
-               mtrr_ops[ops->vendor] = ops;
-}
-
 /* Returns non-zero if we have the write-combining memory type */
 static int have_wrcomb(void)
 {
@@ -119,11 +106,11 @@ static int have_wrcomb(void)
 }
 
 /* This function returns the number of variable MTRRs */
-static void __init set_num_var_ranges(void)
+static void __init set_num_var_ranges(bool use_generic)
 {
        unsigned long config = 0, dummy;
 
-       if (use_intel())
+       if (use_generic)
                rdmsr(MSR_MTRRcap, config, dummy);
        else if (is_cpu(AMD) || is_cpu(HYGON))
                config = 2;
@@ -160,25 +147,8 @@ static int mtrr_rendezvous_handler(void *info)
 {
        struct set_mtrr_data *data = info;
 
-       /*
-        * We use this same function to initialize the mtrrs during boot,
-        * resume, runtime cpu online and on an explicit request to set a
-        * specific MTRR.
-        *
-        * During boot or suspend, the state of the boot cpu's mtrrs has been
-        * saved, and we want to replicate that across all the cpus that come
-        * online (either at the end of boot or resume or during a runtime cpu
-        * online). If we're doing that, @reg is set to something special and on
-        * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
-        * started the boot/resume sequence, this might be a duplicate
-        * set_all()).
-        */
-       if (data->smp_reg != ~0U) {
-               mtrr_if->set(data->smp_reg, data->smp_base,
-                            data->smp_size, data->smp_type);
-       } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
-               mtrr_if->set_all();
-       }
+       mtrr_if->set(data->smp_reg, data->smp_base,
+                    data->smp_size, data->smp_type);
 
        return 0;
 }
@@ -248,19 +218,6 @@ static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
        stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
 }
 
-static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
-                                     unsigned long size, mtrr_type type)
-{
-       struct set_mtrr_data data = { .smp_reg = reg,
-                                     .smp_base = base,
-                                     .smp_size = size,
-                                     .smp_type = type
-                                   };
-
-       stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
-                                      cpu_callout_mask);
-}
-
 /**
  * mtrr_add_page - Add a memory type region
  * @base: Physical base address of region in pages (in units of 4 kB!)
@@ -617,20 +574,6 @@ int arch_phys_wc_index(int handle)
 }
 EXPORT_SYMBOL_GPL(arch_phys_wc_index);
 
-/*
- * HACK ALERT!
- * These should be called implicitly, but we can't yet until all the initcall
- * stuff is done...
- */
-static void __init init_ifs(void)
-{
-#ifndef CONFIG_X86_64
-       amd_init_mtrr();
-       cyrix_init_mtrr();
-       centaur_init_mtrr();
-#endif
-}
-
 /* The suspend/resume methods are only for CPU without MTRR. CPU using generic
  * MTRR driver doesn't require this
  */
@@ -686,10 +629,9 @@ int __initdata changed_by_mtrr_cleanup;
  */
 void __init mtrr_bp_init(void)
 {
+       const char *why = "(not available)";
        u32 phys_addr;
 
-       init_ifs();
-
        phys_addr = 32;
 
        if (boot_cpu_has(X86_FEATURE_MTRR)) {
@@ -730,21 +672,21 @@ void __init mtrr_bp_init(void)
        case X86_VENDOR_AMD:
                if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
                        /* Pre-Athlon (K6) AMD CPU MTRRs */
-                       mtrr_if = mtrr_ops[X86_VENDOR_AMD];
+                       mtrr_if = &amd_mtrr_ops;
                        size_or_mask = SIZE_OR_MASK_BITS(32);
                        size_and_mask = 0;
                }
                break;
        case X86_VENDOR_CENTAUR:
                if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
-                       mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
+                       mtrr_if = &centaur_mtrr_ops;
                        size_or_mask = SIZE_OR_MASK_BITS(32);
                        size_and_mask = 0;
                }
                break;
        case X86_VENDOR_CYRIX:
                if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
-                       mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
+                       mtrr_if = &cyrix_mtrr_ops;
                        size_or_mask = SIZE_OR_MASK_BITS(32);
                        size_and_mask = 0;
                }
@@ -754,58 +696,23 @@ void __init mtrr_bp_init(void)
                }
        }
 
-       if (mtrr_if) {
-               __mtrr_enabled = true;
-               set_num_var_ranges();
+       if (mtrr_enabled()) {
+               set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
                init_table();
-               if (use_intel()) {
+               if (mtrr_if == &generic_mtrr_ops) {
                        /* BIOS may override */
-                       __mtrr_enabled = get_mtrr_state();
-
-                       if (mtrr_enabled())
-                               mtrr_bp_pat_init();
-
-                       if (mtrr_cleanup(phys_addr)) {
-                               changed_by_mtrr_cleanup = 1;
-                               mtrr_if->set_all();
+                       if (get_mtrr_state()) {
+                               memory_caching_control |= CACHE_MTRR;
+                               changed_by_mtrr_cleanup = mtrr_cleanup(phys_addr);
+                       } else {
+                               mtrr_if = NULL;
+                               why = "by BIOS";
                        }
                }
        }
 
-       if (!mtrr_enabled()) {
-               pr_info("Disabled\n");
-
-               /*
-                * PAT initialization relies on MTRR's rendezvous handler.
-                * Skip PAT init until the handler can initialize both
-                * features independently.
-                */
-               pat_disable("MTRRs disabled, skipping PAT initialization too.");
-       }
-}
-
-void mtrr_ap_init(void)
-{
        if (!mtrr_enabled())
-               return;
-
-       if (!use_intel() || mtrr_aps_delayed_init)
-               return;
-
-       /*
-        * Ideally we should hold mtrr_mutex here to avoid mtrr entries
-        * changed, but this routine will be called in cpu boot time,
-        * holding the lock breaks it.
-        *
-        * This routine is called in two cases:
-        *
-        * 1. very early time of software resume, when there absolutely
-        *    isn't mtrr entry changes;
-        *
-        * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
-        *    lock to prevent mtrr entry changes
-        */
-       set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
+               pr_info("MTRRs disabled %s\n", why);
 }
 
 /**
@@ -823,50 +730,12 @@ void mtrr_save_state(void)
        smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 }
 
-void set_mtrr_aps_delayed_init(void)
-{
-       if (!mtrr_enabled())
-               return;
-       if (!use_intel())
-               return;
-
-       mtrr_aps_delayed_init = true;
-}
-
-/*
- * Delayed MTRR initialization for all AP's
- */
-void mtrr_aps_init(void)
-{
-       if (!use_intel() || !mtrr_enabled())
-               return;
-
-       /*
-        * Check if someone has requested the delay of AP MTRR initialization,
-        * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
-        * then we are done.
-        */
-       if (!mtrr_aps_delayed_init)
-               return;
-
-       set_mtrr(~0U, 0, 0, 0);
-       mtrr_aps_delayed_init = false;
-}
-
-void mtrr_bp_restore(void)
-{
-       if (!use_intel() || !mtrr_enabled())
-               return;
-
-       mtrr_if->set_all();
-}
-
 static int __init mtrr_init_finialize(void)
 {
        if (!mtrr_enabled())
                return 0;
 
-       if (use_intel()) {
+       if (memory_caching_control & CACHE_MTRR) {
                if (!changed_by_mtrr_cleanup)
                        mtrr_state_warn();
                return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 2ac99e561181..02eb5871492d 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -14,11 +14,8 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 
 struct mtrr_ops {
        u32     vendor;
-       u32     use_intel_if;
        void    (*set)(unsigned int reg, unsigned long base,
                       unsigned long size, mtrr_type type);
-       void    (*set_all)(void);
-
        void    (*get)(unsigned int reg, unsigned long *base,
                       unsigned long *size, mtrr_type *type);
        int     (*get_free_region)(unsigned long base, unsigned long size,
@@ -53,15 +50,11 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
 void fill_mtrr_var_range(unsigned int index,
                u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
 bool get_mtrr_state(void);
-void mtrr_bp_pat_init(void);
-
-extern void __init set_mtrr_ops(const struct mtrr_ops *ops);
 
 extern u64 size_or_mask, size_and_mask;
 extern const struct mtrr_ops *mtrr_if;
 
 #define is_cpu(vnd)    (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel()    (mtrr_if && mtrr_if->use_intel_if == 1)
 
 extern unsigned int num_var_ranges;
 extern u64 mtrr_tom2;
@@ -71,10 +64,10 @@ void mtrr_state_warn(void);
 const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
 
-/* CPU specific mtrr init functions */
-int amd_init_mtrr(void);
-int cyrix_init_mtrr(void);
-int centaur_init_mtrr(void);
+/* CPU specific mtrr_ops vectors. */
+extern const struct mtrr_ops amd_mtrr_ops;
+extern const struct mtrr_ops cyrix_mtrr_ops;
+extern const struct mtrr_ops centaur_mtrr_ops;
 
 extern int changed_by_mtrr_cleanup;
 extern int mtrr_cleanup(unsigned address_bits);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index efe0c30d3a12..77538abeb72a 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
        return entry;
 }
 
+static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+{
+       u64 msr_val;
+
+       /*
+        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+        * with a valid event code for supported resource type and the bits
+        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+        * are error bits.
+        */
+       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+       rdmsrl(MSR_IA32_QM_CTR, msr_val);
+
+       if (msr_val & RMID_VAL_ERROR)
+               return -EIO;
+       if (msr_val & RMID_VAL_UNAVAIL)
+               return -EINVAL;
+
+       *val = msr_val;
+       return 0;
+}
+
 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
                                                 u32 rmid,
                                                 enum resctrl_event_id eventid)
@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
        struct arch_mbm_state *am;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
-       if (am)
+       if (am) {
                memset(am, 0, sizeof(*am));
+
+               /* Record any initial, non-zero count value. */
+               __rmid_read(rmid, eventid, &am->prev_msr);
+       }
 }
 
 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct arch_mbm_state *am;
        u64 msr_val, chunks;
+       int ret;
 
        if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
                return -EINVAL;
 
-       /*
-        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
-        * with a valid event code for supported resource type and the bits
-        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
-        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
-        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
-        * are error bits.
-        */
-       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
-       rdmsrl(MSR_IA32_QM_CTR, msr_val);
-
-       if (msr_val & RMID_VAL_ERROR)
-               return -EIO;
-       if (msr_val & RMID_VAL_UNAVAIL)
-               return -EINVAL;
+       ret = __rmid_read(rmid, eventid, &msr_val);
+       if (ret)
+               return ret;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
        if (am) {
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index ba8d0763b36b..524f8ff3e69c 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1560,9 +1560,9 @@ static const struct file_operations pseudo_lock_dev_fops = {
        .mmap =         pseudo_lock_dev_mmap,
 };
 
-static char *pseudo_lock_devnode(struct device *dev, umode_t *mode)
+static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
 {
-       struct rdtgroup *rdtgrp;
+       const struct rdtgroup *rdtgrp;
 
        rdtgrp = dev_get_drvdata(dev);
        if (mode)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index e5a48f05e787..5993da21d822 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
        /*
         * Ensure the task's closid and rmid are written before determining if
         * the task is current that will decide if it will be interrupted.
+        * This pairs with the full barrier between the rq->curr update and
+        * resctrl_sched_in() during context switch.
         */
-       barrier();
+       smp_mb();
 
        /*
         * By now, the task's closid and rmid are set. If the task is current
@@ -2402,6 +2404,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
                        WRITE_ONCE(t->rmid, to->mon.rmid);
 
                        /*
+                        * Order the closid/rmid stores above before the loads
+                        * in task_curr(). This pairs with the full barrier
+                        * between the rq->curr update and resctrl_sched_in()
+                        * during context switch.
+                        */
+                       smp_mb();
+
+                       /*
                         * If the task is on a CPU, set the CPU in the mask.
                         * The detection is inaccurate as tasks might move or
                         * schedule before the smp function call takes place.
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 68f8b18d2278..2a0e90fe2abc 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -268,7 +268,7 @@ static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
                                                unsigned long addr,
                                                unsigned long vm_flags)
 {
-       unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+       unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
        struct sgx_encl_page *entry;
 
        entry = xa_load(&encl->page_array, PFN_DOWN(addr));
@@ -502,7 +502,7 @@ static void sgx_vma_open(struct vm_area_struct *vma)
 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
                     unsigned long end, unsigned long vm_flags)
 {
-       unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+       unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
        struct sgx_encl_page *page;
        unsigned long count = 0;
        int ret = 0;
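
Note on the resctrl/monitor.c hunks above: the IA32_QM_EVTSEL/IA32_QM_CTR access is factored into __rmid_read(), and resctrl_arch_reset_rmid() now seeds the saved prev_msr with the first raw counter value so that later reads only account for the delta. The plain C below is a minimal sketch of that overflow-corrected delta computation; struct mbm_sketch, the stub values and the 24-bit width are illustrative assumptions, not part of the patch (the kernel derives the real counter width from CPUID and uses mbm_overflow_count() on arch_mbm_state).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's per-RMID arch_mbm_state. */
struct mbm_sketch {
	uint64_t prev_msr;	/* last raw counter value, seeded on reset */
	uint64_t chunks;	/* accumulated, overflow-corrected chunks */
};

/*
 * Overflow-corrected difference between two raw counter reads whose valid
 * data occupies only the low 'width' bits (models mbm_overflow_count()).
 */
static uint64_t overflow_count(uint64_t prev_msr, uint64_t cur_msr,
			       unsigned int width)
{
	uint64_t shift = 64 - width;

	return ((cur_msr << shift) - (prev_msr << shift)) >> shift;
}

/* Models the accounting step in resctrl_arch_rmid_read(). */
static uint64_t mbm_update(struct mbm_sketch *m, uint64_t cur_msr,
			   unsigned int width)
{
	m->chunks += overflow_count(m->prev_msr, cur_msr, width);
	m->prev_msr = cur_msr;
	return m->chunks;
}

int main(void)
{
	/* Assume a 24-bit counter for the example; real width comes from CPUID. */
	const unsigned int width = 24;
	struct mbm_sketch m = { .prev_msr = 0xfffff0, .chunks = 0 };

	/* Counter wrapped from 0xfffff0 to 0x000010: delta is 0x20, not huge. */
	printf("chunks = %llu\n",
	       (unsigned long long)mbm_update(&m, 0x000010, width));
	return 0;
}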
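
Note on the rdtgroup.c hunks above: both added smp_mb() calls order the CLOSID/RMID stores before the subsequent task_curr() check, pairing with the full barrier on the context-switch side. A user-space C11 sketch of that "store, full fence, then read the other side's flag" pattern is shown below; the thread bodies, variable names and the final assertion are illustrative only and are not kernel code.

#include <stdatomic.h>
#include <pthread.h>
#include <assert.h>

/* "move task" side: publish a new id, then check whether the task runs. */
static atomic_int task_closid;
/* "context switch" side: mark the task current, then load the id. */
static atomic_int task_is_current;

static int seen_by_writer, seen_by_switch;

static void *move_task(void *arg)
{
	(void)arg;
	atomic_store_explicit(&task_closid, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* analogue of smp_mb() */
	seen_by_writer = atomic_load_explicit(&task_is_current,
					      memory_order_relaxed);
	return NULL;
}

static void *context_switch(void *arg)
{
	(void)arg;
	atomic_store_explicit(&task_is_current, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in switch path */
	seen_by_switch = atomic_load_explicit(&task_closid,
					      memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, move_task, NULL);
	pthread_create(&b, NULL, context_switch, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both fences, at least one side must observe the other's store. */
	assert(seen_by_writer == 1 || seen_by_switch == 1);
	return 0;
}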