Diffstat (limited to 'arch/x86/kernel')
67 files changed, 1463 insertions, 768 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 92c76bf97ad8..2624de16cd7a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
 /**
  * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
  *			    has been registered
- * @handle:	ACPI handle of the IOAPIC deivce
+ * @handle:	ACPI handle of the IOAPIC device
  * @gsi_base:	GSI base associated with the IOAPIC
  *
  * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
@@ -1776,5 +1776,5 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
 
 u64 x86_default_get_root_pointer(void)
 {
-	return boot_params.hdr.acpi_rsdp_addr;
+	return boot_params.acpi_rsdp_addr;
 }
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index a6eca647bc76..cc51275c8759 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -11,14 +11,15 @@
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
+#include <linux/pci_ids.h>
 #include <asm/amd_nb.h>
 
 #define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
-#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
+#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
 #define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
 
 /* Protect the PCI config register pairs used for SMN and DF indirect access. */
 static DEFINE_MUTEX(smn_mutex);
@@ -28,9 +29,11 @@ static u32 *flush_words;
 static const struct pci_device_id amd_root_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
 	{}
 };
 
+
 #define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
 
 const struct pci_device_id amd_nb_misc_ids[] = {
@@ -44,6 +47,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{}
 };
@@ -57,6 +61,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
 	{}
 };
@@ -214,7 +219,10 @@ int amd_cache_northbridges(void)
 	const struct pci_device_id *root_ids = amd_root_ids;
 	struct pci_dev *root, *misc, *link;
 	struct amd_northbridge *nb;
-	u16 i = 0;
+	u16 roots_per_misc = 0;
+	u16 misc_count = 0;
+	u16 root_count = 0;
+	u16 i, j;
 
 	if (amd_northbridges.num)
 		return 0;
@@ -227,26 +235,55 @@ int amd_cache_northbridges(void)
 	misc = NULL;
 	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
-		i++;
+		misc_count++;
 
-	if (!i)
+	if (!misc_count)
 		return -ENODEV;
 
-	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
+	root = NULL;
+	while ((root = next_northbridge(root, root_ids)) != NULL)
+		root_count++;
+
+	if (root_count) {
+		roots_per_misc = root_count / misc_count;
+
+		/*
+		 * There should be _exactly_ N roots for each DF/SMN
+		 * interface.
+		 */
+		if (!roots_per_misc || (root_count % roots_per_misc)) {
+			pr_info("Unsupported AMD DF/PCI configuration found\n");
+			return -ENODEV;
+		}
+	}
+
+	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
 	if (!nb)
 		return -ENOMEM;
 
 	amd_northbridges.nb = nb;
-	amd_northbridges.num = i;
+	amd_northbridges.num = misc_count;
 
 	link = misc = root = NULL;
-	for (i = 0; i != amd_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		node_to_amd_nb(i)->root = root =
 			next_northbridge(root, root_ids);
 		node_to_amd_nb(i)->misc = misc =
 			next_northbridge(misc, misc_ids);
 		node_to_amd_nb(i)->link = link =
 			next_northbridge(link, link_ids);
+
+		/*
+		 * If there are more PCI root devices than data fabric/
+		 * system management network interfaces, then the (N)
+		 * PCI roots per DF/SMN interface are functionally the
+		 * same (for DF/SMN access) and N-1 are redundant. N-1
+		 * PCI roots should be skipped per DF/SMN interface so
+		 * the following DF/SMN interfaces get mapped to
+		 * correct PCI roots.
+		 */
+		for (j = 1; j < roots_per_misc; j++)
+			root = next_northbridge(root, root_ids);
 	}
 
 	if (amd_gart_present())
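Reviewer note: the amd_cache_northbridges() pairing logic above is easier to see in isolation. A minimal user-space sketch of the root-skipping arithmetic, assuming hypothetical device counts (plain integers stand in for the enumerated PCI devices):

#include <stdio.h>

/* Hypothetical counts standing in for the enumerated PCI devices. */
#define ROOT_COUNT	8
#define MISC_COUNT	4

int main(void)
{
	unsigned int roots_per_misc = ROOT_COUNT / MISC_COUNT;
	unsigned int i;

	/* Reject the same layouts amd_cache_northbridges() rejects. */
	if (!roots_per_misc || (ROOT_COUNT % roots_per_misc)) {
		puts("Unsupported AMD DF/PCI configuration");
		return 1;
	}

	/*
	 * DF/SMN interface i pairs with the first of its N equivalent
	 * roots; the remaining N-1 are skipped, which is what the new
	 * j-loop in the hunk above does with next_northbridge().
	 */
	for (i = 0; i < MISC_COUNT; i++)
		printf("misc %u -> root %u\n", i, i * roots_per_misc);

	return 0;
}

With 8 roots and 4 DF/SMN interfaces this prints the 0/2/4/6 mapping, i.e. one redundant root skipped per interface.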
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 2c4d5ece7456..58176b56354e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -264,18 +264,23 @@ static int __init parse_gart_mem(char *p)
 }
 early_param("gart_fix_e820", parse_gart_mem);
 
+/*
+ * With kexec/kdump, if the first kernel doesn't shut down the GART and the
+ * second kernel allocates a different GART region, there might be two
+ * overlapping GART regions present:
+ *
+ * - the first still used by the GART initialized in the first kernel.
+ * - (sub-)set of it used as normal RAM by the second kernel.
+ *
+ * which leads to memory corruptions and a kernel panic eventually.
+ *
+ * This can also happen if the BIOS has forgotten to mark the GART region
+ * as reserved.
+ *
+ * Try to update the e820 map to mark that new region as reserved.
+ */
 void __init early_gart_iommu_check(void)
 {
-	/*
-	 * in case it is enabled before, esp for kexec/kdump,
-	 * previous kernel already enable that. memset called
-	 * by allocate_aperture/__alloc_bootmem_nopanic cause restart.
-	 * or second kernel have different position for GART hole. and new
-	 * kernel could use hole as RAM that is still used by GART set by
-	 * first kernel
-	 * or BIOS forget to put that in reserved.
-	 * try to update e820 to make that region as reserved.
-	 */
 	u32 agp_aper_order = 0;
 	int i, fix, slot, valid_agp = 0;
 	u32 ctl;
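Reviewer note: the relocated comment describes the failure mode; the function's actual remedy is to punch the still-live GART range back into the e820 map as reserved. A hedged sketch of that idea, not the real function body (it assumes the current e820 helper signatures and the 32 MB << order aperture-size convention used in this file; reserve_live_gart is a made-up name):

/* Sketch only: mark a still-live GART aperture as reserved in e820. */
static void __init reserve_live_gart(u64 aper_base, u32 aper_order)
{
	u64 aper_size = (32ULL << 20) << aper_order;	/* 32 MB << order */

	if (e820__mapped_any(aper_base, aper_base + aper_size,
			     E820_TYPE_RAM)) {
		/* The second kernel would hand this range out as RAM. */
		e820__range_add(aper_base, aper_size, E820_TYPE_RESERVED);
		e820__update_table_print();
	}
}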
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 32b2b7a41ef5..b7bcdd781651 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -44,6 +44,7 @@
 #include <asm/mpspec.h>
 #include <asm/i8259.h>
 #include <asm/proto.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/desc.h>
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index e84c9eb4e5b4..0005c284a5c5 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -8,6 +8,7 @@
  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
  * James Cleverdon.
  */
+#include <linux/acpi.h>
 #include <linux/errno.h>
 #include <linux/threads.h>
 #include <linux/cpumask.h>
@@ -16,13 +17,13 @@
 #include <linux/ctype.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
+
 #include <asm/smp.h>
-#include <asm/apic.h>
 #include <asm/ipi.h>
+#include <asm/apic.h>
+#include <asm/apic_flat_64.h>
 #include <asm/jailhouse_para.h>
 
-#include <linux/acpi.h>
-
 static struct apic apic_physflat;
 static struct apic apic_flat;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 652e7ffa9b9d..3173e07d3791 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
 #include <asm/desc.h>
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 391f358ebb4c..a555da094157 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1079,7 +1079,7 @@ late_initcall(uv_init_heartbeat);
 #endif /* !CONFIG_HOTPLUG_CPU */
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
-int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
+static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
 {
 	int domain, bus, rc;
 
@@ -1148,7 +1148,7 @@ static void get_mn(struct mn *mnp)
 	mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
 }
 
-void __init uv_init_hub_info(struct uv_hub_info_s *hi)
+static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
 {
 	union uvh_node_id_u node_id;
 	struct mn mn;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 72adf6c335dc..168543d077d7 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -29,7 +29,8 @@
 # include "asm-offsets_64.c"
 #endif
 
-void common(void) {
+static void __used common(void)
+{
 	BLANK();
 	OFFSET(TASK_threadsp, task_struct, thread.sp);
 #ifdef CONFIG_STACKPROTECTOR
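Reviewer note on the asm-offsets.c change: common() is never called at run time; the file is compiled to assembly and its output is scraped for marker strings, so once the function becomes static it needs __used to survive the optimizer. The mechanism in miniature, paraphrasing include/linux/kbuild.h:

/* Each invocation emits a magic ".ascii" marker into the generated .s
 * file; the build then greps those markers into asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define BLANK() asm volatile("\n.ascii \"->\"" : : )
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

So OFFSET(TASK_threadsp, task_struct, thread.sp) above ends up as a #define TASK_threadsp <offset> usable from assembly.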
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 1979a76bfadd..5136e6818da8 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -9,6 +9,7 @@
 #include <linux/memblock.h>
 
 #include <asm/proto.h>
+#include <asm/setup.h>
 
 /*
  * Some BIOSes seem to corrupt the low 64k of memory during events
@@ -136,7 +137,7 @@ void __init setup_bios_corruption_check(void)
 }
 
 
-void check_for_bios_corruption(void)
+static void check_for_bios_corruption(void)
 {
 	int i;
 	int corruption = 0;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 1f5d2291c31e..ac78f90aea56 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
-obj-$(CONFIG_INTEL_RDT)	+= intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
-CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
-
-obj-$(CONFIG_X86_MCE)			+= mcheck/
+obj-$(CONFIG_X86_MCE)			+= mce/
 obj-$(CONFIG_MTRR)			+= mtrr/
 obj-$(CONFIG_MICROCODE)			+= microcode/
+obj-$(CONFIG_RESCTRL)			+= resctrl/
 
 obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eeea634bee0a..69f6bbb41be0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -15,6 +15,7 @@
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
+#include <asm/debugreg.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/mmconfig.h>
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 7eba34df54c3..804c49493938 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -12,6 +12,7 @@
 #include <linux/ktime.h>
 #include <linux/math64.h>
 #include <linux/percpu.h>
+#include <linux/cpufreq.h>
 #include <linux/smp.h>
 
 #include "cpu.h"
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c37e66e493bf..8654b8b0c848 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/nospec.h>
 #include <linux/prctl.h>
+#include <linux/sched/smt.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -31,6 +32,8 @@
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
 
+#include "cpu.h"
+
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -53,6 +56,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
+/* Control conditional STIBP in switch_to() */
+DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
@@ -123,31 +133,6 @@ void __init check_bugs(void)
 #endif
 }
 
-/* The kernel command line selection */
-enum spectre_v2_mitigation_cmd {
-	SPECTRE_V2_CMD_NONE,
-	SPECTRE_V2_CMD_AUTO,
-	SPECTRE_V2_CMD_FORCE,
-	SPECTRE_V2_CMD_RETPOLINE,
-	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-	SPECTRE_V2_CMD_RETPOLINE_AMD,
-};
-
-static const char *spectre_v2_strings[] = {
-	[SPECTRE_V2_NONE]			= "Vulnerable",
-	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
-	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
-	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
-	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
-	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-	SPECTRE_V2_NONE;
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
@@ -169,6 +154,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 		    static_cpu_has(X86_FEATURE_AMD_SSBD))
 			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
+		/* Conditional STIBP enabled? */
+		if (static_branch_unlikely(&switch_to_cond_stibp))
+			hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -202,7 +191,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
 				 ssbd_spec_ctrl_to_tif(hostval);
 
-		speculative_store_bypass_update(tif);
+		speculation_ctrl_update(tif);
 	}
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -217,6 +206,15 @@ static void x86_amd_ssb_disable(void)
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+	SPECTRE_V2_NONE;
+
+static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+	SPECTRE_V2_USER_NONE;
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
@@ -238,67 +236,227 @@ static inline const char *spectre_v2_module_string(void)
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
-static void __init spec2_print_if_insecure(const char *reason)
+static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
-	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		pr_info("%s selected on command line.\n", reason);
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
 }
 
-static void __init spec2_print_if_secure(const char *reason)
+/* The kernel command line selection for spectre v2 */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+enum spectre_v2_user_cmd {
+	SPECTRE_V2_USER_CMD_NONE,
+	SPECTRE_V2_USER_CMD_AUTO,
+	SPECTRE_V2_USER_CMD_FORCE,
+	SPECTRE_V2_USER_CMD_PRCTL,
+	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+	SPECTRE_V2_USER_CMD_SECCOMP,
+	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+};
+
+static const char * const spectre_v2_user_strings[] = {
+	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
+	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
+	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
+	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
+	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
+};
+
+static const struct {
+	const char			*option;
+	enum spectre_v2_user_cmd	cmd;
+	bool				secure;
+} v2_user_options[] __initdata = {
+	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
+	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
+	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
+	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
+	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
+	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
+	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
+};
+
+static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		pr_info("%s selected on command line.\n", reason);
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }
 
-static inline bool retp_compiler(void)
+static enum spectre_v2_user_cmd __init
+spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 {
-	return __is_defined(RETPOLINE);
+	char arg[20];
+	int ret, i;
+
+	switch (v2_cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return SPECTRE_V2_USER_CMD_NONE;
+	case SPECTRE_V2_CMD_FORCE:
+		return SPECTRE_V2_USER_CMD_FORCE;
+	default:
+		break;
+	}
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+				  arg, sizeof(arg));
+	if (ret < 0)
+		return SPECTRE_V2_USER_CMD_AUTO;
+
+	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+		if (match_option(arg, ret, v2_user_options[i].option)) {
+			spec_v2_user_print_cond(v2_user_options[i].option,
+						v2_user_options[i].secure);
+			return v2_user_options[i].cmd;
+		}
+	}
+
+	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+	return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool match_option(const char *arg, int arglen, const char *opt)
+static void __init
+spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
-	int len = strlen(opt);
+	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+	bool smt_possible = IS_ENABLED(CONFIG_SMP);
+	enum spectre_v2_user_cmd cmd;
 
-	return len == arglen && !strncmp(arg, opt, len);
+	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+		return;
+
+	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		smt_possible = false;
+
+	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+	switch (cmd) {
+	case SPECTRE_V2_USER_CMD_NONE:
+		goto set_mode;
+	case SPECTRE_V2_USER_CMD_FORCE:
+		mode = SPECTRE_V2_USER_STRICT;
+		break;
+	case SPECTRE_V2_USER_CMD_PRCTL:
+	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+		mode = SPECTRE_V2_USER_PRCTL;
+		break;
+	case SPECTRE_V2_USER_CMD_AUTO:
+	case SPECTRE_V2_USER_CMD_SECCOMP:
+	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+		if (IS_ENABLED(CONFIG_SECCOMP))
+			mode = SPECTRE_V2_USER_SECCOMP;
+		else
+			mode = SPECTRE_V2_USER_PRCTL;
+		break;
+	}
+
+	/*
+	 * At this point, an STIBP mode other than "off" has been set.
+	 * If STIBP support is not being forced, check if STIBP always-on
+	 * is preferred.
+	 */
+	if (mode != SPECTRE_V2_USER_STRICT &&
+	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+	/* Initialize Indirect Branch Prediction Barrier */
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+		switch (cmd) {
+		case SPECTRE_V2_USER_CMD_FORCE:
+		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+			static_branch_enable(&switch_mm_always_ibpb);
+			break;
+		case SPECTRE_V2_USER_CMD_PRCTL:
+		case SPECTRE_V2_USER_CMD_AUTO:
+		case SPECTRE_V2_USER_CMD_SECCOMP:
+			static_branch_enable(&switch_mm_cond_ibpb);
+			break;
+		default:
+			break;
+		}
+
+		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+			static_key_enabled(&switch_mm_always_ibpb) ?
+			"always-on" : "conditional");
+	}
+
+	/* If enhanced IBRS is enabled no STIBP required */
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return;
+
+	/*
+	 * If SMT is not possible or STIBP is not available clear the STIBP
+	 * mode.
+	 */
+	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+		mode = SPECTRE_V2_USER_NONE;
+set_mode:
+	spectre_v2_user = mode;
+	/* Only print the STIBP mode when SMT possible */
+	if (smt_possible)
+		pr_info("%s\n", spectre_v2_user_strings[mode]);
 }
 
+static const char * const spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE]			= "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
+};
+
 static const struct {
 	const char *option;
 	enum spectre_v2_mitigation_cmd cmd;
 	bool secure;
-} mitigation_options[] = {
-	{ "off",               SPECTRE_V2_CMD_NONE,              false },
-	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
-	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
-	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
-	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
-	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
+} mitigation_options[] __initdata = {
+	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
+	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
+	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
+	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
+	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
 };
 
+static void __init spec_v2_print_cond(const char *reason, bool secure)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+		pr_info("%s selected on command line.\n", reason);
+}
+
 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 {
+	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 	char arg[20];
 	int ret, i;
-	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 
 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
 		return SPECTRE_V2_CMD_NONE;
-	else {
-		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
-		if (ret < 0)
-			return SPECTRE_V2_CMD_AUTO;
 
-		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
-			if (!match_option(arg, ret, mitigation_options[i].option))
-				continue;
-			cmd = mitigation_options[i].cmd;
-			break;
-		}
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+	if (ret < 0)
+		return SPECTRE_V2_CMD_AUTO;
 
-		if (i >= ARRAY_SIZE(mitigation_options)) {
-			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
-			return SPECTRE_V2_CMD_AUTO;
-		}
+	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+		if (!match_option(arg, ret, mitigation_options[i].option))
+			continue;
+		cmd = mitigation_options[i].cmd;
+		break;
+	}
+
+	if (i >= ARRAY_SIZE(mitigation_options)) {
+		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+		return SPECTRE_V2_CMD_AUTO;
 	}
 
 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -316,54 +474,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
-	if (mitigation_options[i].secure)
-		spec2_print_if_secure(mitigation_options[i].option);
-	else
-		spec2_print_if_insecure(mitigation_options[i].option);
-
+	spec_v2_print_cond(mitigation_options[i].option,
+			   mitigation_options[i].secure);
 	return cmd;
 }
 
-static bool stibp_needed(void)
-{
-	if (spectre_v2_enabled == SPECTRE_V2_NONE)
-		return false;
-
-	if (!boot_cpu_has(X86_FEATURE_STIBP))
-		return false;
-
-	return true;
-}
-
-static void update_stibp_msr(void *info)
-{
-	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-}
-
-void arch_smt_update(void)
-{
-	u64 mask;
-
-	if (!stibp_needed())
-		return;
-
-	mutex_lock(&spec_ctrl_mutex);
-
-	mask = x86_spec_ctrl_base;
-	if (cpu_smt_control == CPU_SMT_ENABLED)
-		mask |= SPEC_CTRL_STIBP;
-	else
-		mask &= ~SPEC_CTRL_STIBP;
-
-	if (mask != x86_spec_ctrl_base) {
-		pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
-			cpu_smt_control == CPU_SMT_ENABLED ?
-			"Enabling" : "Disabling");
-		x86_spec_ctrl_base = mask;
-		on_each_cpu(update_stibp_msr, NULL, 1);
-	}
-
-	mutex_unlock(&spec_ctrl_mutex);
-}
-
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -417,14 +532,12 @@ retpoline_auto:
 			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
 			goto retpoline_generic;
 		}
-		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
-					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+		mode = SPECTRE_V2_RETPOLINE_AMD;
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 	} else {
 	retpoline_generic:
-		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
-					 SPECTRE_V2_RETPOLINE_MINIMAL;
+		mode = SPECTRE_V2_RETPOLINE_GENERIC;
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 	}
 
@@ -443,12 +556,6 @@ specv2_set_mode:
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
-	/* Initialize Indirect Branch Prediction Barrier if supported */
-	if (boot_cpu_has(X86_FEATURE_IBPB)) {
-		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
-	}
-
 	/*
 	 * Retpoline means the kernel is safe because it has no indirect
 	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -465,10 +572,68 @@ specv2_set_mode:
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
 
+	/* Set up IBPB and STIBP depending on the general spectre V2 command */
+	spectre_v2_user_select_mitigation(cmd);
+
 	/* Enable STIBP if appropriate */
 	arch_smt_update();
 }
 
+static void update_stibp_msr(void * __unused)
+{
+	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+	if (sched_smt_active())
+		mask |= SPEC_CTRL_STIBP;
+
+	if (mask == x86_spec_ctrl_base)
+		return;
+
+	pr_info("Update user space SMT mitigation: STIBP %s\n",
+		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+	x86_spec_ctrl_base = mask;
+	on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+	if (sched_smt_active())
+		static_branch_enable(&switch_to_cond_stibp);
+	else
+		static_branch_disable(&switch_to_cond_stibp);
+}
+
+void arch_smt_update(void)
+{
+	/* Enhanced IBRS implies STIBP. No update required. */
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return;
+
+	mutex_lock(&spec_ctrl_mutex);
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		break;
+	case SPECTRE_V2_USER_STRICT:
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
+		update_stibp_strict();
+		break;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		update_indir_branch_cond();
+		break;
+	}
+
+	mutex_unlock(&spec_ctrl_mutex);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
 
@@ -483,7 +648,7 @@ enum ssb_mitigation_cmd {
 	SPEC_STORE_BYPASS_CMD_SECCOMP,
 };
 
-static const char *ssb_strings[] = {
+static const char * const ssb_strings[] = {
 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
 	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -493,7 +658,7 @@ static const char *ssb_strings[] = {
 static const struct {
 	const char *option;
 	enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] = {
+} ssb_mitigation_options[]  __initdata = {
 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
@@ -604,10 +769,25 @@ static void ssb_select_mitigation(void)
 #undef pr_fmt
 #define pr_fmt(fmt)     "Speculation prctl: " fmt
 
-static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+static void task_update_spec_tif(struct task_struct *tsk)
 {
-	bool update;
+	/* Force the update of the real TIF bits */
+	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
 
+	/*
+	 * Immediately update the speculation control MSRs for the current
+	 * task, but for a non-current task delay setting the CPU
+	 * mitigation until it is scheduled next.
+	 *
+	 * This can only happen for SECCOMP mitigation. For PRCTL it's
+	 * always the current task.
+	 */
+	if (tsk == current)
+		speculation_ctrl_update_current();
+}
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
 	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
 		return -ENXIO;
@@ -618,28 +798,58 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 		if (task_spec_ssb_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ssb_disable(task);
-		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_DISABLE:
 		task_set_spec_ssb_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_FORCE_DISABLE:
 		task_set_spec_ssb_disable(task);
 		task_set_spec_ssb_force_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	default:
 		return -ERANGE;
 	}
+	return 0;
+}
 
-	/*
-	 * If being set on non-current task, delay setting the CPU
-	 * mitigation until it is next scheduled.
-	 */
-	if (task == current && update)
-		speculative_store_bypass_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+			return 0;
+		/*
+		 * Indirect branch speculation is always disabled in strict
+		 * mode.
+		 */
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+			return -EPERM;
+		task_clear_spec_ib_disable(task);
+		task_update_spec_tif(task);
+		break;
+	case PR_SPEC_DISABLE:
+	case PR_SPEC_FORCE_DISABLE:
+		/*
+		 * Indirect branch speculation is always allowed when
+		 * mitigation is force disabled.
+		 */
+		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+			return -EPERM;
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+			return 0;
+		task_set_spec_ib_disable(task);
+		if (ctrl == PR_SPEC_FORCE_DISABLE)
+			task_set_spec_ib_force_disable(task);
+		task_update_spec_tif(task);
+		break;
+	default:
+		return -ERANGE;
+	}
 	return 0;
 }
 
@@ -649,6 +859,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_set(task, ctrl);
+	case PR_SPEC_INDIRECT_BRANCH:
+		return ib_prctl_set(task, ctrl);
 	default:
 		return -ENODEV;
 	}
@@ -659,6 +871,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 {
 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
 
@@ -681,11 +895,36 @@ static int ssb_prctl_get(struct task_struct *task)
 	}
 }
 
+static int ib_prctl_get(struct task_struct *task)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		return PR_SPEC_NOT_AFFECTED;
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		return PR_SPEC_ENABLE;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		if (task_spec_ib_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ib_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+	case SPECTRE_V2_USER_STRICT:
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
+		return PR_SPEC_DISABLE;
+	default:
+		return PR_SPEC_NOT_AFFECTED;
+	}
+}
+
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_get(task);
+	case PR_SPEC_INDIRECT_BRANCH:
+		return ib_prctl_get(task);
 	default:
 		return -ENODEV;
 	}
@@ -779,7 +1018,8 @@ static void __init l1tf_select_mitigation(void)
 #endif
 
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
-	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
 				half_pa);
@@ -823,7 +1063,7 @@ early_param("l1tf", l1tf_cmdline);
 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-static const char *l1tf_vmx_states[] = {
+static const char * const l1tf_vmx_states[] = {
 	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
 	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
@@ -839,13 +1079,14 @@ static ssize_t l1tf_show_state(char *buf)
 
 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
-	     cpu_smt_control == CPU_SMT_ENABLED))
+	     sched_smt_active())) {
 		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
 			       l1tf_vmx_states[l1tf_vmx_mitigation]);
+	}
 
 	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
 		       l1tf_vmx_states[l1tf_vmx_mitigation],
-		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+		       sched_smt_active() ? "vulnerable" : "disabled");
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
@@ -854,11 +1095,41 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+static char *stibp_state(void)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return "";
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		return ", STIBP: disabled";
+	case SPECTRE_V2_USER_STRICT:
+		return ", STIBP: forced";
+	case SPECTRE_V2_USER_STRICT_PREFERRED:
+		return ", STIBP: always-on";
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		if (static_key_enabled(&switch_to_cond_stibp))
+			return ", STIBP: conditional";
+	}
+	return "";
+}
+
+static char *ibpb_state(void)
+{
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		if (static_key_enabled(&switch_mm_always_ibpb))
+			return ", IBPB: always-on";
+		if (static_key_enabled(&switch_mm_cond_ibpb))
+			return ", IBPB: conditional";
+		return ", IBPB: disabled";
+	}
+	return "";
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
-	int ret;
-
 	if (!boot_cpu_has_bug(bug))
 		return sprintf(buf, "Not affected\n");
 
@@ -876,13 +1147,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 
 	case X86_BUG_SPECTRE_V2:
-		ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+			       ibpb_state(),
 			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-			       (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+			       stibp_state(),
 			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
 			       spectre_v2_module_string());
-		return ret;
 
 	case X86_BUG_SPEC_STORE_BYPASS:
 		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index dc1b9342e9c4..c4d1023fb0ab 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 
 #include <asm/cpufeature.h>
+#include <asm/cacheinfo.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mce/Makefile
index bcc7c54c7041..9f020c994154 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mce/Makefile
@@ -1,14 +1,16 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y				=  mce.o mce-severity.o mce-genpool.o
+obj-y				=  core.o severity.o genpool.o
 
 obj-$(CONFIG_X86_ANCIENT_MCE)	+= winchip.o p5.o
-obj-$(CONFIG_X86_MCE_INTEL)	+= mce_intel.o
-obj-$(CONFIG_X86_MCE_AMD)	+= mce_amd.o
+obj-$(CONFIG_X86_MCE_INTEL)	+= intel.o
+obj-$(CONFIG_X86_MCE_AMD)	+= amd.o
 obj-$(CONFIG_X86_MCE_THRESHOLD)	+= threshold.o
+
+mce-inject-y			:= inject.o
 obj-$(CONFIG_X86_MCE_INJECT)	+= mce-inject.o
 
 obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
 
-obj-$(CONFIG_ACPI_APEI)		+= mce-apei.o
+obj-$(CONFIG_ACPI_APEI)		+= apei.o
 
 obj-$(CONFIG_X86_MCELOG_LEGACY)	+= dev-mcelog.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mce/amd.c
index dd33c357548f..89298c83de53 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -23,12 +23,13 @@
 #include <linux/string.h>
 
 #include <asm/amd_nb.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 #define NR_BLOCKS         5
 #define THRESHOLD_MAX     0xFFF
@@ -56,7 +57,7 @@
 /* Threshold LVT offset is at MSR0xC0000410[15:12] */
 #define SMCA_THR_LVT_OFF	0xF000
 
-static bool thresholding_en;
+static bool thresholding_irq_en;
 
 static const char * const th_names[] = {
 	"load_store",
@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
 	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
 };
 
-const char *smca_get_name(enum smca_bank_types t)
+static const char *smca_get_name(enum smca_bank_types t)
 {
 	if (t >= N_SMCA_BANK_TYPES)
 		return NULL;
@@ -534,9 +535,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
 
 set_offset:
 	offset = setup_APIC_mce_threshold(offset, new);
-
-	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
-		mce_threshold_vector = amd_threshold_interrupt;
+	if (offset == new)
+		thresholding_irq_en = true;
 
 done:
 	mce_threshold_block_init(&b, offset);
@@ -825,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
 	mce_log(&m);
 }
 
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
@@ -1357,9 +1357,6 @@ int mce_threshold_remove_device(unsigned int cpu)
 {
 	unsigned int bank;
 
-	if (!thresholding_en)
-		return 0;
-
 	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
@@ -1377,9 +1374,6 @@ int mce_threshold_create_device(unsigned int cpu)
 	struct threshold_bank **bp;
 	int err = 0;
 
-	if (!thresholding_en)
-		return 0;
-
 	bp = per_cpu(threshold_banks, cpu);
 	if (bp)
 		return 0;
@@ -1408,9 +1402,6 @@ static __init int threshold_init_device(void)
 {
 	unsigned lcpu = 0;
 
-	if (mce_threshold_vector == amd_threshold_interrupt)
-		thresholding_en = true;
-
 	/* to hit CPUs online before the notifier is up */
 	for_each_online_cpu(lcpu) {
 		int err = mce_threshold_create_device(lcpu);
@@ -1419,6 +1410,9 @@ static __init int threshold_init_device(void)
 			return err;
 	}
 
+	if (thresholding_irq_en)
+		mce_threshold_vector = amd_threshold_interrupt;
+
 	return 0;
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mce/apei.c
index 2eee85379689..1d9b3ce662a0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -36,7 +36,7 @@
 #include <acpi/ghes.h>
 #include <asm/mce.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
 {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mce/core.c
index 8c66d2fc8f81..672c7225cb1b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -8,8 +8,6 @@
  * Author: Andi Kleen
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/thread_info.h>
 #include <linux/capability.h>
 #include <linux/miscdevice.h>
@@ -52,7 +50,7 @@
 #include <asm/msr.h>
 #include <asm/reboot.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 static DEFINE_MUTEX(mce_log_mutex);
 
@@ -485,7 +483,7 @@ static void mce_report_event(struct pt_regs *regs)
  * be somewhat complicated (e.g. segment offset would require an instruction
  * parser). So only support physical addresses up to page granuality for now.
  */
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
 {
 	if (!(m->status & MCI_STATUS_ADDRV))
 		return 0;
@@ -505,6 +503,7 @@ static int mce_usable_address(struct mce *m)
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(mce_usable_address);
 
 bool mce_is_memory_error(struct mce *m)
 {
@@ -534,7 +533,7 @@ bool mce_is_memory_error(struct mce *m)
 }
 EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
 {
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
@@ -547,6 +546,7 @@ static bool mce_is_correctable(struct mce *m)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(mce_is_correctable);
 
 static bool cec_add_mce(struct mce *m)
 {
@@ -684,7 +684,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
- * and poll hander -- * so we skip this for now.
+ * and poll handler -- * so we skip this for now.
 * These cases should not happen anyways, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
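Reviewer note: with mce_is_correctable() and mce_usable_address() exported, modular consumers on the decode chain can filter records much like the in-tree CEC does. An illustrative (not in-tree) notifier module; the demo_* names are made up, the chain API is the existing one:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <asm/mce.h>

/* Illustrative consumer: report correctable errors with a usable address. */
static int demo_mce_notify(struct notifier_block *nb, unsigned long val,
			   void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (mce_is_correctable(m) && mce_usable_address(m))
		pr_info("correctable error, PFN 0x%llx\n",
			(unsigned long long)(m->addr >> PAGE_SHIFT));

	return NOTIFY_OK;
}

static struct notifier_block demo_mce_nb = {
	.notifier_call = demo_mce_notify,
};

static int __init demo_init(void)
{
	mce_register_decode_chain(&demo_mce_nb);
	return 0;
}

static void __exit demo_exit(void)
{
	mce_unregister_decode_chain(&demo_mce_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");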
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index 27f394ac983f..9690ec5c8051 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -8,14 +8,12 @@
  * Author: Andi Kleen
 */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
 #include <linux/kmod.h>
 #include <linux/poll.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 static BLOCKING_NOTIFIER_HEAD(mce_injector_chain);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mce/genpool.c
index 217cd4449bc9..3395549c51d3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mce/genpool.c
@@ -10,7 +10,7 @@
 #include <linux/mm.h>
 #include <linux/genalloc.h>
 #include <linux/llist.h>
-#include "mce-internal.h"
+#include "internal.h"
 
 /*
 * printk() is not safe in MCE context. This is a lock-less memory allocator
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 1fc424c40a31..8492ef7d9015 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -38,7 +38,7 @@
 #include <asm/nmi.h>
 #include <asm/smp.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 /*
 * Collect all the MCi_XXX settings
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mce/intel.c
index d05be307d081..e43eb6732630 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -18,7 +18,7 @@
 #include <asm/msr.h>
 #include <asm/mce.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 /*
 * Support for Intel Correct Machine Check Interrupts. This allows
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mce/internal.h
index ceb67cd5918f..af5eab1e65e2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -2,6 +2,9 @@
 #ifndef __X86_MCE_INTERNAL_H__
 #define __X86_MCE_INTERNAL_H__
 
+#undef pr_fmt
+#define pr_fmt(fmt) "mce: " fmt
+
 #include <linux/device.h>
 #include <asm/mce.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mce/p5.c
index 5cddf831720f..4ae6df556526 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mce/p5.c
@@ -14,6 +14,8 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
+#include "internal.h"
+
 /* By default disabled */
 int mce_p5_enabled __read_mostly;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mce/severity.c
index 44396d521987..dc3e26e905a3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -16,7 +16,7 @@
 #include <asm/mce.h>
 #include <linux/uaccess.h>
 
-#include "mce-internal.h"
+#include "internal.h"
 
 /*
 * Grade an mce by severity. In general the most severe ones are processed
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index 2da67b70ba98..10a3b0599300 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -25,11 +25,14 @@
 #include <linux/cpu.h>
 
 #include <asm/processor.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>
 
+#include "internal.h"
+
 /* How long to wait between reporting thermal events */
 #define CHECK_INTERVAL		(300 * HZ)
 
@@ -390,7 +393,7 @@ static void unexpected_thermal_interrupt(void)
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c
index 2b584b319eff..28812cc15300 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -6,10 +6,13 @@
 #include <linux/kernel.h>
 
 #include <asm/irq_vectors.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
 
+#include "internal.h"
+
 static void default_threshold_interrupt(void)
 {
 	pr_err("Unexpected threshold interrupt at vector %x\n",
@@ -18,7 +21,7 @@ static void default_threshold_interrupt(void)
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c
index 3b45b270a865..a30ea13cccc2 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mce/winchip.c
@@ -13,6 +13,8 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
+#include "internal.h"
+
 /* Machine check handler for WinChip C6: */
 static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
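Reviewer note: one functional change hiding among these renames is that internal.h now defines pr_fmt, so every file under mce/ that includes it gets a uniform "mce: " printk prefix, and the per-file KBUILD_MODNAME-based pr_fmt definitions (dropped from core.c and dev-mcelog.c above) become unnecessary. The mechanism in miniature:

/* The #undef/#define pair lets this definition win even if
 * linux/printk.h already installed the default pr_fmt. */
#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt

/* A subsequent pr_err("no usable address\n") expands to
 * printk(KERN_ERR "mce: " "no usable address\n"). */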
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 07b5fc00b188..51adde0a0f1a 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -5,7 +5,7 @@
 * CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
- *	          2013-2016 Borislav Petkov <bp@alien8.de>
+ *	          2013-2018 Borislav Petkov <bp@alien8.de>
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
@@ -38,7 +38,10 @@
 #include <asm/cpu.h>
 #include <asm/msr.h>
 
-static struct equiv_cpu_entry *equiv_cpu_table;
+static struct equiv_cpu_table {
+	unsigned int num_entries;
+	struct equiv_cpu_entry *entry;
+} equiv_table;
 
 /*
 * This points to the current valid container of microcode patches which we will
@@ -63,13 +66,225 @@ static u8 amd_ucode_patch[PATCH_MAX_SIZE];
 static const char
 ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
 
-static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
+static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
 {
-	for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
-		if (sig == equiv_table->installed_cpu)
-			return equiv_table->equiv_cpu;
+	unsigned int i;
+
+	if (!et || !et->num_entries)
+		return 0;
+
+	for (i = 0; i < et->num_entries; i++) {
+		struct equiv_cpu_entry *e = &et->entry[i];
+
+		if (sig == e->installed_cpu)
+			return e->equiv_cpu;
+
+		e++;
+	}
+	return 0;
+}
+
+/*
+ * Check whether there is a valid microcode container file at the beginning
+ * of @buf of size @buf_size. Set @early to use this function in the early path.
+ */
+static bool verify_container(const u8 *buf, size_t buf_size, bool early)
+{
+	u32 cont_magic;
+
+	if (buf_size <= CONTAINER_HDR_SZ) {
+		if (!early)
+			pr_debug("Truncated microcode container header.\n");
+
+		return false;
+	}
+
+	cont_magic = *(const u32 *)buf;
+	if (cont_magic != UCODE_MAGIC) {
+		if (!early)
+			pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
+
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Check whether there is a valid, non-truncated CPU equivalence table at the
+ * beginning of @buf of size @buf_size. Set @early to use this function in the
+ * early path.
+ */
+static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
+{
+	const u32 *hdr = (const u32 *)buf;
+	u32 cont_type, equiv_tbl_len;
+
+	if (!verify_container(buf, buf_size, early))
+		return false;
+
+	cont_type = hdr[1];
+	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
+		if (!early)
+			pr_debug("Wrong microcode container equivalence table type: %u.\n",
+				 cont_type);
+
+		return false;
+	}
+
+	buf_size -= CONTAINER_HDR_SZ;
+
+	equiv_tbl_len = hdr[2];
+	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
+	    buf_size < equiv_tbl_len) {
+		if (!early)
+			pr_debug("Truncated equivalence table.\n");
+
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Check whether there is a valid, non-truncated microcode patch section at the
+ * beginning of @buf of size @buf_size. Set @early to use this function in the
+ * early path.
+ *
+ * On success, @sh_psize returns the patch size according to the section header,
+ * to the caller.
+ */
+static bool
+__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
+{
+	u32 p_type, p_size;
+	const u32 *hdr;
+
+	if (buf_size < SECTION_HDR_SIZE) {
+		if (!early)
+			pr_debug("Truncated patch section.\n");
+
+		return false;
+	}
+
+	hdr = (const u32 *)buf;
+	p_type = hdr[0];
+	p_size = hdr[1];
+
+	if (p_type != UCODE_UCODE_TYPE) {
+		if (!early)
+			pr_debug("Invalid type field (0x%x) in container file section header.\n",
+				 p_type);
+
+		return false;
+	}
+
+	if (p_size < sizeof(struct microcode_header_amd)) {
+		if (!early)
+			pr_debug("Patch of size %u too short.\n", p_size);
+
+		return false;
+	}
+
+	*sh_psize = p_size;
+
+	return true;
+}
+
+/*
+ * Check whether the passed remaining file @buf_size is large enough to contain
+ * a patch of the indicated @sh_psize (and also whether this size does not
+ * exceed the per-family maximum). @sh_psize is the size read from the section
+ * header.
+ */
+static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
+{
+	u32 max_size;
+
+	if (family >= 0x15)
+		return min_t(u32, sh_psize, buf_size);
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+
+	switch (family) {
+	case 0x10 ... 0x12:
+		max_size = F1XH_MPB_MAX_SIZE;
+		break;
+	case 0x14:
+		max_size = F14H_MPB_MAX_SIZE;
+		break;
+	default:
+		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
+		return 0;
+		break;
+	}
+
+	if (sh_psize > min_t(u32, buf_size, max_size))
+		return 0;
+
+	return sh_psize;
+}
+
+/*
+ * Verify the patch in @buf.
+ *
+ * Returns:
+ * negative: on error
+ * positive: patch is not for this family, skip it
+ * 0: success
+ */
+static int
+verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
+{
+	struct microcode_header_amd *mc_hdr;
+	unsigned int ret;
+	u32 sh_psize;
+	u16 proc_id;
+	u8 patch_fam;
+
+	if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
+		return -1;
+
+	/*
+	 * The section header length is not included in this indicated size
+	 * but is present in the leftover file length so we need to subtract
+	 * it before passing this value to the function below.
+	 */
+	buf_size -= SECTION_HDR_SIZE;
+
+	/*
+	 * Check if the remaining buffer is big enough to contain a patch of
+	 * size sh_psize, as the section claims.
+	 */
+	if (buf_size < sh_psize) {
+		if (!early)
+			pr_debug("Patch of size %u truncated.\n", sh_psize);
+
+		return -1;
+	}
+
+	ret = __verify_patch_size(family, sh_psize, buf_size);
+	if (!ret) {
+		if (!early)
+			pr_debug("Per-family patch size mismatch.\n");
+		return -1;
+	}
+
+	*patch_size = sh_psize;
+
+	mc_hdr	= (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
+	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
+		if (!early)
+			pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
+		return -1;
 	}
+
+	proc_id	= mc_hdr->processor_rev_id;
+	patch_fam = 0xf + (proc_id >> 12);
+	if (patch_fam != family)
+		return 1;
 
 	return 0;
 }
@@ -80,26 +295,28 @@ static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
 * Returns the amount of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */
-static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
+static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
 {
-	struct equiv_cpu_entry *eq;
-	ssize_t orig_size = size;
+	struct equiv_cpu_table table;
+	size_t orig_size = size;
 	u32 *hdr = (u32 *)ucode;
 	u16 eq_id;
 	u8 *buf;
 
-	/* Am I looking at an equivalence table header? */
-	if (hdr[0] != UCODE_MAGIC ||
-	    hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
-	    hdr[2] == 0)
-		return CONTAINER_HDR_SZ;
+	if (!verify_equivalence_table(ucode, size, true))
+		return 0;
 
 	buf = ucode;
 
-	eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
+	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
+	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);
 
-	/* Find the equivalence ID of our CPU in this table: */
-	eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
+	/*
+	 * Find the equivalence ID of our CPU in this table. Even if this table
+	 * doesn't contain a patch for the CPU, scan through the whole container
+	 * so that it can be skipped in case there are other containers appended.
+	 */
+	eq_id = find_equiv_id(&table, desc->cpuid_1_eax);
 
 	buf += hdr[2] + CONTAINER_HDR_SZ;
 	size -= hdr[2] + CONTAINER_HDR_SZ;
@@ -111,29 +328,29 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
 	while (size > 0) {
 		struct microcode_amd *mc;
 		u32 patch_size;
+		int ret;
+
+		ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
+		if (ret < 0) {
+			/*
+			 * Patch verification failed, skip to the next
+			 * container, if there's one:
+			 */
+			goto out;
+		} else if (ret > 0) {
+			goto skip;
+		}
 
-		hdr = (u32 *)buf;
-
-		if (hdr[0] != UCODE_UCODE_TYPE)
-			break;
-
-		/* Sanity-check patch size. */
-		patch_size = hdr[1];
-		if (patch_size > PATCH_MAX_SIZE)
-			break;
-
-		/* Skip patch section header: */
-		buf  += SECTION_HDR_SIZE;
-		size -= SECTION_HDR_SIZE;
-
-		mc = (struct microcode_amd *)buf;
+		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
 		if (eq_id == mc->hdr.processor_rev_id) {
 			desc->psize = patch_size;
 			desc->mc = mc;
 		}
 
-		buf  += patch_size;
-		size -= patch_size;
+skip:
+		/* Skip patch section header too: */
+		buf  += patch_size + SECTION_HDR_SIZE;
+		size -= patch_size + SECTION_HDR_SIZE;
 	}
 
 	/*
@@ -150,6 +367,7 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
 		return 0;
 	}
 
+out:
 	return orig_size - size;
 }
 
@@ -159,15 +377,18 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
 */
 static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
 {
-	ssize_t rem = size;
-
-	while (rem >= 0) {
-		ssize_t s = parse_container(ucode, rem, desc);
+	while (size) {
+		size_t s = parse_container(ucode, size, desc);
 		if (!s)
 			return;
 
-		ucode += s;
-		rem   -= s;
+		/* catch wraparound */
+		if (size >= s) {
+			ucode += s;
+			size  -= s;
+		} else {
+			return;
+		}
 	}
 }
 
@@ -364,21 +585,7 @@ void reload_ucode_amd(void)
 static u16 __find_equiv_id(unsigned int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
-}
-
-static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
-{
-	int i = 0;
-
-	BUG_ON(!equiv_cpu_table);
-
-	while (equiv_cpu_table[i].equiv_cpu != 0) {
-		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
-			return equiv_cpu_table[i].installed_cpu;
-		i++;
-	}
-	return 0;
+	return find_equiv_id(&equiv_table, uci->cpu_sig.sig);
 }
 
 /*
@@ -461,43 +668,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 	return 0;
 }
 
-static unsigned int verify_patch_size(u8 family, u32 patch_size,
-				      unsigned int size)
-{
-	u32 max_size;
-
-#define F1XH_MPB_MAX_SIZE 2048
-#define F14H_MPB_MAX_SIZE 1824
-#define F15H_MPB_MAX_SIZE 4096
-#define F16H_MPB_MAX_SIZE 3458
-#define F17H_MPB_MAX_SIZE 3200
-
-	switch (family) {
-	case 0x14:
-		max_size = F14H_MPB_MAX_SIZE;
-		break;
-	case 0x15:
-		max_size = F15H_MPB_MAX_SIZE;
-		break;
-	case 0x16:
-		max_size = F16H_MPB_MAX_SIZE;
-		break;
-	case 0x17:
-		max_size = F17H_MPB_MAX_SIZE;
-		break;
-	default:
-		max_size = F1XH_MPB_MAX_SIZE;
-		break;
-	}
-
-	if (patch_size > min_t(u32, size, max_size)) {
-		pr_err("patch size mismatch\n");
-		return 0;
-	}
-
-	return patch_size;
-}
-
 static enum ucode_state apply_microcode_amd(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -548,34 +718,34 @@ out:
 	return ret;
 }
 
-static int install_equiv_cpu_table(const u8 *buf)
+static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
 {
-	unsigned int *ibuf = (unsigned int *)buf;
-	unsigned int type = ibuf[1];
-	unsigned int size = ibuf[2];
+	u32 equiv_tbl_len;
+	const u32 *hdr;
 
-	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
-		pr_err("empty section/"
-		       "invalid type field in container file section header\n");
-		return -EINVAL;
-	}
+	if (!verify_equivalence_table(buf, buf_size, false))
+		return 0;
+
+	hdr = (const u32 *)buf;
+	equiv_tbl_len = hdr[2];
 
-	equiv_cpu_table = vmalloc(size);
-	if (!equiv_cpu_table) {
+	equiv_table.entry = vmalloc(equiv_tbl_len);
+	if (!equiv_table.entry) {
 		pr_err("failed to allocate equivalent CPU table\n");
-		return -ENOMEM;
+		return 0;
 	}
 
-	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
+	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
+	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);
 
 	/* add header length */
-	return size + CONTAINER_HDR_SZ;
+	return equiv_tbl_len + CONTAINER_HDR_SZ;
 }
 
 static void free_equiv_cpu_table(void)
 {
-	vfree(equiv_cpu_table);
-	equiv_cpu_table = NULL;
+	vfree(equiv_table.entry);
+	memset(&equiv_table, 0, sizeof(equiv_table));
 }
 
 static void cleanup(void)
@@ -585,47 +755,23 @@ static void cleanup(void)
 }
 
 /*
- * We return the current size even if some of the checks failed so that
+ * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
-static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
+				unsigned int *patch_size)
 {
 	struct microcode_header_amd *mc_hdr;
 	struct ucode_patch *patch;
-	unsigned int patch_size, crnt_size, ret;
-	u32 proc_fam;
 	u16 proc_id;
+	int ret;
 
-	patch_size  = *(u32 *)(fw + 4);
-	crnt_size   = patch_size + SECTION_HDR_SIZE;
-	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
-	proc_id	    = mc_hdr->processor_rev_id;
-
-	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
-	if (!proc_fam) {
-		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
-		return crnt_size;
-	}
-
-	/* check if patch is for the current family */
-	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
-	if (proc_fam != family)
-		return crnt_size;
-
-	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
-		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
-			mc_hdr->patch_id);
-		return crnt_size;
-	}
-
-	ret = verify_patch_size(family, patch_size, leftover);
-	if (!ret) {
-		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
-		return crnt_size;
-	}
+	ret = verify_patch(family, fw, leftover, patch_size, false);
+	if (ret)
+		return ret;
 
 	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
 	if (!patch) {
@@ -633,13 +779,16 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
 		return -EINVAL;
 	}
 
-	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
+	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
 	if (!patch->data) {
 		pr_err("Patch data allocation failure.\n");
 		kfree(patch);
 		return -EINVAL;
 	}
 
+	mc_hdr      = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
+	proc_id     = mc_hdr->processor_rev_id;
+
 	INIT_LIST_HEAD(&patch->plist);
 	patch->patch_id  = mc_hdr->patch_id;
 	patch->equiv_cpu = proc_id;
@@ -650,39 +799,38 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
 	/* ... and add to cache. */
 	update_cache(patch);
 
-	return crnt_size;
+	return 0;
 }
 
 static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
 {
-	enum ucode_state ret = UCODE_ERROR;
-	unsigned int leftover;
 	u8 *fw = (u8 *)data;
-	int crnt_size = 0;
-	int offset;
+	size_t offset;
 
-	offset = install_equiv_cpu_table(data);
-	if (offset < 0) {
-		pr_err("failed to create equivalent cpu table\n");
-		return ret;
-	}
-	fw += offset;
-	leftover = size - offset;
+	offset = install_equiv_cpu_table(data, size);
+	if (!offset)
+		return UCODE_ERROR;
+
+	fw   += offset;
+	size -= offset;
 
 	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
 		pr_err("invalid type field in container file section header\n");
 		free_equiv_cpu_table();
-		return ret;
+		return UCODE_ERROR;
 	}
 
-	while (leftover) {
-		crnt_size = verify_and_add_patch(family, fw, leftover);
-		if (crnt_size < 0)
-			return ret;
+	while (size > 0) {
+		unsigned int crnt_size = 0;
+		int ret;
 
-		fw += crnt_size;
-		leftover -= crnt_size;
+		ret = verify_and_add_patch(family, fw, size, &crnt_size);
+		if (ret < 0)
+			return UCODE_ERROR;
+
+		fw   +=  crnt_size + SECTION_HDR_SIZE;
+		size -= (crnt_size + SECTION_HDR_SIZE);
 	}
 
 	return UCODE_OK;
@@ -761,10 +909,8 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 	}
 
 	ret = UCODE_ERROR;
-	if (*(u32 *)fw->data != UCODE_MAGIC) {
-		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
+	if (!verify_container(fw->data, fw->size, false))
 		goto fw_release;
-	}
 
 	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 */
 	update_cache(patch);
 
-	return crnt_size;
+	return 0;
 }
 
 static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 					     size_t size)
 {
-	enum ucode_state ret = UCODE_ERROR;
-	unsigned int leftover;
 	u8 *fw = (u8 *)data;
-	int crnt_size = 0;
-	int offset;
+	size_t offset;
 
-	offset = install_equiv_cpu_table(data);
-	if (offset < 0) {
-		pr_err("failed to create equivalent cpu table\n");
-		return ret;
-	}
-	fw += offset;
-	leftover = size - offset;
+	offset = install_equiv_cpu_table(data, size);
+	if (!offset)
+		return UCODE_ERROR;
+
+	fw += offset;
+	size -= offset;
 
 	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
 		pr_err("invalid type field in container file section header\n");
 		free_equiv_cpu_table();
-		return ret;
+		return UCODE_ERROR;
 	}
 
-	while (leftover) {
-		crnt_size = verify_and_add_patch(family, fw, leftover);
-		if (crnt_size < 0)
-			return ret;
+	while (size > 0) {
+		unsigned int crnt_size = 0;
+		int ret;
 
-		fw += crnt_size;
-		leftover -= crnt_size;
+		ret = verify_and_add_patch(family, fw, size, &crnt_size);
+		if (ret < 0)
+			return UCODE_ERROR;
+
+		fw += crnt_size + SECTION_HDR_SIZE;
+		size -= (crnt_size + SECTION_HDR_SIZE);
 	}
 
 	return UCODE_OK;
@@ -761,10 +909,8 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 	}
 
 	ret = UCODE_ERROR;
-	if (*(u32 *)fw->data != UCODE_MAGIC) {
-		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
+	if (!verify_container(fw->data, fw->size, false))
 		goto fw_release;
-	}
 
 	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 1c72f3819eb1..e81a2db42df7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kexec.h>
+#include <linux/i8253.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
 	if (efi_enabled(EFI_BOOT))
 		x86_platform.get_nmi_reason = hv_get_nmi_reason;
 
+	/*
+	 * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+	 * counter register during PIT shutdown restarts the PIT. So it
+	 * continues to interrupt at 18.2 Hz. Setting i8253_clear_counter
+	 * to false tells pit_shutdown() not to zero the counter so that
+	 * the PIT really is shut down. Generation 2 VMs don't have a PIT,
+	 * and setting this value has no effect.
+	 */
+	i8253_clear_counter_on_shutdown = false;
+
 #if IS_ENABLED(CONFIG_HYPERV)
 	/*
 	 * Setup the hook to get control post apic initialization.
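 	 */

For reference, the new i8253_clear_counter_on_shutdown flag is consumed by
pit_shutdown() in drivers/clocksource/i8253.c. A rough sketch of that
consumer (reconstructed from the i8253 driver, not part of this diff):

	static int pit_shutdown(struct clock_event_device *evt)
	{
		if (!clockevent_state_oneshot(evt) &&
		    !clockevent_state_periodic(evt))
			return 0;

		raw_spin_lock(&i8253_lock);

		outb_p(0x30, PIT_MODE);	/* channel 0, mode 0: stop ticking */

		if (i8253_clear_counter_on_shutdown) {
			/* Zeroing the counter here is exactly what
			 * restarts the PIT on Hyper-V, hence the quirk
			 * above sets the flag to false. */
			outb_p(0, PIT_CH0);
			outb_p(0, PIT_CH0);
		}

		raw_spin_unlock(&i8253_lock);
		return 0;
	}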
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 2e173d47b450..4d36dcc1cf87 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) struct mtrr_gentry gentry; void __user *arg = (void __user *) __arg; + memset(&gentry, 0, sizeof(gentry)); + switch (cmd) { case MTRRIOC_ADD_ENTRY: case MTRRIOC_SET_ENTRY: diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile new file mode 100644 index 000000000000..6895049ceef7 --- /dev/null +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o +CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/resctrl/core.c index 44272b7107ad..c3a9dc63edf2 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -22,7 +22,7 @@ * Software Developer Manual June 2016, volume 3, section 17.17. */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define pr_fmt(fmt) "resctrl: " fmt #include <linux/slab.h> #include <linux/err.h> @@ -30,22 +30,19 @@ #include <linux/cpuhotplug.h> #include <asm/intel-family.h> -#include <asm/intel_rdt_sched.h> -#include "intel_rdt.h" - -#define MBA_IS_LINEAR 0x4 -#define MBA_MAX_MBPS U32_MAX +#include <asm/resctrl_sched.h> +#include "internal.h" /* Mutex to protect rdtgroup access. */ DEFINE_MUTEX(rdtgroup_mutex); /* - * The cached intel_pqr_state is strictly per CPU and can never be + * The cached resctrl_pqr_state is strictly per CPU and can never be * updated from a remote CPU. Functions which modify the state * are called with interrupts disabled and no preemption, which * is sufficient for the protection. 
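 *
 * (Concretely: resctrl_sched_in() runs from __switch_to() with interrupts
 * disabled, and update_cpu_closid_rmid() runs either locally with
 * preemption disabled or from IPI context, as the comments in rdtgroup.c
 * later in this patch spell out.)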
*/ -DEFINE_PER_CPU(struct intel_pqr_state, pqr_state); +DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); /* * Used to store the max resource name width and max resource data width @@ -60,9 +57,13 @@ int max_name_width, max_data_width; bool rdt_alloc_capable; static void -mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); +mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m, + struct rdt_resource *r); static void cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r); +static void +mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, + struct rdt_resource *r); #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains) @@ -72,7 +73,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3, .name = "L3", .domains = domain_init(RDT_RESOURCE_L3), - .msr_base = IA32_L3_CBM_BASE, + .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 3, .cache = { @@ -89,7 +90,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3DATA, .name = "L3DATA", .domains = domain_init(RDT_RESOURCE_L3DATA), - .msr_base = IA32_L3_CBM_BASE, + .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 3, .cache = { @@ -106,7 +107,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3CODE, .name = "L3CODE", .domains = domain_init(RDT_RESOURCE_L3CODE), - .msr_base = IA32_L3_CBM_BASE, + .msr_base = MSR_IA32_L3_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 3, .cache = { @@ -123,7 +124,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2, .name = "L2", .domains = domain_init(RDT_RESOURCE_L2), - .msr_base = IA32_L2_CBM_BASE, + .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 2, .cache = { @@ -140,7 +141,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2DATA, .name = "L2DATA", .domains = domain_init(RDT_RESOURCE_L2DATA), - .msr_base = IA32_L2_CBM_BASE, + .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 2, .cache = { @@ -157,7 +158,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2CODE, .name = "L2CODE", .domains = domain_init(RDT_RESOURCE_L2CODE), - .msr_base = IA32_L2_CBM_BASE, + .msr_base = MSR_IA32_L2_CBM_BASE, .msr_update = cat_wrmsr, .cache_level = 2, .cache = { @@ -174,10 +175,7 @@ struct rdt_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_MBA, .name = "MB", .domains = domain_init(RDT_RESOURCE_MBA), - .msr_base = IA32_MBA_THRTL_BASE, - .msr_update = mba_wrmsr, .cache_level = 3, - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -211,9 +209,10 @@ static inline void cache_alloc_hsw_probe(void) struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3]; u32 l, h, max_cbm = BIT_MASK(20) - 1; - if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0)) + if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) return; - rdmsr(IA32_L3_CBM_BASE, l, h); + + rdmsr(MSR_IA32_L3_CBM_BASE, l, h); /* If all the bits were set in MSR, return success */ if (l != max_cbm) @@ -259,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r) return false; } -static bool rdt_get_mem_config(struct rdt_resource *r) +static bool __get_mem_config_intel(struct rdt_resource *r) { union cpuid_0x10_3_eax eax; union cpuid_0x10_x_edx edx; @@ -285,6 +284,30 @@ static bool rdt_get_mem_config(struct rdt_resource *r) return true; } +static bool __rdt_get_mem_config_amd(struct rdt_resource *r) +{ + union cpuid_0x10_3_eax eax; + union cpuid_0x10_x_edx edx; + u32 
ebx, ecx; + + cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full); + r->num_closid = edx.split.cos_max + 1; + r->default_ctrl = MAX_MBA_BW_AMD; + + /* AMD does not use delay */ + r->membw.delay_linear = false; + + r->membw.min_bw = 0; + r->membw.bw_gran = 1; + /* Max value is 2048, Data width should be 4 in decimal */ + r->data_width = 4; + + r->alloc_capable = true; + r->alloc_enabled = true; + + return true; +} + static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { union cpuid_0x10_1_eax eax; @@ -344,6 +367,15 @@ static int get_cache_id(int cpu, int level) return -1; } +static void +mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) +{ + unsigned int i; + + for (i = m->low; i < m->high; i++) + wrmsrl(r->msr_base + i, d->ctrl_val[i]); +} + /* * Map the memory b/w percentage value to delay values * that can be written to QOS_MSRs. @@ -359,7 +391,8 @@ u32 delay_bw_map(unsigned long bw, struct rdt_resource *r) } static void -mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) +mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m, + struct rdt_resource *r) { unsigned int i; @@ -421,7 +454,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, struct list_head *l; if (id < 0) - return ERR_PTR(id); + return ERR_PTR(-ENODEV); list_for_each(l, &r->domains) { d = list_entry(l, struct rdt_domain, list); @@ -639,7 +672,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) static void clear_closid_rmid(int cpu) { - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); + struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); state->default_closid = 0; state->default_rmid = 0; @@ -648,7 +681,7 @@ static void clear_closid_rmid(int cpu) wrmsr(IA32_PQR_ASSOC, 0, 0); } -static int intel_rdt_online_cpu(unsigned int cpu) +static int resctrl_online_cpu(unsigned int cpu) { struct rdt_resource *r; @@ -674,7 +707,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) } } -static int intel_rdt_offline_cpu(unsigned int cpu) +static int resctrl_offline_cpu(unsigned int cpu) { struct rdtgroup *rdtgrp; struct rdt_resource *r; @@ -794,6 +827,19 @@ static bool __init rdt_cpu_has(int flag) return ret; } +static __init bool get_mem_config(void) +{ + if (!rdt_cpu_has(X86_FEATURE_MBA)) + return false; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]); + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]); + + return false; +} + static __init bool get_rdt_alloc_resources(void) { bool ret = false; @@ -818,10 +864,9 @@ static __init bool get_rdt_alloc_resources(void) ret = true; } - if (rdt_cpu_has(X86_FEATURE_MBA)) { - if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA])) - ret = true; - } + if (get_mem_config()) + ret = true; + return ret; } @@ -840,7 +885,7 @@ static __init bool get_rdt_mon_resources(void) return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]); } -static __init void rdt_quirks(void) +static __init void __check_quirks_intel(void) { switch (boot_cpu_data.x86_model) { case INTEL_FAM6_HASWELL_X: @@ -855,30 +900,91 @@ static __init void rdt_quirks(void) } } +static __init void check_quirks(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + __check_quirks_intel(); +} + static __init bool get_rdt_resources(void) { - rdt_quirks(); rdt_alloc_capable = get_rdt_alloc_resources(); rdt_mon_capable = 
get_rdt_mon_resources(); return (rdt_mon_capable || rdt_alloc_capable); } +static __init void rdt_init_res_defs_intel(void) +{ + struct rdt_resource *r; + + for_each_rdt_resource(r) { + if (r->rid == RDT_RESOURCE_L3 || + r->rid == RDT_RESOURCE_L3DATA || + r->rid == RDT_RESOURCE_L3CODE || + r->rid == RDT_RESOURCE_L2 || + r->rid == RDT_RESOURCE_L2DATA || + r->rid == RDT_RESOURCE_L2CODE) + r->cbm_validate = cbm_validate_intel; + else if (r->rid == RDT_RESOURCE_MBA) { + r->msr_base = MSR_IA32_MBA_THRTL_BASE; + r->msr_update = mba_wrmsr_intel; + r->parse_ctrlval = parse_bw_intel; + } + } +} + +static __init void rdt_init_res_defs_amd(void) +{ + struct rdt_resource *r; + + for_each_rdt_resource(r) { + if (r->rid == RDT_RESOURCE_L3 || + r->rid == RDT_RESOURCE_L3DATA || + r->rid == RDT_RESOURCE_L3CODE || + r->rid == RDT_RESOURCE_L2 || + r->rid == RDT_RESOURCE_L2DATA || + r->rid == RDT_RESOURCE_L2CODE) + r->cbm_validate = cbm_validate_amd; + else if (r->rid == RDT_RESOURCE_MBA) { + r->msr_base = MSR_IA32_MBA_BW_BASE; + r->msr_update = mba_wrmsr_amd; + r->parse_ctrlval = parse_bw_amd; + } + } +} + +static __init void rdt_init_res_defs(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + rdt_init_res_defs_intel(); + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + rdt_init_res_defs_amd(); +} + static enum cpuhp_state rdt_online; -static int __init intel_rdt_late_init(void) +static int __init resctrl_late_init(void) { struct rdt_resource *r; int state, ret; + /* + * Initialize functions(or definitions) that are different + * between vendors here. + */ + rdt_init_res_defs(); + + check_quirks(); + if (!get_rdt_resources()) return -ENODEV; rdt_init_padding(); state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, - "x86/rdt/cat:online:", - intel_rdt_online_cpu, intel_rdt_offline_cpu); + "x86/resctrl/cat:online:", + resctrl_online_cpu, resctrl_offline_cpu); if (state < 0) return state; @@ -890,20 +996,20 @@ static int __init intel_rdt_late_init(void) rdt_online = state; for_each_alloc_capable_rdt_resource(r) - pr_info("Intel RDT %s allocation detected\n", r->name); + pr_info("%s allocation detected\n", r->name); for_each_mon_capable_rdt_resource(r) - pr_info("Intel RDT %s monitoring detected\n", r->name); + pr_info("%s monitoring detected\n", r->name); return 0; } -late_initcall(intel_rdt_late_init); +late_initcall(resctrl_late_init); -static void __exit intel_rdt_exit(void) +static void __exit resctrl_exit(void) { cpuhp_remove_state(rdt_online); rdtgroup_exit(); } -__exitcall(intel_rdt_exit); +__exitcall(resctrl_exit); diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 27937458c231..2dbd990a2eb7 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,10 +23,58 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/cpu.h> #include <linux/kernfs.h> #include <linux/seq_file.h> #include <linux/slab.h> -#include "intel_rdt.h" +#include "internal.h" + +/* + * Check whether MBA bandwidth percentage value is correct. The value is + * checked against the minimum and maximum bandwidth values specified by + * the hardware. The allocated bandwidth percentage is rounded to the next + * control step available on the hardware. 
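+ *
+ * For example, with a hypothetical bw_gran of 8, a request of 52 would be
+ * rounded up to roundup(52, 8) = 56; with the AMD defaults set up in
+ * core.c (bw_gran = 1), the requested value is taken as-is.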
+ */ +static bool bw_validate_amd(char *buf, unsigned long *data, + struct rdt_resource *r) +{ + unsigned long bw; + int ret; + + ret = kstrtoul(buf, 10, &bw); + if (ret) { + rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); + return false; + } + + if (bw < r->membw.min_bw || bw > r->default_ctrl) { + rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, + r->membw.min_bw, r->default_ctrl); + return false; + } + + *data = roundup(bw, (unsigned long)r->membw.bw_gran); + return true; +} + +int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) +{ + unsigned long bw_val; + + if (d->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + if (!bw_validate_amd(data->buf, &bw_val, r)) + return -EINVAL; + + d->new_ctrl = bw_val; + d->have_new_ctrl = true; + + return 0; +} /* * Check whether MBA bandwidth percentage value is correct. The value is @@ -64,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return true; } -int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d) +int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { unsigned long bw_val; if (d->have_new_ctrl) { - rdt_last_cmd_printf("duplicate domain %d\n", d->id); + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); return -EINVAL; } @@ -88,7 +136,7 @@ int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). * Additionally Haswell requires at least two bits set. */ -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) +bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r) { unsigned long first_bit, zero_bit, val; unsigned int cbm_len = r->cache.cbm_len; @@ -96,12 +144,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) ret = kstrtoul(buf, 16, &val); if (ret) { - rdt_last_cmd_printf("non-hex character in mask %s\n", buf); + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); return false; } if (val == 0 || val > r->default_ctrl) { - rdt_last_cmd_puts("mask out of range\n"); + rdt_last_cmd_puts("Mask out of range\n"); return false; } @@ -109,12 +157,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) { - rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val); + rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); return false; } if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("Need at least %d bits in mask\n", + rdt_last_cmd_printf("Need at least %d bits in the mask\n", r->cache.min_cbm_bits); return false; } @@ -124,6 +172,30 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) } /* + * Check whether a cache bit mask is valid. AMD allows non-contiguous + * bitmasks + */ +bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r) +{ + unsigned long val; + int ret; + + ret = kstrtoul(buf, 16, &val); + if (ret) { + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); + return false; + } + + if (val > r->default_ctrl) { + rdt_last_cmd_puts("Mask out of range\n"); + return false; + } + + *data = val; + return true; +} + +/* * Read one cache bit mask (hex). Check that it is valid for the current * resource type. 
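 *
 * For example, with an 11-bit CBM, 0x7f0 passes both validators, while
 * 0x505 (non-consecutive 1-bits) is rejected by cbm_validate_intel()
 * but accepted by cbm_validate_amd() above.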
*/ @@ -134,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, u32 cbm_val; if (d->have_new_ctrl) { - rdt_last_cmd_printf("duplicate domain %d\n", d->id); + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); return -EINVAL; } @@ -144,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, */ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && rdtgroup_pseudo_locked_in_hierarchy(d)) { - rdt_last_cmd_printf("pseudo-locked region in hierarchy\n"); + rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); return -EINVAL; } - if (!cbm_validate(data->buf, &cbm_val, r)) + if (!r->cbm_validate(data->buf, &cbm_val, r)) return -EINVAL; if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || rdtgrp->mode == RDT_MODE_SHAREABLE) && rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { - rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n"); + rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); return -EINVAL; } @@ -163,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, * either is exclusive. */ if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) { - rdt_last_cmd_printf("overlaps with exclusive group\n"); + rdt_last_cmd_puts("Overlaps with exclusive group\n"); return -EINVAL; } if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) { if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - rdt_last_cmd_printf("overlaps with other group\n"); + rdt_last_cmd_puts("Overlaps with other group\n"); return -EINVAL; } } @@ -292,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid) return parse_line(tok, r, rdtgrp); } - rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname); + rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); return -EINVAL; } @@ -310,9 +382,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, return -EINVAL; buf[nbytes - 1] = '\0'; + cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); @@ -323,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, */ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { ret = -EINVAL; - rdt_last_cmd_puts("resource group is pseudo-locked\n"); + rdt_last_cmd_puts("Resource group is pseudo-locked\n"); goto out; } @@ -367,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, out: rdtgroup_kn_unlock(of->kn); + cpus_read_unlock(); return ret ?: nbytes; } @@ -463,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) r = &rdt_resources_all[resid]; d = rdt_find_domain(r, domid, NULL); - if (!d) { + if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; goto out; } diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3736f6dc9545..822b7db634ee 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -1,20 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_INTEL_RDT_H -#define _ASM_X86_INTEL_RDT_H +#ifndef _ASM_X86_RESCTRL_INTERNAL_H +#define _ASM_X86_RESCTRL_INTERNAL_H #include <linux/sched.h> #include <linux/kernfs.h> #include <linux/jump_label.h> -#define IA32_L3_QOS_CFG 0xc81 -#define IA32_L2_QOS_CFG 0xc82 -#define IA32_L3_CBM_BASE 0xc90 -#define IA32_L2_CBM_BASE 0xd10 -#define IA32_MBA_THRTL_BASE 0xd50 +#define MSR_IA32_L3_QOS_CFG 0xc81 +#define 
MSR_IA32_L2_QOS_CFG 0xc82 +#define MSR_IA32_L3_CBM_BASE 0xc90 +#define MSR_IA32_L2_CBM_BASE 0xd10 +#define MSR_IA32_MBA_THRTL_BASE 0xd50 +#define MSR_IA32_MBA_BW_BASE 0xc0000200 -#define L3_QOS_CDP_ENABLE 0x01ULL +#define MSR_IA32_QM_CTR 0x0c8e +#define MSR_IA32_QM_EVTSEL 0x0c8d -#define L2_QOS_CDP_ENABLE 0x01ULL +#define L3_QOS_CDP_ENABLE 0x01ULL + +#define L2_QOS_CDP_ENABLE 0x01ULL /* * Event IDs are used to program IA32_QM_EVTSEL before reading event @@ -29,6 +33,9 @@ #define MBM_CNTR_WIDTH 24 #define MBM_OVERFLOW_INTERVAL 1000 #define MAX_MBA_BW 100u +#define MBA_IS_LINEAR 0x4 +#define MBA_MAX_MBPS U32_MAX +#define MAX_MBA_BW_AMD 0x800 #define RMID_VAL_ERROR BIT_ULL(63) #define RMID_VAL_UNAVAIL BIT_ULL(62) @@ -69,7 +76,7 @@ struct rmid_read { u64 val; }; -extern unsigned int intel_cqm_threshold; +extern unsigned int resctrl_cqm_threshold; extern bool rdt_alloc_capable; extern bool rdt_mon_capable; extern unsigned int rdt_mon_features; @@ -391,9 +398,9 @@ struct rdt_parse_data { * struct rdt_resource - attributes of an RDT resource * @rid: The index of the resource * @alloc_enabled: Is allocation enabled on this machine - * @mon_enabled: Is monitoring enabled for this feature + * @mon_enabled: Is monitoring enabled for this feature * @alloc_capable: Is allocation available on this machine - * @mon_capable: Is monitor feature available on this machine + * @mon_capable: Is monitor feature available on this machine * @name: Name to use in "schemata" file * @num_closid: Number of CLOSIDs available * @cache_level: Which cache level defines scope of this resource @@ -405,10 +412,11 @@ struct rdt_parse_data { * @cache: Cache allocation related data * @format_str: Per resource format string to show domain value * @parse_ctrlval: Per resource function pointer to parse control values - * @evt_list: List of monitoring events - * @num_rmid: Number of RMIDs available - * @mon_scale: cqm counter * mon_scale = occupancy in bytes - * @fflags: flags to choose base and info files + * @cbm_validate Cache bitmask validate function + * @evt_list: List of monitoring events + * @num_rmid: Number of RMIDs available + * @mon_scale: cqm counter * mon_scale = occupancy in bytes + * @fflags: flags to choose base and info files */ struct rdt_resource { int rid; @@ -431,6 +439,7 @@ struct rdt_resource { int (*parse_ctrlval)(struct rdt_parse_data *data, struct rdt_resource *r, struct rdt_domain *d); + bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r); struct list_head evt_list; int num_rmid; unsigned int mon_scale; @@ -439,8 +448,10 @@ struct rdt_resource { int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, struct rdt_domain *d); -int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, - struct rdt_domain *d); +int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); +int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); extern struct mutex rdtgroup_mutex; @@ -463,6 +474,10 @@ enum { RDT_NUM_RESOURCES, }; +#define for_each_rdt_resource(r) \ + for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\ + r++) + #define for_each_capable_rdt_resource(r) \ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\ r++) \ @@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool 
force_free); +bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r); +bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r); -#endif /* _ASM_X86_INTEL_RDT_H */ +#endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index b0f3aed76b75..f33f11f69078 100644 --- a/arch/x86/kernel/cpu/intel_rdt_monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -26,10 +26,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <asm/cpu_device_id.h> -#include "intel_rdt.h" - -#define MSR_IA32_QM_CTR 0x0c8e -#define MSR_IA32_QM_EVTSEL 0x0c8d +#include "internal.h" struct rmid_entry { u32 rmid; @@ -73,7 +70,7 @@ unsigned int rdt_mon_features; * This is the threshold cache occupancy at which we will consider an * RMID available for re-allocation. */ -unsigned int intel_cqm_threshold; +unsigned int resctrl_cqm_threshold; static inline struct rmid_entry *__rmid_entry(u32 rmid) { @@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry) { u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID); - return val >= intel_cqm_threshold; + return val >= resctrl_cqm_threshold; } /* @@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) list_for_each_entry(d, &r->domains, list) { if (cpumask_test_cpu(cpu, &d->cpu_mask)) { val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID); - if (val <= intel_cqm_threshold) + if (val <= resctrl_cqm_threshold) continue; } @@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r) int rdt_get_mon_l3_config(struct rdt_resource *r) { + unsigned int cl_size = boot_cpu_data.x86_cache_size; int ret; r->mon_scale = boot_cpu_data.x86_cache_occ_scale; @@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r) * * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC. 
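 *
 * Worked example: a 35MB LLC reports cl_size = 35840 (KB), so with
 * 56 RMIDs the threshold is 35840 * 1024 / 56 = 655360 bytes, i.e.
 * 640KB, and 640KB / 35MB is ~1.8% (before the mon_scale division below).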
*/ - intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid; + resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid; /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */ - intel_cqm_threshold /= r->mon_scale; + resctrl_cqm_threshold /= r->mon_scale; ret = dom_data_init(r); if (ret) diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 815b4e92522c..14bed6af8377 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -24,14 +24,14 @@ #include <asm/cacheflush.h> #include <asm/intel-family.h> -#include <asm/intel_rdt_sched.h> +#include <asm/resctrl_sched.h> #include <asm/perf_event.h> #include "../../events/perf_event.h" /* For X86_CONFIG() */ -#include "intel_rdt.h" +#include "internal.h" #define CREATE_TRACE_POINTS -#include "intel_rdt_pseudo_lock_event.h" +#include "pseudo_lock_event.h" /* * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware @@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) for_each_cpu(cpu, &plr->d->cpu_mask) { pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); if (!pm_req) { - rdt_last_cmd_puts("fail allocating mem for PM QoS\n"); + rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); ret = -ENOMEM; goto out_err; } @@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) DEV_PM_QOS_RESUME_LATENCY, 30); if (ret < 0) { - rdt_last_cmd_printf("fail to add latency req cpu%d\n", + rdt_last_cmd_printf("Failed to add latency req CPU%d\n", cpu); kfree(pm_req); ret = -1; @@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) plr->cpu = cpumask_first(&plr->d->cpu_mask); if (!cpu_online(plr->cpu)) { - rdt_last_cmd_printf("cpu %u associated with cache not online\n", + rdt_last_cmd_printf("CPU %u associated with cache not online\n", plr->cpu); ret = -ENODEV; goto out_region; @@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr) } ret = -1; - rdt_last_cmd_puts("unable to determine cache line size\n"); + rdt_last_cmd_puts("Unable to determine cache line size\n"); out_region: pseudo_lock_region_clear(plr); return ret; @@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) * KMALLOC_MAX_SIZE. */ if (plr->size > KMALLOC_MAX_SIZE) { - rdt_last_cmd_puts("requested region exceeds maximum size\n"); + rdt_last_cmd_puts("Requested region exceeds maximum size\n"); ret = -E2BIG; goto out_region; } plr->kmem = kzalloc(plr->size, GFP_KERNEL); if (!plr->kmem) { - rdt_last_cmd_puts("unable to allocate memory\n"); + rdt_last_cmd_puts("Unable to allocate memory\n"); ret = -ENOMEM; goto out_region; } @@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * default closid associated with it. 
*/ if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("cannot pseudo-lock default group\n"); + rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); return -EINVAL; } @@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) */ prefetch_disable_bits = get_prefetch_disable_bits(); if (prefetch_disable_bits == 0) { - rdt_last_cmd_puts("pseudo-locking not supported\n"); + rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; } if (rdtgroup_monitor_in_progress(rdtgrp)) { - rdt_last_cmd_puts("monitoring in progress\n"); + rdt_last_cmd_puts("Monitoring in progress\n"); return -EINVAL; } if (rdtgroup_tasks_assigned(rdtgrp)) { - rdt_last_cmd_puts("tasks assigned to resource group\n"); + rdt_last_cmd_puts("Tasks assigned to resource group\n"); return -EINVAL; } @@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) } if (rdtgroup_locksetup_user_restrict(rdtgrp)) { - rdt_last_cmd_puts("unable to modify resctrl permissions\n"); + rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); return -EIO; } ret = pseudo_lock_init(rdtgrp); if (ret) { - rdt_last_cmd_puts("unable to init pseudo-lock region\n"); + rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); goto out_release; } @@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) if (rdt_mon_capable) { ret = alloc_rmid(); if (ret < 0) { - rdt_last_cmd_puts("out of RMIDs\n"); + rdt_last_cmd_puts("Out of RMIDs\n"); return ret; } rdtgrp->mon.rmid = ret; @@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { ret = PTR_ERR(thread); - rdt_last_cmd_printf("locking thread returned error %d\n", ret); + rdt_last_cmd_printf("Locking thread returned error %d\n", ret); goto out_cstates; } @@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) * the cleared, but not freed, plr struct resulting in an * empty pseudo-locking loop. */ - rdt_last_cmd_puts("locking thread interrupted\n"); + rdt_last_cmd_puts("Locking thread interrupted\n"); goto out_cstates; } ret = pseudo_lock_minor_get(&new_minor); if (ret < 0) { - rdt_last_cmd_puts("unable to obtain a new minor number\n"); + rdt_last_cmd_puts("Unable to obtain a new minor number\n"); goto out_cstates; } @@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) if (IS_ERR(dev)) { ret = PTR_ERR(dev); - rdt_last_cmd_printf("failed to create character device: %d\n", + rdt_last_cmd_printf("Failed to create character device: %d\n", ret); goto out_debugfs; } diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h index 2c041e6d9f05..428ebbd4270b 100644 --- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h @@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3, #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . 
-#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#define TRACE_INCLUDE_FILE pseudo_lock_event
 #include <trace/define_trace.h>
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index f27b8115ffa2..8388adf241b2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -35,8 +35,8 @@
 
 #include <uapi/linux/magic.h>
 
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
+#include <asm/resctrl_sched.h>
+#include "internal.h"
 
 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 }
 
 /*
- * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * This is safe against resctrl_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
  * from update_closid_rmid() is protected against __switch_to() because
  * preemption is disabled.
@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
 	 * executing task might have its own closid selected. Just reuse
 	 * the context switch code.
 	 */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 }
 
 /*
@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 	/* Check whether cpus belong to parent ctrl group */
 	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
 	if (cpumask_weight(tmpmask)) {
-		rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
+		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
 		return -EINVAL;
 	}
 
@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	rdt_last_cmd_clear();
 	if (!rdtgrp) {
 		ret = -ENOENT;
-		rdt_last_cmd_puts("directory was removed\n");
+		rdt_last_cmd_puts("Directory was removed\n");
 		goto unlock;
 	}
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
 	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("pseudo-locking in progress\n");
+		rdt_last_cmd_puts("Pseudo-locking in progress\n");
 		goto unlock;
 	}
 
@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 
 	ret = cpumask_parse(buf, newmask);
 	if (ret) {
-		rdt_last_cmd_puts("bad cpu list/mask\n");
+		rdt_last_cmd_puts("Bad CPU list/mask\n");
 		goto unlock;
 	}
 
@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
 	if (cpumask_weight(tmpmask)) {
 		ret = -EINVAL;
-		rdt_last_cmd_puts("can only assign online cpus\n");
+		rdt_last_cmd_puts("Can only assign online CPUs\n");
 		goto unlock;
 	}
 
@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
 
 	preempt_disable();
 	/* update PQR_ASSOC MSR to make resource group go into effect */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 	preempt_enable();
 
 	kfree(callback);
@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 		 */
 		atomic_dec(&rdtgrp->waitcount);
 		kfree(callback);
-		rdt_last_cmd_puts("task exited\n");
+		rdt_last_cmd_puts("Task exited\n");
 	} else {
 		/*
 		 * For ctrl_mon groups move both closid and rmid. 
@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { ret = -EINVAL; - rdt_last_cmd_puts("pseudo-locking in progress\n"); + rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto unlock; } @@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of, { struct rdt_resource *r = of->kn->parent->priv; - seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale); + seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); return 0; } @@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, if (bytes > (boot_cpu_data.x86_cache_size * 1024)) return -EINVAL; - intel_cqm_threshold = bytes / r->mon_scale; + resctrl_cqm_threshold = bytes / r->mon_scale; return nbytes; } @@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, * peer RDT CDP resource. Hence the WARN. */ _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); - if (WARN_ON(!_d_cdp)) { + if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { _r_cdp = NULL; ret = -EINVAL; } @@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) list_for_each_entry(d, &r->domains, list) { if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], rdtgrp->closid, false)) { - rdt_last_cmd_puts("schemata overlaps\n"); + rdt_last_cmd_puts("Schemata overlaps\n"); return false; } } } if (!has_cache) { - rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); + rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); return false; } @@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, goto out; if (mode == RDT_MODE_PSEUDO_LOCKED) { - rdt_last_cmd_printf("cannot change pseudo-locked group\n"); + rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); ret = -EINVAL; goto out; } @@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, goto out; rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; } else { - rdt_last_cmd_printf("unknown/unsupported mode\n"); + rdt_last_cmd_puts("Unknown or unsupported mode\n"); ret = -EINVAL; } @@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg) { bool *enable = arg; - wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); + wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } static void l2_qos_cfg_update(void *arg) { bool *enable = arg; - wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); + wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? 
L2_QOS_CDP_ENABLE : 0ULL); } static inline bool is_mba_linear(void) @@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data) if (ret) goto out; } else if (!strcmp(token, "mba_MBps")) { - ret = set_mba_sc(true); + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + ret = set_mba_sc(true); + else + ret = -EINVAL; if (ret) goto out; } else { @@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) tmp_cbm = d->new_ctrl; if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("no space on %s:%d\n", + rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id); return -ENOSPC; } @@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) continue; ret = update_domains(r, rdtgrp->closid); if (ret < 0) { - rdt_last_cmd_puts("failed to initialize allocations\n"); + rdt_last_cmd_puts("Failed to initialize allocations\n"); return ret; } rdtgrp->mode = RDT_MODE_SHAREABLE; @@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, rdt_last_cmd_clear(); if (!prdtgrp) { ret = -ENODEV; - rdt_last_cmd_puts("directory was removed\n"); + rdt_last_cmd_puts("Directory was removed\n"); goto out_unlock; } @@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { ret = -EINVAL; - rdt_last_cmd_puts("pseudo-locking in progress\n"); + rdt_last_cmd_puts("Pseudo-locking in progress\n"); goto out_unlock; } @@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); if (!rdtgrp) { ret = -ENOSPC; - rdt_last_cmd_puts("kernel out of memory\n"); + rdt_last_cmd_puts("Kernel out of memory\n"); goto out_unlock; } *r = rdtgrp; @@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, if (rdt_mon_capable) { ret = alloc_rmid(); if (ret < 0) { - rdt_last_cmd_puts("out of RMIDs\n"); + rdt_last_cmd_puts("Out of RMIDs\n"); goto out_destroy; } rdtgrp->mon.rmid = ret; @@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, kn = rdtgrp->kn; ret = closid_alloc(); if (ret < 0) { - rdt_last_cmd_puts("out of CLOSIDs\n"); + rdt_last_cmd_puts("Out of CLOSIDs\n"); goto out_common_fail; } closid = ret; diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 0631f5328b7f..94aa1c72ca98 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -5,9 +5,10 @@ #include <linux/cpu.h> #include <asm/pat.h> +#include <asm/apic.h> #include <asm/processor.h> -#include <asm/apic.h> +#include "cpu.h" struct cpuid_bit { u16 feature; @@ -17,7 +18,11 @@ struct cpuid_bit { u32 sub_leaf; }; -/* Please keep the leaf sorted by cpuid_bit.level for faster search. */ +/* + * Please keep the leaf sorted by cpuid_bit.level for faster search. + * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID + * levels are different and there is a separate entry for each. 
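+ * (The Intel entry lives at CPUID leaf 0x00000010; the AMD entry added
+ * below uses leaf 0x80000008, EBX bit 6.)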
+ */ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, @@ -29,6 +34,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, + { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 }, { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 }, { 0, 0, 0, 0, 0 } diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 71ca064e3794..8f6c784141d1 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -10,6 +10,8 @@ #include <asm/pat.h> #include <asm/processor.h> +#include "cpu.h" + /* leaf 0xb SMT level */ #define SMT_LEVEL 0 diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index d9ab49bed8af..0eda91f8eeac 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s) } early_param("no-vmw-sched-clock", setup_vmw_sched_clock); -static unsigned long long vmware_sched_clock(void) +static unsigned long long notrace vmware_sched_clock(void) { unsigned long long ns; diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index f631a3f15587..c8b07d8ea5a2 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -37,6 +37,7 @@ #include <asm/reboot.h> #include <asm/virtext.h> #include <asm/intel_pt.h> +#include <asm/crash.h> /* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index eb8ab3915268..22369dd5de3b 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, /** * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the - * memory with the encryption mask set to accomodate kdump on SME-enabled + * memory with the encryption mask set to accommodate kdump on SME-enabled * machines. */ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 7299dcbf8e85..8d85e00bb40a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -23,6 +23,7 @@ #include <asm/pci_x86.h> #include <asm/setup.h> #include <asm/i8259.h> +#include <asm/prom.h> __initdata u64 initial_dtb; char __initdata cmd_line[COMMAND_LINE_SIZE]; diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 61a949d84dfa..d99a8ee9e185 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); } + local_bh_disable(); fpu->initialized = 1; - preempt_disable(); fpu__restore(fpu); - preempt_enable(); + local_bh_enable(); return err; } else { diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 87a57b7642d3..cd3956fc8158 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -811,7 +811,7 @@ void fpu__resume_cpu(void) * * Note: does not work for compacted buffers. 
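 *
 * For example, a caller passing XFEATURE_MASK_SSE (0x2) gets
 * feature_nr = fls64(0x2) - 1 = 1, i.e. the address of state component 1
 * in the standard (non-compacted) layout.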
*/ -void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) +static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) { int feature_nr = fls64(xstate_feature_mask) - 1; diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 01ebcb6f263e..7ee8067cbf45 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, { unsigned long old; int faulted; - struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; @@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, return; } - trace.func = self_addr; - trace.depth = current->curr_ret_stack + 1; - - /* Only trace if the calling function expects to */ - if (!ftrace_graph_entry(&trace)) { + if (function_graph_enter(old, self_addr, frame_pointer, parent)) *parent = old; - return; - } - - if (ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer, parent) == -EBUSY) { - *parent = old; - return; - } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 76fa3b836598..ec6fefbfd3c0 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -37,7 +37,6 @@ asmlinkage __visible void __init i386_start_kernel(void) cr4_init_shadow(); sanitize_boot_params(&boot_params); - x86_verify_bootdata_version(); x86_early_init_platform_quirks(); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7663a8eb602b..16b1cbd3a61e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -457,8 +457,6 @@ void __init x86_64_start_reservations(char *real_mode_data) if (!boot_params.hdr.version) copy_bootdata(__va(real_mode_data)); - x86_verify_bootdata_version(); - x86_early_init_platform_quirks(); switch (boot_params.hdr.hardware_subarch) { diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 747c758f67b7..d1dbe8e4eb82 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -386,7 +386,7 @@ NEXT_PAGE(early_dynamic_pgts) .data -#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH) NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_PAGE_OFFSET*8, 0 diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 108c48d0d40e..1b2ee55a2dfb 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -19,6 +19,7 @@ #include <asm/pci_x86.h> #include <asm/reboot.h> #include <asm/setup.h> +#include <asm/jailhouse_para.h> static __initdata struct jailhouse_setup_data setup_data; static unsigned int precalibrated_tsc_khz; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index c33b06f5faa4..4ba75afba527 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -751,7 +751,7 @@ STACK_FRAME_NON_STANDARD(kretprobe_trampoline); /* * Called from kretprobe_trampoline */ -__visible __used void *trampoline_handler(struct pt_regs *regs) +static __used void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; @@ -1026,12 +1026,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) } NOKPROBE_SYMBOL(kprobe_fault_handler); -bool arch_within_kprobe_blacklist(unsigned long addr) +int __init arch_populate_kprobe_blacklist(void) { - return 
(addr >= (unsigned long)__kprobes_text_start && - addr < (unsigned long)__kprobes_text_end) || - (addr >= (unsigned long)__entry_text_start && - addr < (unsigned long)__entry_text_end); + return kprobe_add_area_blacklist((unsigned long)__entry_text_start, + (unsigned long)__entry_text_end); } int __init arch_init_kprobes(void) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 40b16b270656..6adf6e6c2933 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) int len = 0, ret; while (len < RELATIVEJUMP_SIZE) { - ret = __copy_instruction(dest + len, src + len, real, &insn); + ret = __copy_instruction(dest + len, src + len, real + len, &insn); if (!ret || !can_boost(&insn, src + len)) return -EINVAL; len += ret; diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 30084ecaa20f..e811d4d1c824 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -1,19 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* KVM paravirtual clock driver. A clocksource implementation Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clocksource.h> diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ab18e0884dc6..6135ae8ce036 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm) /* * If PTI is enabled, this maps the LDT into the kernelmode and * usermode tables for the given mm. - * - * There is no corresponding unmap function. Even if the LDT is freed, we - * leave the PTEs around until the slot is reused or the mm is destroyed. - * This is harmless: the LDT is always in ordinary memory, and no one will - * access the freed slot. - * - * If we wanted to unmap freed LDTs, we'd also need to do a flush to make - * it useful, and the flush would slow down modify_ldt(). */ static int map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) @@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) unsigned long va; bool is_vmalloc; spinlock_t *ptl; - pgd_t *pgd; - int i; + int i, nr_pages; if (!static_cpu_has(X86_FEATURE_PTI)) return 0; @@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) /* Check if the current mappings are sane */ sanity_check_ldt_mapping(mm); - /* - * Did we already have the top level entry allocated? We can't - * use pgd_none() for this because it doens't do anything on - * 4-level page table kernels. 
- */ - pgd = pgd_offset(mm, LDT_BASE_ADDR); - is_vmalloc = is_vmalloc_addr(ldt->entries); - for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); + + for (i = 0; i < nr_pages; i++) { unsigned long offset = i << PAGE_SHIFT; const void *src = (char *)ldt->entries + offset; unsigned long pfn; @@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) /* Propagate LDT mapping to the user page-table */ map_ldt_struct_to_user(mm); - va = (unsigned long)ldt_slot_va(slot); - flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false); - ldt->slot = slot; return 0; } +static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) +{ + unsigned long va; + int i, nr_pages; + + if (!ldt) + return; + + /* LDT map/unmap is only required for PTI */ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); + + for (i = 0; i < nr_pages; i++) { + unsigned long offset = i << PAGE_SHIFT; + spinlock_t *ptl; + pte_t *ptep; + + va = (unsigned long)ldt_slot_va(ldt->slot) + offset; + ptep = get_locked_pte(mm, va, &ptl); + pte_clear(mm, va, ptep); + pte_unmap_unlock(ptep, ptl); + } + + va = (unsigned long)ldt_slot_va(ldt->slot); + flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false); +} + #else /* !CONFIG_PAGE_TABLE_ISOLATION */ static int @@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) { return 0; } + +static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) +{ +} #endif /* CONFIG_PAGE_TABLE_ISOLATION */ static void free_ldt_pgtables(struct mm_struct *mm) @@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) } install_ldt(mm, new_ldt); + unmap_ldt_struct(mm, old_ldt); free_ldt_struct(old_ldt); error = 0; diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S deleted file mode 100644 index 161c95059044..000000000000 --- a/arch/x86/kernel/macros.S +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* - * This file includes headers whose assembly part includes macros which are - * commonly used. The macros are precompiled into assmebly file which is later - * assembled together with each compiled file. - */ - -#include <linux/compiler.h> -#include <asm/refcount.h> -#include <asm/alternative-asm.h> -#include <asm/bug.h> -#include <asm/paravirt.h> -#include <asm/asm.h> -#include <asm/cpufeature.h> -#include <asm/jump_label.h> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c93fcfdf1673..90ae0ca51083 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -22,6 +22,8 @@ #include <linux/utsname.h> #include <linux/stackprotector.h> #include <linux/cpuidle.h> +#include <linux/acpi.h> +#include <linux/elf-randomize.h> #include <trace/events/power.h> #include <linux/hw_breakpoint.h> #include <asm/cpu.h> @@ -39,6 +41,9 @@ #include <asm/desc.h> #include <asm/prctl.h> #include <asm/spec-ctrl.h> +#include <asm/proto.h> + +#include "process.h" /* * per-CPU TSS segments. 
Threads are completely 'soft' on Linux, @@ -252,11 +257,12 @@ void arch_setup_new_exec(void) enable_cpuid(); } -static inline void switch_to_bitmap(struct tss_struct *tss, - struct thread_struct *prev, +static inline void switch_to_bitmap(struct thread_struct *prev, struct thread_struct *next, unsigned long tifp, unsigned long tifn) { + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + if (tifn & _TIF_IO_BITMAP) { /* * Copy the relevant range of the IO bitmap. @@ -395,32 +401,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); } -static __always_inline void intel_set_ssb_state(unsigned long tifn) +/* + * Update the MSRs managing speculation control during context switch. + * + * tifp: Previous task's thread flags + * tifn: Next task's thread flags + */ +static __always_inline void __speculation_ctrl_update(unsigned long tifp, + unsigned long tifn) { - u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); + unsigned long tif_diff = tifp ^ tifn; + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + + /* + * If TIF_SSBD is different, select the proper mitigation + * method. Note that if SSBD mitigation is disabled or permanently + * enabled this branch can't be taken because nothing can set + * TIF_SSBD. + */ + if (tif_diff & _TIF_SSBD) { + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + amd_set_ssb_virt_state(tifn); + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + amd_set_core_ssb_state(tifn); + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + msr |= ssbd_tif_to_spec_ctrl(tifn); + updmsr = true; + } + } + + /* + * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, + * otherwise avoid the MSR write. + */ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); + msr |= stibp_tif_to_spec_ctrl(tifn); + } - wrmsrl(MSR_IA32_SPEC_CTRL, msr); + if (updmsr) + wrmsrl(MSR_IA32_SPEC_CTRL, msr); } -static __always_inline void __speculative_store_bypass_update(unsigned long tifn) +static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) { - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) - amd_set_ssb_virt_state(tifn); - else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) - amd_set_core_ssb_state(tifn); - else - intel_set_ssb_state(tifn); + if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) { + if (task_spec_ssb_disable(tsk)) + set_tsk_thread_flag(tsk, TIF_SSBD); + else + clear_tsk_thread_flag(tsk, TIF_SSBD); + + if (task_spec_ib_disable(tsk)) + set_tsk_thread_flag(tsk, TIF_SPEC_IB); + else + clear_tsk_thread_flag(tsk, TIF_SPEC_IB); + } + /* Return the updated threadinfo flags */ + return task_thread_info(tsk)->flags; } -void speculative_store_bypass_update(unsigned long tif) +void speculation_ctrl_update(unsigned long tif) { + /* Forced update.
Make sure all relevant TIF flags are different */ preempt_disable(); - __speculative_store_bypass_update(tif); + __speculation_ctrl_update(~tif, tif); preempt_enable(); } -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss) +/* Called from seccomp/prctl update */ +void speculation_ctrl_update_current(void) +{ + preempt_disable(); + speculation_ctrl_update(speculation_ctrl_update_tif(current)); + preempt_enable(); +} + +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev, *next; unsigned long tifp, tifn; @@ -430,7 +489,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, tifn = READ_ONCE(task_thread_info(next_p)->flags); tifp = READ_ONCE(task_thread_info(prev_p)->flags); - switch_to_bitmap(tss, prev, next, tifp, tifn); + switch_to_bitmap(prev, next, tifp, tifn); propagate_user_return_notify(prev_p, next_p); @@ -451,8 +510,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, if ((tifp ^ tifn) & _TIF_NOCPUID) set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); - if ((tifp ^ tifn) & _TIF_SSBD) - __speculative_store_bypass_update(tifn); + if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) { + __speculation_ctrl_update(tifp, tifn); + } else { + speculation_ctrl_update_tif(prev_p); + tifn = speculation_ctrl_update_tif(next_p); + + /* Enforce MSR update to ensure consistent state */ + __speculation_ctrl_update(~tifn, tifn); + } } /* @@ -730,7 +796,7 @@ unsigned long get_wchan(struct task_struct *p) unsigned long start, bottom, top, sp, fp, ip, ret = 0; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) + if (p == current || p->state == TASK_RUNNING) return 0; if (!try_get_task_stack(p)) diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h new file mode 100644 index 000000000000..320ab978fb1f --- /dev/null +++ b/arch/x86/kernel/process.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Code shared between 32 and 64 bit + +#include <asm/spec-ctrl.h> + +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p); + +/* + * This needs to be inline to optimize for the common case where no extra + * work needs to be done. + */ +static inline void switch_to_extra(struct task_struct *prev, + struct task_struct *next) +{ + unsigned long next_tif = task_thread_info(next)->flags; + unsigned long prev_tif = task_thread_info(prev)->flags; + + if (IS_ENABLED(CONFIG_SMP)) { + /* + * Avoid __switch_to_xtra() invocation when conditional + * STIBP is disabled and the only different bit is + * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not + * in the TIF_WORK_CTXSW masks. + */ + if (!static_branch_likely(&switch_to_cond_stibp)) { + prev_tif &= ~_TIF_SPEC_IB; + next_tif &= ~_TIF_SPEC_IB; + } + } + + /* + * __switch_to_xtra() handles debug registers, i/o bitmaps, + * speculation mitigations etc. 
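+ * Checking prev_tif and next_tif separately lets pending work on either side take the slow path, while the common no-work case stays cheap.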
+ */ + if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT || + prev_tif & _TIF_WORK_CTXSW_PREV)) + __switch_to_xtra(prev, next); +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 5046a3c9dec2..9d08f0510620 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -56,9 +56,11 @@ #include <asm/debugreg.h> #include <asm/switch_to.h> #include <asm/vm86.h> -#include <asm/intel_rdt_sched.h> +#include <asm/resctrl_sched.h> #include <asm/proto.h> +#include "process.h" + void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; @@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); - /* - * Now maybe handle debug registers and/or IO bitmaps - */ - if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || - task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) - __switch_to_xtra(prev_p, next_p, tss); + switch_to_extra(prev_p, next_p); /* * Leave lazy mode, flushing any hypercalls made here. @@ -302,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); /* Load the Intel cache allocation PQR MSR. */ - intel_rdt_sched_in(); + resctrl_sched_in(); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 0e0b4288a4b2..721d02bd2d0d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -52,7 +52,7 @@ #include <asm/switch_to.h> #include <asm/xen/hypervisor.h> #include <asm/vdso.h> -#include <asm/intel_rdt_sched.h> +#include <asm/resctrl_sched.h> #include <asm/unistd.h> #include <asm/fsgsbase.h> #ifdef CONFIG_IA32_EMULATION @@ -60,6 +60,8 @@ #include <asm/unistd_32_ia32.h> #endif +#include "process.h" + /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) { @@ -337,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task, return base; } -void x86_fsbase_write_cpu(unsigned long fsbase) -{ - /* - * Set the selector to 0 as a notion, that the segment base is - * overwritten, which will be checked for skipping the segment load - * during context switch. - */ - loadseg(FS, 0); - wrmsrl(MSR_FS_BASE, fsbase); -} - -void x86_gsbase_write_cpu_inactive(unsigned long gsbase) -{ - /* Set the selector to 0 for the same reason as %fs above. 
*/ - loadseg(GS, 0); - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); -} - unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; @@ -383,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task) return gsbase; } -int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) +void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) { - /* - * Not strictly needed for %fs, but do it for symmetry - * with %gs - */ - if (unlikely(fsbase >= TASK_SIZE_MAX)) - return -EPERM; + WARN_ON_ONCE(task == current); - preempt_disable(); task->thread.fsbase = fsbase; - if (task == current) - x86_fsbase_write_cpu(fsbase); - task->thread.fsindex = 0; - preempt_enable(); - - return 0; } -int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) +void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) { - if (unlikely(gsbase >= TASK_SIZE_MAX)) - return -EPERM; + WARN_ON_ONCE(task == current); - preempt_disable(); task->thread.gsbase = gsbase; - if (task == current) - x86_gsbase_write_cpu_inactive(gsbase); - task->thread.gsindex = 0; - preempt_enable(); - - return 0; } int copy_thread_tls(unsigned long clone_flags, unsigned long sp, @@ -553,7 +517,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); @@ -617,12 +580,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* Reload sp0. */ update_task_stack(next_p); - /* - * Now maybe reload the debug registers and handle I/O bitmaps - */ - if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT || - task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) - __switch_to_xtra(prev_p, next_p, tss); + switch_to_extra(prev_p, next_p); #ifdef CONFIG_XEN_PV /* @@ -664,7 +622,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. */ - intel_rdt_sched_in(); + resctrl_sched_in(); return prev_p; } @@ -688,7 +646,7 @@ void set_personality_64bit(void) /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that - 32bit childs are affected again. */ + 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } @@ -758,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) switch (option) { case ARCH_SET_GS: { - ret = x86_gsbase_write_task(task, arg2); + if (unlikely(arg2 >= TASK_SIZE_MAX)) + return -EPERM; + + preempt_disable(); + /* + * ARCH_SET_GS has always overwritten the index + * and the base. Zero is the most sensible value + * to put in the index, and is the only value that + * makes any sense if FSGSBASE is unavailable. + */ + if (task == current) { + loadseg(GS, 0); + x86_gsbase_write_cpu_inactive(arg2); + + /* + * On non-FSGSBASE systems, save_base_legacy() expects + * that we also fill in thread.gsbase. 
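+ * If it were left stale, a later context switch would restore the old base.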
+ */ + task->thread.gsbase = arg2; + + } else { + task->thread.gsindex = 0; + x86_gsbase_write_task(task, arg2); + } + preempt_enable(); break; } case ARCH_SET_FS: { - ret = x86_fsbase_write_task(task, arg2); + /* + * Not strictly needed for %fs, but do it for symmetry + * with %gs + */ + if (unlikely(arg2 >= TASK_SIZE_MAX)) + return -EPERM; + + preempt_disable(); + /* + * Set the selector to 0 for the same reason + * as %gs above. + */ + if (task == current) { + loadseg(FS, 0); + x86_fsbase_write_cpu(arg2); + + /* + * On non-FSGSBASE systems, save_base_legacy() expects + * that we also fill in thread.fsbase. + */ + task->thread.fsbase = arg2; + } else { + task->thread.fsindex = 0; + x86_fsbase_write_task(task, arg2); + } + preempt_enable(); break; } case ARCH_GET_FS: { diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index ffae9b9740fd..4b8ee05dd6ad 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -397,11 +397,12 @@ static int putreg(struct task_struct *child, if (value >= TASK_SIZE_MAX) return -EIO; /* - * When changing the FS base, use the same - * mechanism as for do_arch_prctl_64(). + * When changing the FS base, use do_arch_prctl_64() + * to set the index to zero and to set the base + * as requested. */ if (child->thread.fsbase != value) - return x86_fsbase_write_task(child, value); + return do_arch_prctl_64(child, ARCH_SET_FS, value); return 0; case offsetof(struct user_regs_struct,gs_base): /* @@ -410,7 +411,7 @@ static int putreg(struct task_struct *child, if (value >= TASK_SIZE_MAX) return -EIO; if (child->thread.gsbase != value) - return x86_gsbase_write_task(child, value); + return do_arch_prctl_64(child, ARCH_SET_GS, value); return 0; #endif } diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 736348ead421..8451f38ad399 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -7,6 +7,7 @@ #include <linux/irq.h> #include <asm/hpet.h> +#include <asm/setup.h> #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b74e7bfed6ab..d494b9bfe618 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1280,23 +1280,6 @@ void __init setup_arch(char **cmdline_p) unwind_init(); } -/* - * From boot protocol 2.14 onwards we expect the bootloader to set the - * version to "0x8000 | <used version>". In case we find a version >= 2.14 - * without the 0x8000 we assume the boot loader supports 2.13 only and - * reset the version accordingly. The 0x8000 flag is removed in any case. 
- */ -void __init x86_verify_bootdata_version(void) -{ - if (boot_params.hdr.version & VERSION_WRITTEN) - boot_params.hdr.version &= ~VERSION_WRITTEN; - else if (boot_params.hdr.version >= 0x020e) - boot_params.hdr.version = 0x020d; - - if (boot_params.hdr.version < 0x020e) - boot_params.hdr.acpi_rsdp_addr = 0; -} - #ifdef CONFIG_X86_32 static struct resource video_ram_resource = { diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 623965e86b65..fa51723571c8 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -19,12 +19,15 @@ #include <linux/dmi.h> #include <linux/err.h> +#include <linux/efi.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/screen_info.h> #include <video/vga.h> + +#include <asm/efi.h> #include <asm/sysfb.h> enum { diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 5bd30c442794..496748ed266a 100644 --- a/arch/x86/kernel/tracepoint.c +++ b/arch/x86/kernel/tracepoint.c @@ -10,6 +10,8 @@ #include <asm/hw_irq.h> #include <asm/desc.h> +#include <asm/trace/exceptions.h> +#include <asm/trace/irq_vectors.h> DEFINE_STATIC_KEY_FALSE(trace_pagefault_key); diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 1eae5af491c2..891a75dbc131 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -26,65 +26,8 @@ #define TOPOLOGY_REGISTER_OFFSET 0x10 -#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL -/* - * Interrupt control on vSMPowered systems: - * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' - * and vice versa. - */ - -asmlinkage __visible unsigned long vsmp_save_fl(void) -{ - unsigned long flags = native_save_fl(); - - if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC)) - flags &= ~X86_EFLAGS_IF; - return flags; -} -PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl); - -__visible void vsmp_restore_fl(unsigned long flags) -{ - if (flags & X86_EFLAGS_IF) - flags &= ~X86_EFLAGS_AC; - else - flags |= X86_EFLAGS_AC; - native_restore_fl(flags); -} -PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl); - -asmlinkage __visible void vsmp_irq_disable(void) -{ - unsigned long flags = native_save_fl(); - - native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); -} -PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable); - -asmlinkage __visible void vsmp_irq_enable(void) -{ - unsigned long flags = native_save_fl(); - - native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); -} -PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable); - -static unsigned __init vsmp_patch(u8 type, void *ibuf, - unsigned long addr, unsigned len) -{ - switch (type) { - case PARAVIRT_PATCH(irq.irq_enable): - case PARAVIRT_PATCH(irq.irq_disable): - case PARAVIRT_PATCH(irq.save_fl): - case PARAVIRT_PATCH(irq.restore_fl): - return paravirt_patch_default(type, ibuf, addr, len); - default: - return native_patch(type, ibuf, addr, len); - } - -} - -static void __init set_vsmp_pv_ops(void) +#ifdef CONFIG_PCI +static void __init set_vsmp_ctl(void) { void __iomem *address; unsigned int cap, ctl, cfg; @@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void) } #endif - if (cap & ctl & (1 << 4)) { - /* Setup irq ops and turn on vSMP IRQ fastpath handling */ - pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); - pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable); - pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); - pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); - pv_ops.init.patch = vsmp_patch; - ctl &= ~(1 << 4); - } writel(ctl, address + 
4); ctl = readl(address + 4); pr_info("vSMP CTL: control set to:0x%08x\n", ctl); early_iounmap(address, 8); } -#else -static void __init set_vsmp_pv_ops(void) -{ -} -#endif - -#ifdef CONFIG_PCI static int is_vsmp = -1; static void __init detect_vsmp_box(void) @@ -164,11 +91,14 @@ static int is_vsmp_box(void) { return 0; } +static void __init set_vsmp_ctl(void) +{ +} #endif static void __init vsmp_cap_cpus(void) { -#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) +#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI) void __iomem *address; unsigned int cfg, topology, node_shift, maxcpus; @@ -221,6 +151,6 @@ void __init vsmp_init(void) vsmp_cap_cpus(); - set_vsmp_pv_ops(); + set_vsmp_ctl(); return; }
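
Editor's note on the speculation-control rework above: __speculation_ctrl_update() XORs the previous and the next task's TIF words and writes MSR_IA32_SPEC_CTRL only when a bit that feeds the MSR actually changed, while speculation_ctrl_update() passes ~tif as the "previous" word to force every bit to look changed. The user-space C sketch below demonstrates that scheme under illustrative assumptions: the bit positions, fake_wrmsrl() and spec_ctrl_update() are stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative bit positions; the kernel's TIF_* values differ. */
#define TIF_SSBD	(1UL << 0)
#define TIF_SPEC_IB	(1UL << 1)

/* Stand-in for wrmsrl(MSR_IA32_SPEC_CTRL, ...). */
static void fake_wrmsrl(unsigned long val)
{
	printf("MSR_IA32_SPEC_CTRL <- 0x%lx\n", val);
}

static void spec_ctrl_update(unsigned long tifp, unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;	/* bits that changed */
	unsigned long msr = 0;			/* stand-in for x86_spec_ctrl_base */
	int updmsr = 0;

	/* SSBD: only a changed bit can trigger the MSR write. */
	if (tif_diff & TIF_SSBD) {
		msr |= tifn & TIF_SSBD;
		updmsr = 1;
	}

	/*
	 * STIBP: fold the next task's state into the value unconditionally,
	 * but let only a changed bit trigger the write (mirrors the
	 * conditional STIBP branch above).
	 */
	updmsr |= !!(tif_diff & TIF_SPEC_IB);
	msr |= tifn & TIF_SPEC_IB;

	if (updmsr)
		fake_wrmsrl(msr);
}

int main(void)
{
	unsigned long tif = TIF_SPEC_IB;

	spec_ctrl_update(0, TIF_SSBD);		/* SSBD flipped: one write */
	spec_ctrl_update(TIF_SSBD, TIF_SSBD);	/* nothing changed: no write */
	spec_ctrl_update(~tif, tif);		/* forced update: always writes */
	return 0;
}

The last call is the ~tif trick from speculation_ctrl_update(): with every bit apparently different, the MSR is rewritten with the next task's full state, which is what the seccomp/prctl paths rely on after changing a task's flags.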