Diffstat (limited to 'arch/x86/include/asm')
27 files changed, 628 insertions, 396 deletions
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index 8e4ea39e55d0..31b627b43a8e 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -7,24 +7,16 @@ #include <asm/asm.h> #ifdef CONFIG_SMP -.macro LOCK_PREFIX_HERE + .macro LOCK_PREFIX +672: lock .pushsection .smp_locks,"a" .balign 4 - .long 671f - . # offset + .long 672b - . .popsection -671: -.endm - -.macro LOCK_PREFIX insn:vararg - LOCK_PREFIX_HERE - lock \insn -.endm + .endm #else -.macro LOCK_PREFIX_HERE -.endm - -.macro LOCK_PREFIX insn:vararg -.endm + .macro LOCK_PREFIX + .endm #endif /* diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index ea9886651c39..0660e14690c8 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -31,8 +31,15 @@ */ #ifdef CONFIG_SMP -#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t" -#define LOCK_PREFIX "LOCK_PREFIX " +#define LOCK_PREFIX_HERE \ + ".pushsection .smp_locks,\"a\"\n" \ + ".balign 4\n" \ + ".long 671f - .\n" /* offset */ \ + ".popsection\n" \ + "671:" + +#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " + #else /* ! CONFIG_SMP */ #define LOCK_PREFIX_HERE "" #define LOCK_PREFIX "" diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index 34a10b2d5b73..fc0693569f7a 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -5,15 +5,9 @@ #include <asm/cpufeatures.h> #ifdef CONFIG_64BIT -/* popcnt %edi, %eax */ -#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7" -/* popcnt %rdi, %rax */ -#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" #define REG_IN "D" #define REG_OUT "a" #else -/* popcnt %eax, %eax */ -#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0" #define REG_IN "a" #define REG_OUT "a" #endif @@ -24,7 +18,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res; - asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT) + asm (ALTERNATIVE("call __sw_hweight32", "popcntl %1, %0", X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); @@ -52,7 +46,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w) { unsigned long res; - asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) + asm (ALTERNATIVE("call __sw_hweight64", "popcntq %1, %0", X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 21b086786404..6467757bb39f 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -120,25 +120,12 @@ /* Exception table entry */ #ifdef __ASSEMBLY__ # define _ASM_EXTABLE_HANDLE(from, to, handler) \ - ASM_EXTABLE_HANDLE from to handler - -.macro ASM_EXTABLE_HANDLE from:req to:req handler:req - .pushsection "__ex_table","a" - .balign 4 - .long (\from) - . - .long (\to) - . - .long (\handler) - . + .pushsection "__ex_table","a" ; \ + .balign 4 ; \ + .long (from) - . ; \ + .long (to) - . ; \ + .long (handler) - . 
; \ .popsection -.endm -#else /* __ASSEMBLY__ */ - -# define _ASM_EXTABLE_HANDLE(from, to, handler) \ - "ASM_EXTABLE_HANDLE from=" #from " to=" #to \ - " handler=\"" #handler "\"\n\t" - -/* For C file, we already have NOKPROBE_SYMBOL macro */ - -#endif /* __ASSEMBLY__ */ # define _ASM_EXTABLE(from, to) \ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) @@ -161,7 +148,6 @@ _ASM_PTR (entry); \ .popsection -#ifdef __ASSEMBLY__ .macro ALIGN_DESTINATION /* check for bad alignment of destination */ movl %edi,%ecx @@ -185,7 +171,34 @@ _ASM_EXTABLE_UA(100b, 103b) _ASM_EXTABLE_UA(101b, 103b) .endm -#endif /* __ASSEMBLY__ */ + +#else +# define _EXPAND_EXTABLE_HANDLE(x) #x +# define _ASM_EXTABLE_HANDLE(from, to, handler) \ + " .pushsection \"__ex_table\",\"a\"\n" \ + " .balign 4\n" \ + " .long (" #from ") - .\n" \ + " .long (" #to ") - .\n" \ + " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \ + " .popsection\n" + +# define _ASM_EXTABLE(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) + +# define _ASM_EXTABLE_UA(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) + +# define _ASM_EXTABLE_FAULT(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) + +# define _ASM_EXTABLE_EX(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext) + +# define _ASM_EXTABLE_REFCOUNT(from, to) \ + _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount) + +/* For C file, we already have NOKPROBE_SYMBOL macro */ +#endif #ifndef __ASSEMBLY__ /* diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index a07ffd23e4dd..f6f6ef436599 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -36,6 +36,7 @@ static void sanitize_boot_params(struct boot_params *boot_params) */ if (boot_params->sentinel) { /* fields in boot_params are left uninitialized, clear them */ + boot_params->acpi_rsdp_addr = 0; memset(&boot_params->ext_ramdisk_image, 0, (char *)&boot_params->efi_info - (char *)&boot_params->ext_ramdisk_image); diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 5090035e6d16..6804d6642767 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -4,8 +4,6 @@ #include <linux/stringify.h> -#ifndef __ASSEMBLY__ - /* * Despite that some emulators terminate on UD2, we use it for WARN(). 
* @@ -22,15 +20,53 @@ #define LEN_UD2 2 +#ifdef CONFIG_GENERIC_BUG + +#ifdef CONFIG_X86_32 +# define __BUG_REL(val) ".long " __stringify(val) +#else +# define __BUG_REL(val) ".long " __stringify(val) " - 2b" +#endif + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define _BUG_FLAGS(ins, flags) \ +do { \ + asm volatile("1:\t" ins "\n" \ + ".pushsection __bug_table,\"aw\"\n" \ + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ + "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \ + "\t.word %c1" "\t# bug_entry::line\n" \ + "\t.word %c2" "\t# bug_entry::flags\n" \ + "\t.org 2b+%c3\n" \ + ".popsection" \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (flags), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#else /* !CONFIG_DEBUG_BUGVERBOSE */ + #define _BUG_FLAGS(ins, flags) \ do { \ - asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 " \ - "flags=%c2 size=%c3" \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (flags), \ + asm volatile("1:\t" ins "\n" \ + ".pushsection __bug_table,\"aw\"\n" \ + "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ + "\t.word %c0" "\t# bug_entry::flags\n" \ + "\t.org 2b+%c1\n" \ + ".popsection" \ + : : "i" (flags), \ "i" (sizeof(struct bug_entry))); \ } while (0) +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#else + +#define _BUG_FLAGS(ins, flags) asm volatile(ins) + +#endif /* CONFIG_GENERIC_BUG */ + #define HAVE_ARCH_BUG #define BUG() \ do { \ @@ -46,54 +82,4 @@ do { \ #include <asm-generic/bug.h> -#else /* __ASSEMBLY__ */ - -#ifdef CONFIG_GENERIC_BUG - -#ifdef CONFIG_X86_32 -.macro __BUG_REL val:req - .long \val -.endm -#else -.macro __BUG_REL val:req - .long \val - 2b -.endm -#endif - -#ifdef CONFIG_DEBUG_BUGVERBOSE - -.macro ASM_BUG ins:req file:req line:req flags:req size:req -1: \ins - .pushsection __bug_table,"aw" -2: __BUG_REL val=1b # bug_entry::bug_addr - __BUG_REL val=\file # bug_entry::file - .word \line # bug_entry::line - .word \flags # bug_entry::flags - .org 2b+\size - .popsection -.endm - -#else /* !CONFIG_DEBUG_BUGVERBOSE */ - -.macro ASM_BUG ins:req file:req line:req flags:req size:req -1: \ins - .pushsection __bug_table,"aw" -2: __BUG_REL val=1b # bug_entry::bug_addr - .word \flags # bug_entry::flags - .org 2b+\size - .popsection -.endm - -#endif /* CONFIG_DEBUG_BUGVERBOSE */ - -#else /* CONFIG_GENERIC_BUG */ - -.macro ASM_BUG ins:req file:req line:req flags:req size:req - \ins -.endm - -#endif /* CONFIG_GENERIC_BUG */ - -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_X86_BUG_H */ diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 7d442722ef24..aced6c9290d6 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -2,10 +2,10 @@ #ifndef _ASM_X86_CPUFEATURE_H #define _ASM_X86_CPUFEATURE_H -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - #include <asm/processor.h> + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) + #include <asm/asm.h> #include <linux/bitops.h> @@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline __pure bool _static_cpu_has(u16 bit) { - asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] " - "cap_byte=\"%[cap_byte]\" " - "feature=%P[feature] t_yes=%l[t_yes] " - "t_no=%l[t_no] always=%P[always]" + asm_volatile_goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 4f - .\n" /* repl offset */ + " .word %P[always]\n" /* always replace */ + " .byte 3b - 1b\n" /* src len */ + " .byte 
5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "4: jmp %l[t_no]\n" + "5:\n" + ".previous\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 0\n" /* no replacement */ + " .word %P[feature]\n" /* feature bit */ + " .byte 3b - 1b\n" /* src len */ + " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ + ".previous\n" + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" : : [feature] "i" (bit), [always] "i" (X86_FEATURE_ALWAYS), [bitnum] "i" (1 << (bit & 7)), @@ -199,44 +226,5 @@ t_no: #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ boot_cpu_data.x86_model -#else /* __ASSEMBLY__ */ - -.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req -1: - jmp 6f -2: - .skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90 -3: - .section .altinstructions,"a" - .long 1b - . /* src offset */ - .long 4f - . /* repl offset */ - .word \always /* always replace */ - .byte 3b - 1b /* src len */ - .byte 5f - 4f /* repl len */ - .byte 3b - 2b /* pad len */ - .previous - .section .altinstr_replacement,"ax" -4: - jmp \t_no -5: - .previous - .section .altinstructions,"a" - .long 1b - . /* src offset */ - .long 0 /* no replacement */ - .word \feature /* feature bit */ - .byte 3b - 1b /* src len */ - .byte 0 /* repl len */ - .byte 0 /* pad len */ - .previous - .section .altinstr_aux,"ax" -6: - testb \bitnum,\cap_byte - jnz \t_yes - jmp \t_no - .previous -.endm - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ #endif /* _ASM_X86_CPUFEATURE_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 28c4a502b419..6d6122524711 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -281,9 +281,11 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index db45773a63b6..42982a6cc6cf 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -141,6 +141,8 @@ extern int __init efi_reuse_config(u64 tables, int nr_tables); extern void efi_delete_dummy_variable(void); extern void efi_switch_mm(struct mm_struct *mm); extern void efi_recover_from_page_fault(unsigned long phys_addr); +extern void efi_free_boot_services(void); +extern void efi_reserve_boot_services(void); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h index eb377b6e9eed..bca4c743de77 100644 --- a/arch/x86/include/asm/fsgsbase.h +++ b/arch/x86/include/asm/fsgsbase.h @@ -16,8 +16,8 @@ */ extern unsigned long x86_fsbase_read_task(struct task_struct *task); extern unsigned long x86_gsbase_read_task(struct task_struct *task); -extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase); -extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase); +extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase); +extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase); /* Helper functions for reading/writing FS/GS base */ @@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void) return gsbase; } -extern void x86_fsbase_write_cpu(unsigned long fsbase); -extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase); +static inline void x86_fsbase_write_cpu(unsigned long fsbase) +{ + wrmsrl(MSR_FS_BASE, fsbase); +} + +static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase) +{ + wrmsrl(MSR_KERNEL_GS_BASE, gsbase); +} #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 4139f7650fe5..705dafc2d11a 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -10,6 +10,7 @@ #define _ASM_X86_HYPERV_TLFS_H #include <linux/types.h> +#include <asm/page.h> /* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent @@ -30,158 +31,150 @@ /* * Feature identification. EAX indicates which features are available * to the partition based upon the current partition privileges. + * These are HYPERV_CPUID_FEATURES.EAX bits. */ /* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */ -#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) +#define HV_X64_MSR_VP_RUNTIME_AVAILABLE BIT(0) /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ -#define HV_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) -/* Partition reference TSC MSR is available */ -#define HV_MSR_REFERENCE_TSC_AVAILABLE (1 << 9) -/* Partition Guest IDLE MSR is available */ -#define HV_X64_MSR_GUEST_IDLE_AVAILABLE (1 << 10) - -/* A partition's reference time stamp counter (TSC) page */ -#define HV_X64_MSR_REFERENCE_TSC 0x40000021 - -/* - * There is a single feature flag that signifies if the partition has access - * to MSRs with local APIC and TSC frequencies. 
- */ -#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11) - -/* AccessReenlightenmentControls privilege */ -#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13) - +#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) /* * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available */ -#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2) +#define HV_X64_MSR_SYNIC_AVAILABLE BIT(2) /* * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through * HV_X64_MSR_STIMER3_COUNT) available */ -#define HV_MSR_SYNTIMER_AVAILABLE (1 << 3) +#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) /* * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) * are available */ -#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4) +#define HV_X64_MSR_APIC_ACCESS_AVAILABLE BIT(4) /* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/ -#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5) +#define HV_X64_MSR_HYPERCALL_AVAILABLE BIT(5) /* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/ -#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6) +#define HV_X64_MSR_VP_INDEX_AVAILABLE BIT(6) /* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/ -#define HV_X64_MSR_RESET_AVAILABLE (1 << 7) - /* - * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, - * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, - * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available - */ -#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) - -/* Frequency MSRs available */ -#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE (1 << 8) - -/* Crash MSR available */ -#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10) - -/* stimer Direct Mode is available */ -#define HV_STIMER_DIRECT_MODE_AVAILABLE (1 << 19) +#define HV_X64_MSR_RESET_AVAILABLE BIT(7) +/* + * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE, + * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE, + * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available + */ +#define HV_X64_MSR_STAT_PAGES_AVAILABLE BIT(8) +/* Partition reference TSC MSR is available */ +#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) +/* Partition Guest IDLE MSR is available */ +#define HV_X64_MSR_GUEST_IDLE_AVAILABLE BIT(10) +/* + * There is a single feature flag that signifies if the partition has access + * to MSRs with local APIC and TSC frequencies. + */ +#define HV_X64_ACCESS_FREQUENCY_MSRS BIT(11) +/* AccessReenlightenmentControls privilege */ +#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13) /* - * Feature identification: EBX indicates which flags were specified at - * partition creation. The format is the same as the partition creation - * flag structure defined in section Partition Creation Flags. + * Feature identification: indicates which flags were specified at partition + * creation. The format is the same as the partition creation flag structure + * defined in section Partition Creation Flags. + * These are HYPERV_CPUID_FEATURES.EBX bits. 
*/ -#define HV_X64_CREATE_PARTITIONS (1 << 0) -#define HV_X64_ACCESS_PARTITION_ID (1 << 1) -#define HV_X64_ACCESS_MEMORY_POOL (1 << 2) -#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3) -#define HV_X64_POST_MESSAGES (1 << 4) -#define HV_X64_SIGNAL_EVENTS (1 << 5) -#define HV_X64_CREATE_PORT (1 << 6) -#define HV_X64_CONNECT_PORT (1 << 7) -#define HV_X64_ACCESS_STATS (1 << 8) -#define HV_X64_DEBUGGING (1 << 11) -#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12) -#define HV_X64_CONFIGURE_PROFILER (1 << 13) +#define HV_X64_CREATE_PARTITIONS BIT(0) +#define HV_X64_ACCESS_PARTITION_ID BIT(1) +#define HV_X64_ACCESS_MEMORY_POOL BIT(2) +#define HV_X64_ADJUST_MESSAGE_BUFFERS BIT(3) +#define HV_X64_POST_MESSAGES BIT(4) +#define HV_X64_SIGNAL_EVENTS BIT(5) +#define HV_X64_CREATE_PORT BIT(6) +#define HV_X64_CONNECT_PORT BIT(7) +#define HV_X64_ACCESS_STATS BIT(8) +#define HV_X64_DEBUGGING BIT(11) +#define HV_X64_CPU_POWER_MANAGEMENT BIT(12) /* * Feature identification. EDX indicates which miscellaneous features * are available to the partition. + * These are HYPERV_CPUID_FEATURES.EDX bits. */ /* The MWAIT instruction is available (per section MONITOR / MWAIT) */ -#define HV_X64_MWAIT_AVAILABLE (1 << 0) +#define HV_X64_MWAIT_AVAILABLE BIT(0) /* Guest debugging support is available */ -#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1) +#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1) /* Performance Monitor support is available*/ -#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2) +#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2) /* Support for physical CPU dynamic partitioning events is available*/ -#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3) +#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3) /* * Support for passing hypercall input parameter block via XMM * registers is available */ -#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) +#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4) /* Support for a virtual guest idle state is available */ -#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) -/* Guest crash data handler available */ -#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10) +#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5) +/* Frequency MSRs available */ +#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8) +/* Crash MSR available */ +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10) +/* stimer Direct Mode is available */ +#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19) /* * Implementation recommendations. Indicates which behaviors the hypervisor * recommends the OS implement for optimal performance. + * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits. 
+ */ +/* + * Recommend using hypercall for address space switches rather + * than MOV to CR3 instruction */ - /* - * Recommend using hypercall for address space switches rather - * than MOV to CR3 instruction - */ -#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0) +#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0) /* Recommend using hypercall for local TLB flushes rather * than INVLPG or MOV to CR3 instructions */ -#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1) +#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1) /* * Recommend using hypercall for remote TLB flushes rather * than inter-processor interrupts */ -#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2) +#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2) /* * Recommend using MSRs for accessing APIC registers * EOI, ICR and TPR rather than their memory-mapped counterparts */ -#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3) +#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3) /* Recommend using the hypervisor-provided MSR to initiate a system RESET */ -#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4) +#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4) /* * Recommend using relaxed timing for this partition. If used, * the VM should disable any watchdog timeouts that rely on the * timely delivery of external interrupts */ -#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) +#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5) /* * Recommend not using Auto End-Of-Interrupt feature */ -#define HV_DEPRECATING_AEOI_RECOMMENDED (1 << 9) +#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9) /* * Recommend using cluster IPI hypercalls. */ -#define HV_X64_CLUSTER_IPI_RECOMMENDED (1 << 10) +#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10) /* Recommend using the newer ExProcessorMasks interface */ -#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11) /* Recommend using enlightened VMCS */ -#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED (1 << 14) +#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14) -/* - * Crash notification flags. - */ -#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) -#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) +/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */ +#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) +#define HV_X64_NESTED_MSR_BITMAP BIT(19) + +/* Hyper-V specific model specific registers (MSRs) */ /* MSR used to identify the guest OS. */ #define HV_X64_MSR_GUEST_OS_ID 0x40000000 @@ -201,6 +194,9 @@ /* MSR used to read the per-partition time reference counter */ #define HV_X64_MSR_TIME_REF_COUNT 0x40000020 +/* A partition's reference time stamp counter (TSC) page */ +#define HV_X64_MSR_REFERENCE_TSC 0x40000021 + /* MSR used to retrieve the TSC frequency */ #define HV_X64_MSR_TSC_FREQUENCY 0x40000022 @@ -258,9 +254,11 @@ #define HV_X64_MSR_CRASH_P3 0x40000103 #define HV_X64_MSR_CRASH_P4 0x40000104 #define HV_X64_MSR_CRASH_CTL 0x40000105 -#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63) -#define HV_X64_MSR_CRASH_PARAMS \ - (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) + +/* TSC emulation after migration */ +#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 +#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 +#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 /* * Declare the MSR used to setup pages used to communicate with the hypervisor. 
@@ -271,7 +269,7 @@ union hv_x64_msr_hypercall_contents { u64 enable:1; u64 reserved:11; u64 guest_physical_address:52; - }; + } __packed; }; /* @@ -283,7 +281,7 @@ struct ms_hyperv_tsc_page { volatile u64 tsc_scale; volatile s64 tsc_offset; u64 reserved2[509]; -}; +} __packed; /* * The guest OS needs to register the guest ID with the hypervisor. @@ -311,39 +309,37 @@ struct ms_hyperv_tsc_page { #define HV_LINUX_VENDOR_ID 0x8100 -/* TSC emulation after migration */ -#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 - -/* Nested features (CPUID 0x4000000A) EAX */ -#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18) -#define HV_X64_NESTED_MSR_BITMAP BIT(19) - struct hv_reenlightenment_control { __u64 vector:8; __u64 reserved1:8; __u64 enabled:1; __u64 reserved2:15; __u64 target_vp:32; -}; - -#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 -#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 +} __packed; struct hv_tsc_emulation_control { __u64 enabled:1; __u64 reserved:63; -}; +} __packed; struct hv_tsc_emulation_status { __u64 inprogress:1; __u64 reserved:63; -}; +} __packed; #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1)) +/* + * Crash notification (HV_X64_MSR_CRASH_CTL) flags. + */ +#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) +#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) +#define HV_X64_MSR_CRASH_PARAMS \ + (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) + #define HV_IPI_LOW_VECTOR 0x10 #define HV_IPI_HIGH_VECTOR 0xff @@ -358,6 +354,7 @@ struct hv_tsc_emulation_status { #define HVCALL_POST_MESSAGE 0x005c #define HVCALL_SIGNAL_EVENT 0x005d #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 #define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001 #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12 @@ -409,7 +406,7 @@ typedef struct _HV_REFERENCE_TSC_PAGE { __u32 res1; __u64 tsc_scale; __s64 tsc_offset; -} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; +} __packed HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE; /* Define the number of synthetic interrupt sources. */ #define HV_SYNIC_SINT_COUNT (16) @@ -466,7 +463,7 @@ union hv_message_flags { struct { __u8 msg_pending:1; __u8 reserved:7; - }; + } __packed; }; /* Define port identifier type. */ @@ -475,7 +472,7 @@ union hv_port_id { struct { __u32 id:24; __u32 reserved:8; - } u; + } __packed u; }; /* Define synthetic interrupt controller message header. */ @@ -488,7 +485,7 @@ struct hv_message_header { __u64 sender; union hv_port_id port; }; -}; +} __packed; /* Define synthetic interrupt controller message format. */ struct hv_message { @@ -496,12 +493,12 @@ struct hv_message { union { __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; } u; -}; +} __packed; /* Define the synthetic interrupt message page layout. */ struct hv_message_page { struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; -}; +} __packed; /* Define timer message payload structure. */ struct hv_timer_message_payload { @@ -509,7 +506,7 @@ struct hv_timer_message_payload { __u32 reserved; __u64 expiration_time; /* When the timer expired */ __u64 delivery_time; /* When the message was delivered */ -}; +} __packed; /* Define virtual processor assist page structure. 
*/ struct hv_vp_assist_page { @@ -518,8 +515,9 @@ struct hv_vp_assist_page { __u64 vtl_control[2]; __u64 nested_enlightenments_control[2]; __u32 enlighten_vmentry; + __u32 padding; __u64 current_nested_vmcs; -}; +} __packed; struct hv_enlightened_vmcs { u32 revision_id; @@ -533,6 +531,8 @@ struct hv_enlightened_vmcs { u16 host_gs_selector; u16 host_tr_selector; + u16 padding16_1; + u64 host_ia32_pat; u64 host_ia32_efer; @@ -651,7 +651,7 @@ struct hv_enlightened_vmcs { u64 ept_pointer; u16 virtual_processor_id; - u16 padding16[3]; + u16 padding16_2[3]; u64 padding64_2[5]; u64 guest_physical_address; @@ -693,7 +693,7 @@ struct hv_enlightened_vmcs { u32 nested_flush_hypercall:1; u32 msr_bitmap:1; u32 reserved:30; - } hv_enlightenments_control; + } __packed hv_enlightenments_control; u32 hv_vp_id; u64 hv_vm_id; @@ -703,7 +703,7 @@ struct hv_enlightened_vmcs { u64 padding64_5[7]; u64 xss_exit_bitmap; u64 padding64_6[7]; -}; +} __packed; #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0) @@ -725,36 +725,129 @@ struct hv_enlightened_vmcs { #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF -#define HV_STIMER_ENABLE (1ULL << 0) -#define HV_STIMER_PERIODIC (1ULL << 1) -#define HV_STIMER_LAZY (1ULL << 2) -#define HV_STIMER_AUTOENABLE (1ULL << 3) -#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) +/* Define synthetic interrupt controller flag constants. */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) +#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) + +/* + * Synthetic timer configuration. + */ +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable:1; + u64 periodic:1; + u64 lazy:1; + u64 auto_enable:1; + u64 apic_vector:8; + u64 direct_mode:1; + u64 reserved_z0:3; + u64 sintx:4; + u64 reserved_z1:44; + } __packed; +}; + + +/* Define the synthetic interrupt controller event flags format. */ +union hv_synic_event_flags { + unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; +}; + +/* Define SynIC control register. */ +union hv_synic_scontrol { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:63; + } __packed; +}; + +/* Define synthetic interrupt source. */ +union hv_synic_sint { + u64 as_uint64; + struct { + u64 vector:8; + u64 reserved1:8; + u64 masked:1; + u64 auto_eoi:1; + u64 reserved2:46; + } __packed; +}; + +/* Define the format of the SIMP register */ +union hv_synic_simp { + u64 as_uint64; + struct { + u64 simp_enabled:1; + u64 preserved:11; + u64 base_simp_gpa:52; + } __packed; +}; + +/* Define the format of the SIEFP register */ +union hv_synic_siefp { + u64 as_uint64; + struct { + u64 siefp_enabled:1; + u64 preserved:11; + u64 base_siefp_gpa:52; + } __packed; +}; struct hv_vpset { u64 format; u64 valid_bank_mask; u64 bank_contents[]; -}; +} __packed; /* HvCallSendSyntheticClusterIpi hypercall */ struct hv_send_ipi { u32 vector; u32 reserved; u64 cpu_mask; -}; +} __packed; /* HvCallSendSyntheticClusterIpiEx hypercall */ struct hv_send_ipi_ex { u32 vector; u32 reserved; struct hv_vpset vp_set; -}; +} __packed; /* HvFlushGuestPhysicalAddressSpace hypercalls */ struct hv_guest_mapping_flush { u64 address_space; u64 flags; +} __packed; + +/* + * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited + * by the bitwidth of "additional_pages" in union hv_gpa_page_range. 
+ */ +#define HV_MAX_FLUSH_PAGES (2048) + +/* HvFlushGuestPhysicalAddressList hypercall */ +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages:11; + u64 largepage:1; + u64 basepfn:52; + } page; +}; + +/* + * All input flush parameters should be in single page. The max flush + * count is equal with how many entries of union hv_gpa_page_range can + * be populated into the input parameter page. + */ +#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \ + sizeof(union hv_gpa_page_range)) + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; }; /* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ @@ -763,7 +856,7 @@ struct hv_tlb_flush { u64 flags; u64 processor_mask; u64 gva_list[]; -}; +} __packed; /* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ struct hv_tlb_flush_ex { @@ -771,6 +864,6 @@ struct hv_tlb_flush_ex { u64 flags; struct hv_vpset hv_vp_set; u64 gva_list[]; -}; +} __packed; #endif diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h index b523f51c5400..634f99b1dc22 100644 --- a/arch/x86/include/asm/intel_pt.h +++ b/arch/x86/include/asm/intel_pt.h @@ -2,10 +2,36 @@ #ifndef _ASM_X86_INTEL_PT_H #define _ASM_X86_INTEL_PT_H +#define PT_CPUID_LEAVES 2 +#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */ + +enum pt_capabilities { + PT_CAP_max_subleaf = 0, + PT_CAP_cr3_filtering, + PT_CAP_psb_cyc, + PT_CAP_ip_filtering, + PT_CAP_mtc, + PT_CAP_ptwrite, + PT_CAP_power_event_trace, + PT_CAP_topa_output, + PT_CAP_topa_multiple_entries, + PT_CAP_single_range_output, + PT_CAP_output_subsys, + PT_CAP_payloads_lip, + PT_CAP_num_address_ranges, + PT_CAP_mtc_periods, + PT_CAP_cycle_thresholds, + PT_CAP_psb_periods, +}; + #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) void cpu_emergency_stop_pt(void); +extern u32 intel_pt_validate_hw_cap(enum pt_capabilities cap); +extern u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities cap); #else static inline void cpu_emergency_stop_pt(void) {} +static inline u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) { return 0; } +static inline u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability) { return 0; } #endif #endif /* _ASM_X86_INTEL_PT_H */ diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index a5fb34fe56a4..21efc9d07ed9 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -2,6 +2,19 @@ #ifndef _ASM_X86_JUMP_LABEL_H #define _ASM_X86_JUMP_LABEL_H +#ifndef HAVE_JUMP_LABEL +/* + * For better or for worse, if jump labels (the gcc extension) are missing, + * then the entire static branch patching infrastructure is compiled out. + * If that happens, the code in here will malfunction. Raise a compiler + * error instead. + * + * In theory, jump labels and the static branch patching infrastructure + * could be decoupled to fix this. 
+ */ +#error asm/jump_label.h included on a non-jump-label kernel +#endif + #define JUMP_LABEL_NOP_SIZE 5 #ifdef CONFIG_X86_64 @@ -20,9 +33,15 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" " - "branch=\"%c1\"" - : : "i" (key), "i" (branch) : : l_yes); + asm_volatile_goto("1:" + ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" + ".long 1b - ., %l[l_yes] - . \n\t" + _ASM_PTR "%c0 + %c1 - .\n\t" + ".popsection \n\t" + : : "i" (key), "i" (branch) : : l_yes); + return false; l_yes: return true; @@ -30,8 +49,14 @@ l_yes: static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { - asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" " - "branch=\"%c1\"" + asm_volatile_goto("1:" + ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" + "2:\n\t" + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" + ".long 1b - ., %l[l_yes] - . \n\t" + _ASM_PTR "%c0 + %c1 - .\n\t" + ".popsection \n\t" : : "i" (key), "i" (branch) : : l_yes); return false; @@ -41,26 +66,37 @@ l_yes: #else /* __ASSEMBLY__ */ -.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req -.Lstatic_branch_nop_\@: - .byte STATIC_KEY_INIT_NOP -.Lstatic_branch_no_after_\@: +.macro STATIC_JUMP_IF_TRUE target, key, def +.Lstatic_jump_\@: + .if \def + /* Equivalent to "jmp.d32 \target" */ + .byte 0xe9 + .long \target - .Lstatic_jump_after_\@ +.Lstatic_jump_after_\@: + .else + .byte STATIC_KEY_INIT_NOP + .endif .pushsection __jump_table, "aw" _ASM_ALIGN - .long .Lstatic_branch_nop_\@ - ., \l_yes - . - _ASM_PTR \key + \branch - . + .long .Lstatic_jump_\@ - ., \target - . + _ASM_PTR \key - . .popsection .endm -.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req -.Lstatic_branch_jmp_\@: - .byte 0xe9 - .long \l_yes - .Lstatic_branch_jmp_after_\@ -.Lstatic_branch_jmp_after_\@: +.macro STATIC_JUMP_IF_FALSE target, key, def +.Lstatic_jump_\@: + .if \def + .byte STATIC_KEY_INIT_NOP + .else + /* Equivalent to "jmp.d32 \target" */ + .byte 0xe9 + .long \target - .Lstatic_jump_after_\@ +.Lstatic_jump_after_\@: + .endif .pushsection __jump_table, "aw" _ASM_ALIGN - .long .Lstatic_branch_jmp_\@ - ., \l_yes - . - _ASM_PTR \key + \branch - . + .long .Lstatic_jump_\@ - ., \target - . + _ASM_PTR \key + 1 - . .popsection .endm diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fbda5a917c5b..4660ce90de7f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -439,6 +439,11 @@ struct kvm_mmu { u64 pdptrs[4]; /* pae */ }; +struct kvm_tlb_range { + u64 start_gfn; + u64 pages; +}; + enum pmc_type { KVM_PMC_GP = 0, KVM_PMC_FIXED, @@ -497,7 +502,7 @@ struct kvm_mtrr { struct kvm_vcpu_hv_stimer { struct hrtimer timer; int index; - u64 config; + union hv_stimer_config config; u64 count; u64 exp_time; struct hv_message msg; @@ -601,17 +606,16 @@ struct kvm_vcpu_arch { /* * QEMU userspace and the guest each have their own FPU state. - * In vcpu_run, we switch between the user and guest FPU contexts. - * While running a VCPU, the VCPU thread will have the guest FPU - * context. + * In vcpu_run, we switch between the user, maintained in the + * task_struct struct, and guest FPU contexts. While running a VCPU, + * the VCPU thread will have the guest FPU context. * * Note that while the PKRU state lives inside the fpu registers, * it is switched out separately at VMENTER and VMEXIT time. 
The * "guest_fpu" state here contains the guest FPU context, with the * host PRKU bits. */ - struct fpu user_fpu; - struct fpu guest_fpu; + struct fpu *guest_fpu; u64 xcr0; u64 guest_supported_xcr0; @@ -1042,6 +1046,8 @@ struct kvm_x86_ops { void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa); int (*tlb_remote_flush)(struct kvm *kvm); + int (*tlb_remote_flush_with_range)(struct kvm *kvm, + struct kvm_tlb_range *range); /* * Flush any TLB entries associated with the given GVA. @@ -1106,6 +1112,7 @@ struct kvm_x86_ops { bool (*mpx_supported)(void); bool (*xsaves_supported)(void); bool (*umip_emulated)(void); + bool (*pt_supported)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); void (*request_immediate_exit)(struct kvm_vcpu *vcpu); @@ -1186,6 +1193,7 @@ struct kvm_x86_ops { int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, uint16_t *vmcs_version); + uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); }; struct kvm_arch_async_pf { @@ -1196,6 +1204,7 @@ struct kvm_arch_async_pf { }; extern struct kvm_x86_ops *kvm_x86_ops; +extern struct kmem_cache *x86_fpu_cache; #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) @@ -1492,7 +1501,7 @@ asmlinkage void kvm_spurious_fault(void); "cmpb $0, kvm_rebooting \n\t" \ "jne 668b \n\t" \ __ASM_SIZE(push) " $666b \n\t" \ - "call kvm_spurious_fault \n\t" \ + "jmp kvm_spurious_fault \n\t" \ ".popsection \n\t" \ _ASM_EXTABLE(666b, 667b) @@ -1503,7 +1512,7 @@ asmlinkage void kvm_spurious_fault(void); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 1d0a7778e163..cc60e617931c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -22,6 +22,11 @@ struct ms_hyperv_info { extern struct ms_hyperv_info ms_hyperv; + +typedef int (*hyperv_fill_flush_list_func)( + struct hv_guest_mapping_flush_list *flush, + void *data); + /* * Generate the guest ID. 
*/ @@ -348,6 +353,11 @@ void set_hv_tscchange_cb(void (*cb)(void)); void clear_hv_tscchange_cb(void); void hyperv_stop_tsc_emulation(void); int hyperv_flush_guest_mapping(u64 as); +int hyperv_flush_guest_mapping_range(u64 as, + hyperv_fill_flush_list_func fill_func, void *data); +int hyperv_fill_flush_guest_mapping_list( + struct hv_guest_mapping_flush_list *flush, + u64 start_gfn, u64 end_gfn); #ifdef CONFIG_X86_64 void hv_apic_init(void); @@ -370,6 +380,11 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu) return NULL; } static inline int hyperv_flush_guest_mapping(u64 as) { return -1; } +static inline int hyperv_flush_guest_mapping_range(u64 as, + hyperv_fill_flush_list_func fill_func, void *data) +{ + return -1; +} #endif /* CONFIG_HYPERV */ #ifdef CONFIG_HYPERV_TSCPAGE diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index c8f73efb4ece..8e40c2446fd1 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -121,7 +121,43 @@ #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 #define MSR_IA32_RTIT_CTL 0x00000570 +#define RTIT_CTL_TRACEEN BIT(0) +#define RTIT_CTL_CYCLEACC BIT(1) +#define RTIT_CTL_OS BIT(2) +#define RTIT_CTL_USR BIT(3) +#define RTIT_CTL_PWR_EVT_EN BIT(4) +#define RTIT_CTL_FUP_ON_PTW BIT(5) +#define RTIT_CTL_FABRIC_EN BIT(6) +#define RTIT_CTL_CR3EN BIT(7) +#define RTIT_CTL_TOPA BIT(8) +#define RTIT_CTL_MTC_EN BIT(9) +#define RTIT_CTL_TSC_EN BIT(10) +#define RTIT_CTL_DISRETC BIT(11) +#define RTIT_CTL_PTW_EN BIT(12) +#define RTIT_CTL_BRANCH_EN BIT(13) +#define RTIT_CTL_MTC_RANGE_OFFSET 14 +#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) +#define RTIT_CTL_CYC_THRESH_OFFSET 19 +#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) +#define RTIT_CTL_PSB_FREQ_OFFSET 24 +#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) +#define RTIT_CTL_ADDR0_OFFSET 32 +#define RTIT_CTL_ADDR0 (0x0full << RTIT_CTL_ADDR0_OFFSET) +#define RTIT_CTL_ADDR1_OFFSET 36 +#define RTIT_CTL_ADDR1 (0x0full << RTIT_CTL_ADDR1_OFFSET) +#define RTIT_CTL_ADDR2_OFFSET 40 +#define RTIT_CTL_ADDR2 (0x0full << RTIT_CTL_ADDR2_OFFSET) +#define RTIT_CTL_ADDR3_OFFSET 44 +#define RTIT_CTL_ADDR3 (0x0full << RTIT_CTL_ADDR3_OFFSET) #define MSR_IA32_RTIT_STATUS 0x00000571 +#define RTIT_STATUS_FILTEREN BIT(0) +#define RTIT_STATUS_CONTEXTEN BIT(1) +#define RTIT_STATUS_TRIGGEREN BIT(2) +#define RTIT_STATUS_BUFFOVF BIT(3) +#define RTIT_STATUS_ERROR BIT(4) +#define RTIT_STATUS_STOPPED BIT(5) +#define RTIT_STATUS_BYTECNT_OFFSET 32 +#define RTIT_STATUS_BYTECNT (0x1ffffull << RTIT_STATUS_BYTECNT_OFFSET) #define MSR_IA32_RTIT_ADDR0_A 0x00000580 #define MSR_IA32_RTIT_ADDR0_B 0x00000581 #define MSR_IA32_RTIT_ADDR1_A 0x00000582 @@ -390,6 +426,7 @@ #define MSR_F15H_NB_PERF_CTR 0xc0010241 #define MSR_F15H_PTSC 0xc0010280 #define MSR_F15H_IC_CFG 0xc0011021 +#define MSR_F15H_EX_CFG 0xc001102c /* Fam 10h MSRs */ #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 @@ -771,6 +808,7 @@ #define VMX_BASIC_INOUT 0x0040000000000000LLU /* MSR_IA32_VMX_MISC bits */ +#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14) #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F /* AMD-V MSRs */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 032b6009baab..dad12b767ba0 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -232,6 +232,7 @@ enum spectre_v2_mitigation { enum 
spectre_v2_user_mitigation { SPECTRE_V2_USER_NONE, SPECTRE_V2_USER_STRICT, + SPECTRE_V2_USER_STRICT_PREFERRED, SPECTRE_V2_USER_PRCTL, SPECTRE_V2_USER_SECCOMP, }; diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 26942ad63830..488c59686a73 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops; #define paravirt_clobber(clobber) \ [paravirt_clobber] "i" (clobber) +/* + * Generate some code, and mark it as patchable by the + * apply_paravirt() alternate instruction patcher. + */ +#define _paravirt_alt(insn_string, type, clobber) \ + "771:\n\t" insn_string "\n" "772:\n" \ + ".pushsection .parainstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR " 771b\n" \ + " .byte " type "\n" \ + " .byte 772b-771b\n" \ + " .short " clobber "\n" \ + ".popsection\n" + /* Generate patchable code, with the default asm parameters. */ -#define paravirt_call \ - "PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \ - " clobber=\"%c[paravirt_clobber]\"" \ - " pv_opptr=\"%c[paravirt_opptr]\";" +#define paravirt_alt(insn_string) \ + _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") /* Simple instruction patching code. */ #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t" @@ -373,6 +385,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len); int paravirt_disable_iospace(void); /* + * This generates an indirect call based on the operation type number. + * The type number, computed in PARAVIRT_PATCH, is derived from the + * offset into the paravirt_patch_template structure, and can therefore be + * freely converted back into a structure offset. + */ +#define PARAVIRT_CALL \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%c[paravirt_opptr];" + +/* * These macros are intended to wrap calls through one of the paravirt * ops structs, so that they can be later identified and patched at * runtime. @@ -509,7 +531,7 @@ int paravirt_disable_iospace(void); /* since this condition will never hold */ \ if (sizeof(rettype) > sizeof(unsigned long)) { \ asm volatile(pre \ - paravirt_call \ + paravirt_alt(PARAVIRT_CALL) \ post \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ @@ -519,7 +541,7 @@ int paravirt_disable_iospace(void); __ret = (rettype)((((u64)__edx) << 32) | __eax); \ } else { \ asm volatile(pre \ - paravirt_call \ + paravirt_alt(PARAVIRT_CALL) \ post \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ @@ -546,7 +568,7 @@ int paravirt_disable_iospace(void); PVOP_VCALL_ARGS; \ PVOP_TEST_NULL(op); \ asm volatile(pre \ - paravirt_call \ + paravirt_alt(PARAVIRT_CALL) \ post \ : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ @@ -664,26 +686,6 @@ struct paravirt_patch_site { extern struct paravirt_patch_site __parainstructions[], __parainstructions_end[]; -#else /* __ASSEMBLY__ */ - -/* - * This generates an indirect call based on the operation type number. - * The type number, computed in PARAVIRT_PATCH, is derived from the - * offset into the paravirt_patch_template structure, and can therefore be - * freely converted back into a structure offset. 
- */ -.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req -771: ANNOTATE_RETPOLINE_SAFE - call *\pv_opptr -772: .pushsection .parainstructions,"a" - _ASM_ALIGN - _ASM_PTR 771b - .byte \type - .byte 772b-771b - .short \clobber - .popsection -.endm - #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PARAVIRT_TYPES_H */ diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 959d618dbb17..73bb404f4d2a 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -121,7 +121,14 @@ extern void __init dmi_check_pciprobe(void); extern void __init dmi_check_skip_isa_align(void); /* some common used subsys_initcalls */ +#ifdef CONFIG_PCI extern int __init pci_acpi_init(void); +#else +static inline int __init pci_acpi_init(void) +{ + return -EINVAL; +} +#endif extern void __init pcibios_irq_init(void); extern int __init pcibios_init(void); extern int pci_legacy_init(void); diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 84bd9bdc1987..88bca456da99 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d; */ #define MAXMEM (1UL << MAX_PHYSMEM_BITS) +#define GUARD_HOLE_PGD_ENTRY -256UL +#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT) +#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT) +#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE) + #define LDT_PGD_ENTRY -240UL #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 106b7d0e2dae..d6ff0bbdb394 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -564,8 +564,12 @@ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, unsigned int *level); extern pmd_t *lookup_pmd_address(unsigned long address); extern phys_addr_t slow_virt_to_phys(void *__address); -extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, - unsigned numpages, unsigned long page_flags); +extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, + unsigned long address, + unsigned numpages, + unsigned long page_flags); +extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, + unsigned long numpages); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_DEFS_H */ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 90cb2f36c042..99a7fa9ab0a3 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -8,6 +8,9 @@ DECLARE_PER_CPU(int, __preempt_count); +/* We use the MSB mostly because its available */ +#define PREEMPT_NEED_RESCHED 0x80000000 + /* * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such * that a decrement hitting 0 means we can and should reschedule. diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index a8b5e1e13319..dbaed55c1c24 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -4,41 +4,6 @@ * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from * PaX/grsecurity. 
*/ - -#ifdef __ASSEMBLY__ - -#include <asm/asm.h> -#include <asm/bug.h> - -.macro REFCOUNT_EXCEPTION counter:req - .pushsection .text..refcount -111: lea \counter, %_ASM_CX -112: ud2 - ASM_UNREACHABLE - .popsection -113: _ASM_EXTABLE_REFCOUNT(112b, 113b) -.endm - -/* Trigger refcount exception if refcount result is negative. */ -.macro REFCOUNT_CHECK_LT_ZERO counter:req - js 111f - REFCOUNT_EXCEPTION counter="\counter" -.endm - -/* Trigger refcount exception if refcount result is zero or negative. */ -.macro REFCOUNT_CHECK_LE_ZERO counter:req - jz 111f - REFCOUNT_CHECK_LT_ZERO counter="\counter" -.endm - -/* Trigger refcount exception unconditionally. */ -.macro REFCOUNT_ERROR counter:req - jmp 111f - REFCOUNT_EXCEPTION counter="\counter" -.endm - -#else /* __ASSEMBLY__ */ - #include <linux/refcount.h> #include <asm/bug.h> @@ -50,12 +15,35 @@ * central refcount exception. The fixup address for the exception points * back to the regular execution flow in .text. */ +#define _REFCOUNT_EXCEPTION \ + ".pushsection .text..refcount\n" \ + "111:\tlea %[var], %%" _ASM_CX "\n" \ + "112:\t" ASM_UD2 "\n" \ + ASM_UNREACHABLE \ + ".popsection\n" \ + "113:\n" \ + _ASM_EXTABLE_REFCOUNT(112b, 113b) + +/* Trigger refcount exception if refcount result is negative. */ +#define REFCOUNT_CHECK_LT_ZERO \ + "js 111f\n\t" \ + _REFCOUNT_EXCEPTION + +/* Trigger refcount exception if refcount result is zero or negative. */ +#define REFCOUNT_CHECK_LE_ZERO \ + "jz 111f\n\t" \ + REFCOUNT_CHECK_LT_ZERO + +/* Trigger refcount exception unconditionally. */ +#define REFCOUNT_ERROR \ + "jmp 111f\n\t" \ + _REFCOUNT_EXCEPTION static __always_inline void refcount_add(unsigned int i, refcount_t *r) { asm volatile(LOCK_PREFIX "addl %1,%0\n\t" - "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\"" - : [counter] "+m" (r->refs.counter) + REFCOUNT_CHECK_LT_ZERO + : [var] "+m" (r->refs.counter) : "ir" (i) : "cc", "cx"); } @@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r) static __always_inline void refcount_inc(refcount_t *r) { asm volatile(LOCK_PREFIX "incl %0\n\t" - "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\"" - : [counter] "+m" (r->refs.counter) + REFCOUNT_CHECK_LT_ZERO + : [var] "+m" (r->refs.counter) : : "cc", "cx"); } static __always_inline void refcount_dec(refcount_t *r) { asm volatile(LOCK_PREFIX "decl %0\n\t" - "REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\"" - : [counter] "+m" (r->refs.counter) + REFCOUNT_CHECK_LE_ZERO + : [var] "+m" (r->refs.counter) : : "cc", "cx"); } static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { - return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", - "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"", + REFCOUNT_CHECK_LT_ZERO, r->refs.counter, e, "er", i, "cx"); } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) { return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", - "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"", + REFCOUNT_CHECK_LT_ZERO, r->refs.counter, e, "cx"); } @@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r) /* Did we try to increment from/to an undesirable state? 
*/ if (unlikely(c < 0 || c == INT_MAX || result < c)) { - asm volatile("REFCOUNT_ERROR counter=\"%[counter]\"" - : : [counter] "m" (r->refs.counter) + asm volatile(REFCOUNT_ERROR + : : [var] "m" (r->refs.counter) : "cc", "cx"); break; } @@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r) return refcount_add_not_zero(1, r); } -#endif /* __ASSEMBLY__ */ - #endif diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/resctrl_sched.h index 9acb06b6f81e..54990fe2a3ae 100644 --- a/arch/x86/include/asm/intel_rdt_sched.h +++ b/arch/x86/include/asm/resctrl_sched.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_INTEL_RDT_SCHED_H -#define _ASM_X86_INTEL_RDT_SCHED_H +#ifndef _ASM_X86_RESCTRL_SCHED_H +#define _ASM_X86_RESCTRL_SCHED_H -#ifdef CONFIG_INTEL_RDT +#ifdef CONFIG_RESCTRL #include <linux/sched.h> #include <linux/jump_label.h> @@ -10,7 +10,7 @@ #define IA32_PQR_ASSOC 0x0c8f /** - * struct intel_pqr_state - State cache for the PQR MSR + * struct resctrl_pqr_state - State cache for the PQR MSR * @cur_rmid: The cached Resource Monitoring ID * @cur_closid: The cached Class Of Service ID * @default_rmid: The user assigned Resource Monitoring ID @@ -24,21 +24,21 @@ * The cache also helps to avoid pointless updates if the value does * not change. */ -struct intel_pqr_state { +struct resctrl_pqr_state { u32 cur_rmid; u32 cur_closid; u32 default_rmid; u32 default_closid; }; -DECLARE_PER_CPU(struct intel_pqr_state, pqr_state); +DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); /* - * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR + * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * * Following considerations are made so that this has minimal impact * on scheduler hot path: @@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); * simple as possible. * Must be called with preemption disabled. 
*/ -static void __intel_rdt_sched_in(void) +static void __resctrl_sched_in(void) { - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); + struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); u32 closid = state->default_closid; u32 rmid = state->default_rmid; @@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void) } } -static inline void intel_rdt_sched_in(void) +static inline void resctrl_sched_in(void) { if (static_branch_likely(&rdt_enable_key)) - __intel_rdt_sched_in(); + __resctrl_sched_in(); } #else -static inline void intel_rdt_sched_in(void) {} +static inline void resctrl_sched_in(void) {} -#endif /* CONFIG_INTEL_RDT */ +#endif /* CONFIG_RESCTRL */ -#endif /* _ASM_X86_INTEL_RDT_SCHED_H */ +#endif /* _ASM_X86_RESCTRL_SCHED_H */ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 93b462e48067..dec9c1e84c78 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -290,11 +290,4 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) -#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" -#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" -#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb" -#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd" -#define SVM_STGI ".byte 0x0f, 0x01, 0xdc" -#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" - #endif diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h index 2e6245a023ef..ace464f09681 100644 --- a/arch/x86/include/asm/trace/hyperv.h +++ b/arch/x86/include/asm/trace/hyperv.h @@ -42,6 +42,20 @@ TRACE_EVENT(hyperv_nested_flush_guest_mapping, TP_printk("address space %llx ret %d", __entry->as, __entry->ret) ); +TRACE_EVENT(hyperv_nested_flush_guest_mapping_range, + TP_PROTO(u64 as, int ret), + TP_ARGS(as, ret), + + TP_STRUCT__entry( + __field(u64, as) + __field(int, ret) + ), + TP_fast_assign(__entry->as = as; + __entry->ret = ret; + ), + TP_printk("address space %llx ret %d", __entry->as, __entry->ret) + ); + TRACE_EVENT(hyperv_send_ipi_mask, TP_PROTO(const struct cpumask *cpus, int vector), diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index ade0f153947d..4e4133e86484 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -77,7 +77,10 @@ #define SECONDARY_EXEC_ENCLS_EXITING 0x00008000 #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 #define SECONDARY_EXEC_ENABLE_PML 0x00020000 +#define SECONDARY_EXEC_PT_CONCEAL_VMX 0x00080000 #define SECONDARY_EXEC_XSAVES 0x00100000 +#define SECONDARY_EXEC_PT_USE_GPA 0x01000000 +#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC 0x00400000 #define SECONDARY_EXEC_TSC_SCALING 0x02000000 #define PIN_BASED_EXT_INTR_MASK 0x00000001 @@ -98,6 +101,8 @@ #define VM_EXIT_LOAD_IA32_EFER 0x00200000 #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 #define VM_EXIT_CLEAR_BNDCFGS 0x00800000 +#define VM_EXIT_PT_CONCEAL_PIP 0x01000000 +#define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff @@ -109,6 +114,8 @@ #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 #define VM_ENTRY_LOAD_BNDCFGS 0x00010000 +#define VM_ENTRY_PT_CONCEAL_PIP 0x00020000 +#define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff @@ -240,6 +247,8 @@ enum vmcs_field { GUEST_PDPTR3_HIGH = 0x00002811, GUEST_BNDCFGS = 0x00002812, GUEST_BNDCFGS_HIGH = 0x00002813, + GUEST_IA32_RTIT_CTL = 0x00002814, + GUEST_IA32_RTIT_CTL_HIGH = 0x00002815, HOST_IA32_PAT = 0x00002c00, HOST_IA32_PAT_HIGH = 0x00002c01, 
 HOST_IA32_EFER = 0x00002c02,
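
A usage sketch for the string-based LOCK_PREFIX restored in the alternative.h hunk above (the function name and the plain int operand are invented for illustration; real callers such as the atomic and refcount helpers supply their own operands). On CONFIG_SMP the macro records the prefix location in .smp_locks and emits "lock;", on UP it expands to an empty string.

/*
 * Illustration only: emit an SMP-patchable locked RMW through the
 * string LOCK_PREFIX from <asm/alternative.h>. The .smp_locks entry
 * generated by LOCK_PREFIX_HERE lets the alternatives code patch the
 * LOCK prefix away when the kernel ends up running on a single CPU.
 */
#include <asm/alternative.h>

static inline void locked_inc_sketch(int *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (*v)
		     : : "memory");
}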
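
The hyperv-tlfs.h and mshyperv.h hunks add union hv_gpa_page_range, struct hv_guest_mapping_flush_list and the hyperv_flush_guest_mapping_range() / hyperv_fill_flush_guest_mapping_list() declarations. Below is a sketch of how a fill callback can populate the GPA list for HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST; the real helper lives outside this diff, the function name and error handling are illustrative, and the usual kernel headers (<linux/kernel.h>, <linux/errno.h>, <asm/hyperv-tlfs.h>) are assumed.

static int fill_gpa_list_sketch(struct hv_guest_mapping_flush_list *flush,
				u64 start_gfn, u64 pages)
{
	int gpa_n = 0;

	while (pages > 0) {
		/* one entry covers at most HV_MAX_FLUSH_PAGES 4K pages */
		u64 chunk = min_t(u64, pages, HV_MAX_FLUSH_PAGES);

		if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
			return -ENOSPC;	/* list no longer fits in the input page */

		flush->gpa_list[gpa_n].page.largepage = 0;
		flush->gpa_list[gpa_n].page.basepfn = start_gfn;
		flush->gpa_list[gpa_n].page.additional_pages = chunk - 1;

		start_gfn += chunk;
		pages -= chunk;
		gpa_n++;
	}

	return gpa_n;	/* rep count handed to the hypercall */
}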
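
The kvm_host.h hunk switches struct kvm_vcpu_hv_stimer::config from a raw u64 to the union hv_stimer_config added in hyperv-tlfs.h, which is why the old HV_STIMER_ENABLE/PERIODIC/LAZY/AUTOENABLE flags and the HV_STIMER_SINT() accessor disappear. A small sketch of the resulting access pattern (the helper name is made up):

/* old style: (config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == sint */
static bool stimer_targets_sint_sketch(union hv_stimer_config cfg, int sint)
{
	/* direct-mode timers deliver via apic_vector, not a SINT */
	return cfg.enable && !cfg.direct_mode && cfg.sintx == sint;
}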
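
jump_label.h goes back to the STATIC_JUMP_IF_TRUE/STATIC_JUMP_IF_FALSE assembler macros. A hypothetical .S caller could look like the following; the key name and labels are invented, and "def" is assumed to mirror the key's compile-time default (here a DEFINE_STATIC_KEY_FALSE key):

	/* my_key is DEFINE_STATIC_KEY_FALSE(my_key) on the C side */
	STATIC_JUMP_IF_TRUE .Ldo_trace, my_key, def=0
	/* default (key disabled) path */
	jmp .Ldone
.Ldo_trace:
	/* runs only after static_branch_enable(&my_key) patches this site */
.Ldone: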