Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/acpi.h               |  3
-rw-r--r--  arch/x86/include/asm/cpu.h                |  1
-rw-r--r--  arch/x86/include/asm/cpufeature.h         | 90
-rw-r--r--  arch/x86/include/asm/cpufeatures.h        |  3
-rw-r--r--  arch/x86/include/asm/disabled-features.h  |  2
-rw-r--r--  arch/x86/include/asm/intel-family.h       |  4
-rw-r--r--  arch/x86/include/asm/msr-index.h          |  2
-rw-r--r--  arch/x86/include/asm/mwait.h              |  2
-rw-r--r--  arch/x86/include/asm/pgalloc.h            | 12
-rw-r--r--  arch/x86/include/asm/pmem.h               | 77
-rw-r--r--  arch/x86/include/asm/ptrace.h             |  6
-rw-r--r--  arch/x86/include/asm/required-features.h  |  2
-rw-r--r--  arch/x86/include/asm/smp.h                |  3
-rw-r--r--  arch/x86/include/asm/special_insns.h      | 46
-rw-r--r--  arch/x86/include/asm/vmx.h                |  1
-rw-r--r--  arch/x86/include/asm/xen/cpuid.h          |  5
16 files changed, 100 insertions(+), 159 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 94c18ebfd68c..5391b0ae7cc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -145,7 +145,6 @@ static inline void disable_acpi(void) { }
 #define ARCH_HAS_POWER_INIT	1
 
 #ifdef CONFIG_ACPI_NUMA
-extern int acpi_numa;
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
@@ -170,4 +169,6 @@ static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 }
 #endif
 
+#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)
+
 #endif /* _ASM_X86_ACPI_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 59d34c521d96..9b7fa6313f1a 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -16,6 +16,7 @@ extern void prefill_possible_map(void);
 static inline void prefill_possible_map(void) {}
 
 #define cpu_physical_id(cpu)			boot_cpu_physical_apicid
+#define cpu_acpi_id(cpu)			0
 #define safe_smp_processor_id()			0
 
 #endif /* CONFIG_SMP */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 483fb547e3c0..1d2b69fc0ceb 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -49,43 +49,59 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define test_cpu_cap(c, bit)						\
 	test_bit(bit, (unsigned long *)((c)->x86_capability))
 
-#define REQUIRED_MASK_BIT_SET(bit)					\
-	( (((bit)>>5)==0  && (1UL<<((bit)&31) & REQUIRED_MASK0 )) ||	\
-	  (((bit)>>5)==1  && (1UL<<((bit)&31) & REQUIRED_MASK1 )) ||	\
-	  (((bit)>>5)==2  && (1UL<<((bit)&31) & REQUIRED_MASK2 )) ||	\
-	  (((bit)>>5)==3  && (1UL<<((bit)&31) & REQUIRED_MASK3 )) ||	\
-	  (((bit)>>5)==4  && (1UL<<((bit)&31) & REQUIRED_MASK4 )) ||	\
-	  (((bit)>>5)==5  && (1UL<<((bit)&31) & REQUIRED_MASK5 )) ||	\
-	  (((bit)>>5)==6  && (1UL<<((bit)&31) & REQUIRED_MASK6 )) ||	\
-	  (((bit)>>5)==7  && (1UL<<((bit)&31) & REQUIRED_MASK7 )) ||	\
-	  (((bit)>>5)==8  && (1UL<<((bit)&31) & REQUIRED_MASK8 )) ||	\
-	  (((bit)>>5)==9  && (1UL<<((bit)&31) & REQUIRED_MASK9 )) ||	\
-	  (((bit)>>5)==10 && (1UL<<((bit)&31) & REQUIRED_MASK10)) ||	\
-	  (((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) ||	\
-	  (((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) ||	\
-	  (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) ||	\
-	  (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) ||	\
-	  (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) ||	\
-	  (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
-
-#define DISABLED_MASK_BIT_SET(bit)					\
-	( (((bit)>>5)==0  && (1UL<<((bit)&31) & DISABLED_MASK0 )) ||	\
-	  (((bit)>>5)==1  && (1UL<<((bit)&31) & DISABLED_MASK1 )) ||	\
-	  (((bit)>>5)==2  && (1UL<<((bit)&31) & DISABLED_MASK2 )) ||	\
-	  (((bit)>>5)==3  && (1UL<<((bit)&31) & DISABLED_MASK3 )) ||	\
-	  (((bit)>>5)==4  && (1UL<<((bit)&31) & DISABLED_MASK4 )) ||	\
-	  (((bit)>>5)==5  && (1UL<<((bit)&31) & DISABLED_MASK5 )) ||	\
-	  (((bit)>>5)==6  && (1UL<<((bit)&31) & DISABLED_MASK6 )) ||	\
-	  (((bit)>>5)==7  && (1UL<<((bit)&31) & DISABLED_MASK7 )) ||	\
-	  (((bit)>>5)==8  && (1UL<<((bit)&31) & DISABLED_MASK8 )) ||	\
-	  (((bit)>>5)==9  && (1UL<<((bit)&31) & DISABLED_MASK9 )) ||	\
-	  (((bit)>>5)==10 && (1UL<<((bit)&31) & DISABLED_MASK10)) ||	\
-	  (((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) ||	\
-	  (((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) ||	\
-	  (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) ||	\
-	  (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) ||	\
-	  (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) ||	\
-	  (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
+/*
+ * There are 32 bits/features in each mask word.  The high bits
+ * (selected with (bit>>5)) give us the word number and the low 5
+ * bits give us the bit/feature number inside the word.
+ * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
+ * see if it is set in the mask word.
+ */
+#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit)	\
+	(((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
+
+#define REQUIRED_MASK_BIT_SET(feature_bit)		\
+	 ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  0, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  1, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  2, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  3, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  4, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  5, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  6, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  7, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  8, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK,  9, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) ||	\
+	   REQUIRED_MASK_CHECK					  ||	\
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+
+#define DISABLED_MASK_BIT_SET(feature_bit)		\
+	 ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  1, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  2, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  3, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  4, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  5, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  6, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  7, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  8, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  9, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) ||	\
+	   DISABLED_MASK_CHECK					  ||	\
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 18))
 
 #define cpu_has(c, bit)							\
 	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
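A quick way to see the arithmetic the new comment describes: each X86_FEATURE_* value encodes a 32-bit mask-word index in its upper bits and a bit position in its lower five. A minimal standalone C sketch (userspace, illustrative names only, not kernel code):

#include <stdio.h>

/* Same trick as CHECK_BIT_IN_MASK_WORD: a feature number encodes a
 * mask-word index in its high bits and a bit position in its low 5. */
#define FEATURE_WORD(bit)  ((bit) >> 5)		/* which 32-bit mask word */
#define FEATURE_MASK(bit)  (1UL << ((bit) & 31))	/* bit inside that word */

int main(void)
{
	unsigned int feature = 9 * 32 + 23;	/* e.g. X86_FEATURE_CLFLUSHOPT */

	printf("word %u, mask 0x%08lx\n",
	       FEATURE_WORD(feature), FEATURE_MASK(feature));
	return 0;	/* prints: word 9, mask 0x00800000 */
}

The BUILD_BUG_ON_ZERO(NCAPINTS != 18) terms then make both macros fail to compile the moment a new capability word is added without updating the corresponding REQUIRED_MASK*/DISABLED_MASK* definitions.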
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c64b1e9c5d1a..92a8308b96f6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -225,7 +225,6 @@
 #define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT	( 9*32+22) /* PCOMMIT instruction */
 #define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
@@ -310,5 +309,5 @@
 #endif
 #define X86_BUG_NULL_SEG	X86_BUG(10) /* Nulling a selector preserves the base */
 #define X86_BUG_SWAPGS_FENCE	X86_BUG(11) /* SWAPGS without input dep on GS */
-
+#define X86_BUG_MONITOR		X86_BUG(12) /* IPI required to wake up remote CPU */
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 911e9358ceb1..85599ad4d024 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,5 +56,7 @@
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0
 #define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 6999f7d01a0d..627719475457 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -8,7 +8,7 @@
  * "Extreme" ones, like Broadwell-E.
  *
  * Things ending in "2" are usually because we have no better
- * name for them.  There's no processor called "WESTMERE2".
+ * name for them.  There's no processor called "SILVERMONT2".
  */
 
 #define INTEL_FAM6_CORE_YONAH		0x0E
@@ -18,10 +18,10 @@
 #define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
 
 #define INTEL_FAM6_NEHALEM		0x1E
+#define INTEL_FAM6_NEHALEM_G		0x1F /* Auburndale / Havendale */
 #define INTEL_FAM6_NEHALEM_EP		0x1A
 #define INTEL_FAM6_NEHALEM_EX		0x2E
 
 #define INTEL_FAM6_WESTMERE		0x25
-#define INTEL_FAM6_WESTMERE2		0x1F
 #define INTEL_FAM6_WESTMERE_EP		0x2C
 #define INTEL_FAM6_WESTMERE_EX		0x2F
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5a73a9c62c39..56f4c6676b29 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -64,8 +64,6 @@
 #define MSR_OFFCORE_RSP_0		0x000001a6
 #define MSR_OFFCORE_RSP_1		0x000001a7
 
-#define MSR_NHM_TURBO_RATIO_LIMIT	0x000001ad
-#define MSR_IVT_TURBO_RATIO_LIMIT	0x000001ae
 #define MSR_TURBO_RATIO_LIMIT		0x000001ad
 #define MSR_TURBO_RATIO_LIMIT1		0x000001ae
 #define MSR_TURBO_RATIO_LIMIT2		0x000001af
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 0deeb2d26df7..f37f2d8a2989 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -97,7 +97,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
  */
 static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
-	if (!current_set_polling_and_test()) {
+	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
 		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
 			mb();
 			clflush((void *)&current_thread_info()->flags);
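The mwait.h hunk reads naturally with the new X86_BUG_MONITOR flag above: on affected parts the idle path skips the TIF_POLLING handshake entirely, so remote wakeups always send an IPI, which MWAIT honours regardless of the monitored cache line. A simplified control-flow sketch with the kernel helpers stubbed out (illustrative only; all names below are stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers -- illustrative only. */
static bool monitor_is_broken;			/* X86_BUG_MONITOR set? */
static bool set_polling_and_test(void) { return false; }
static void do_mwait(void) { puts("mwait"); }
static void clear_polling(void) { }

static void idle_with_hints(void)
{
	/*
	 * On buggy parts, skip the polling handshake: wakers then
	 * always send an IPI instead of relying on a MONITOR'ed
	 * store to wake the sleeping CPU.
	 */
	if (monitor_is_broken || !set_polling_and_test())
		do_mwait();
	clear_polling();
}

int main(void)
{
	monitor_is_broken = true;
	idle_with_hints();	/* enters mwait without advertising polling */
	return 0;
}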
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 574c23cf761a..b6d425999f99 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	page = alloc_pages(gfp, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +129,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL);
+	gfp_t gfp = GFP_KERNEL_ACCOUNT;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	return (pud_t *)get_zeroed_page(gfp);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
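The pgalloc.h change switches page-table allocations to GFP_KERNEL_ACCOUNT so user page tables are charged to the allocating memory cgroup, while init_mm (kernel) page tables stay unaccounted. The flag dance in isolation, as a runnable sketch with made-up flag values (the real kernel definitions differ):

#include <stdio.h>

/* Illustrative gfp-flag values; only the masking pattern matters. */
#define __GFP_ACCOUNT		0x1u
#define GFP_KERNEL		0x2u
#define GFP_KERNEL_ACCOUNT	(GFP_KERNEL | __GFP_ACCOUNT)

struct mm { int is_init_mm; };

static unsigned int pagetable_gfp(const struct mm *mm)
{
	unsigned int gfp = GFP_KERNEL_ACCOUNT;

	/* Kernel page tables are not charged to any memory cgroup. */
	if (mm->is_init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return gfp;
}

int main(void)
{
	struct mm user = { 0 }, kernel = { 1 };

	printf("user: %#x, kernel: %#x\n",
	       pagetable_gfp(&user), pagetable_gfp(&kernel));
	return 0;	/* user keeps __GFP_ACCOUNT, kernel drops it */
}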
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index fbc5e92e1ecc..643eba42d620 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -26,13 +26,11 @@
  * @n: length of the copy in bytes
  *
  * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
+ * a subsequent pmem driver flush operation will drain posted write queues.
  */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
-	int unwritten;
+	int rem;
 
 	/*
 	 * We are copying between two kernel buffers, if
@@ -40,59 +38,36 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 	 * fault) we would have already reported a general protection fault
 	 * before the WARN+BUG.
 	 */
-	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-			(void __user *) src, n);
-	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
-				__func__, dst, src, unwritten))
+	rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
+	if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
+				__func__, dst, src, rem))
 		BUG();
 }
 
-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
-		size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
 	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
-		return memcpy_mcsafe(dst, (void __force *) src, n);
-	memcpy(dst, (void __force *) src, n);
+		return memcpy_mcsafe(dst, src, n);
+	memcpy(dst, src, n);
 	return 0;
 }
 
 /**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'.  Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-/**
  * arch_wb_cache_pmem - write back a cache range with CLWB
  * @vaddr:	virtual start address
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call.
+ * instruction.
  */
-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
 	unsigned long clflush_mask = x86_clflush_size - 1;
-	void *vaddr = (void __force *)addr;
-	void *vend = vaddr + size;
+	void *vend = addr + size;
 	void *p;
 
-	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	for (p = (void *)((unsigned long)addr & ~clflush_mask);
 	     p < vend; p += x86_clflush_size)
 		clwb(p);
 }
@@ -113,16 +88,14 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
  * @i: iterator with source data
  *
  * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
  */
-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
-	void *vaddr = (void __force *)addr;
 	size_t len;
 
 	/* TODO: skip the write-back by always using non-temporal stores */
-	len = copy_from_iter_nocache(vaddr, bytes, i);
+	len = copy_from_iter_nocache(addr, bytes, i);
 
 	if (__iter_needs_pmem_wb(i))
 		arch_wb_cache_pmem(addr, bytes);
@@ -136,28 +109,16 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  * @size:	number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
 {
-	void *vaddr = (void __force *)addr;
-
-	memset(vaddr, 0, size);
+	memset(addr, 0, size);
 	arch_wb_cache_pmem(addr, size);
 }
 
-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
-	clflush_cache_range((void __force *) addr, size);
-}
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
+	clflush_cache_range(addr, size);
 }
 #endif /* CONFIG_ARCH_HAS_PMEM_API */
 #endif /* __ASM_X86_PMEM_H__ */
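The surviving arch_wb_cache_pmem() rounds the start address down to a cache-line boundary and then steps through the range one line at a time. The pointer arithmetic is easy to verify standalone (a 64-byte line size is assumed here for illustration; the kernel reads it from boot_cpu_data):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t clflush_size = 64;	/* stand-in for x86_clflush_size */
	uint64_t clflush_mask = clflush_size - 1;
	uint64_t addr = 0x1010, size = 200;
	uint64_t vend = addr + size;

	/* Round down to a cache-line boundary, then walk line by line. */
	for (uint64_t p = addr & ~clflush_mask; p < vend; p += clflush_size)
		printf("clwb 0x%" PRIx64 "\n", p);
	return 0;	/* flushes lines 0x1000, 0x1040, 0x1080, 0x10c0 */
}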
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6271281f947d..2b5d686ea9f3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -83,12 +83,6 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 			 int error_code, int si_code);
 
-extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
-extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
-				       unsigned long phase1_result);
-
-extern long syscall_trace_enter(struct pt_regs *);
-
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
 	return regs->ax;
 }
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 4916144e3c42..fac9a5c0abe9 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -99,5 +99,7 @@
 #define REQUIRED_MASK14	0
 #define REQUIRED_MASK15	0
 #define REQUIRED_MASK16	0
+#define REQUIRED_MASK17	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0576b6157f3a..ebd0c164cd4e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -33,6 +33,7 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 }
 
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
@@ -135,6 +136,7 @@ int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
 int common_cpu_die(unsigned int cpu);
 void native_cpu_die(unsigned int cpu);
+void hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
@@ -147,6 +149,7 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_boot_cpu_info(void);
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
+#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)
 
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)     wbinvd()
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index d96d04377765..587d7914ea4b 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -253,52 +253,6 @@ static inline void clwb(volatile void *__p)
 		: [pax] "a" (p));
 }
 
-/**
- * pcommit_sfence() - persistent commit and fence
- *
- * The PCOMMIT instruction ensures that data that has been flushed from the
- * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
- * memory and is durable on the DIMM.  The primary use case for this is
- * persistent memory.
- *
- * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
- * with appropriate fencing.
- *
- * Example:
- * void flush_and_commit_buffer(void *vaddr, unsigned int size)
- * {
- *         unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
- *         void *vend = vaddr + size;
- *         void *p;
- *
- *         for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
- *              p < vend; p += boot_cpu_data.x86_clflush_size)
- *                 clwb(p);
- *
- *         // SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
- *         // MFENCE via mb() also works
- *         wmb();
- *
- *         // PCOMMIT and the required SFENCE for ordering
- *         pcommit_sfence();
- * }
- *
- * After this function completes the data pointed to by 'vaddr' has been
- * accepted to memory and will be durable if the 'vaddr' points to persistent
- * memory.
- *
- * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
- * things we include both the PCOMMIT and the required SFENCE in the
- * alternatives generated by pcommit_sfence().
- */
-static inline void pcommit_sfence(void)
-{
-	alternative(ASM_NOP7,
-		    ".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
-		    "sfence",
-		    X86_FEATURE_PCOMMIT);
-}
-
 #define nop() asm volatile ("nop")
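With pcommit_sfence() and its long usage comment deleted, the durability recipe from that comment reduces to flush-then-fence. A runnable userspace sketch of what remains (clwb()/wmb() are stubbed with prints here; in the kernel they are the helpers from this header, and the line size comes from boot_cpu_data rather than a constant):

#include <stdint.h>
#include <stdio.h>

#define CLFLUSH_SIZE 64		/* illustrative; kernel reads this at boot */
static void clwb(void *p)  { printf("clwb %p\n", p); }
static void wmb(void)      { puts("sfence"); }

/* Flush-then-fence: all that is left of the removed PCOMMIT recipe. */
static void flush_buffer_for_durability(void *vaddr, unsigned int size)
{
	uintptr_t clflush_mask = CLFLUSH_SIZE - 1;
	char *vend = (char *)vaddr + size;
	char *p;

	for (p = (char *)((uintptr_t)vaddr & ~clflush_mask);
	     p < vend; p += CLFLUSH_SIZE)
		clwb(p);

	wmb();	/* SFENCE orders the CLWBs; no PCOMMIT step remains */
}

int main(void)
{
	char buf[256];

	flush_buffer_for_durability(buf, sizeof(buf));
	return 0;
}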
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 14c63c7e8337..a002b07a7099 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -72,7 +72,6 @@
 #define SECONDARY_EXEC_SHADOW_VMCS              0x00004000
 #define SECONDARY_EXEC_ENABLE_PML               0x00020000
 #define SECONDARY_EXEC_XSAVES                   0x00100000
-#define SECONDARY_EXEC_PCOMMIT			0x00200000
 #define SECONDARY_EXEC_TSC_SCALING              0x02000000
 
 #define PIN_BASED_EXT_INTR_MASK                 0x00000001
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index 0d809e9fc975..3bdd10d71223 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -76,15 +76,18 @@
 /*
  * Leaf 5 (0x40000x04)
  * HVM-specific features
+ * EAX: Features
+ * EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
  */
 
-/* EAX Features */
 /* Virtualized APIC registers */
 #define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
 /* Virtualized x2APIC accesses */
 #define XEN_HVM_CPUID_X2APIC_VIRT      (1u << 1)
 /* Memory mapped from other domains has valid IOMMU entries */
 #define XEN_HVM_CPUID_IOMMU_MAPPINGS   (1u << 2)
+/* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_VCPU_ID_PRESENT  (1u << 3)
 
 #define XEN_CPUID_MAX_NUM_LEAVES 4
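The new XEN_HVM_CPUID_VCPU_ID_PRESENT flag lets an HVM guest read its vcpu id straight out of EBX of leaf 5. A hedged userspace sketch using GCC's <cpuid.h>; it assumes the Xen leaves start at 0x40000000, whereas real code must first locate the base by scanning for the "XenVMMXenVMM" signature:

#include <cpuid.h>
#include <stdio.h>

#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3)

int main(void)
{
	/* Assumed base; scan 0x40000000..0x40010000 for the Xen
	 * signature in real code before trusting these leaves. */
	unsigned int base = 0x40000000;
	unsigned int eax, ebx, ecx, edx;

	__cpuid(base + 4, eax, ebx, ecx, edx);	/* Leaf 5 (0x40000x04) */
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		printf("vcpu id: %u\n", ebx);	/* EBX holds the vcpu id */
	else
		printf("vcpu id not advertised\n");
	return 0;
}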