author     Jane Chu <jane.chu@oracle.com>            2022-05-16 21:21:46 +0300
committer  Dan Williams <dan.j.williams@intel.com>   2022-05-16 21:46:44 +0300
commit     b3fdf9398a16f01dc013967a4ab25e99c3f4fc12 (patch)
tree       db98561e96864f4bbe220a4c45e76d233f5bab76 /arch/x86/mm
parent     7917f9cdb5032ac065ac6a5415f5722f215b2608 (diff)
download   linux-b3fdf9398a16f01dc013967a4ab25e99c3f4fc12.tar.xz
x86/mce: relocate set{clear}_mce_nospec() functions
Relocate the twin MCE functions to the arch/x86/mm/pat/set_memory.c
file where they belong.

While at it, fix up a function name in a comment.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
[sfr: gate {set,clear}_mce_nospec() by CONFIG_X86_64]
Link: https://lore.kernel.org/r/165272527328.90175.8336008202048685278.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
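
As Stephen Rothwell's note above indicates, the out-of-line definitions are gated by CONFIG_X86_64, so other configurations only ever see the declarations in the common header. The following is a minimal sketch of what the gated prototypes in include/linux/set_memory.h could look like after this change; the no-op fallbacks for non-X86_64 builds are an assumption for illustration, not part of this patch.

/*
 * Hypothetical sketch of include/linux/set_memory.h after this series.
 * The stub bodies below are assumed for illustration; only the gating
 * by CONFIG_X86_64 is described by the patch itself.
 */
#ifdef CONFIG_X86_64
int set_mce_nospec(unsigned long pfn, bool unmap);
int clear_mce_nospec(unsigned long pfn);
#else
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
	return 0;	/* nothing to do on architectures without the 1:1-map trick */
}
static inline int clear_mce_nospec(unsigned long pfn)
{
	return 0;
}
#endif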
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/pat/set_memory.c  50
1 file changed, 48 insertions, 2 deletions
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index abf5ed76e4b7..0caf4b0edcbc 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -19,6 +19,7 @@
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
 #include <linux/cc_platform.h>
+#include <linux/set_memory.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -29,7 +30,6 @@
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
 #include <asm/memtype.h>
-#include <asm/set_memory.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -1816,7 +1816,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
 }
 
 /*
- * _set_memory_prot is an internal helper for callers that have been passed
+ * __set_memory_prot is an internal helper for callers that have been passed
  * a pgprot_t value from upper layers and a reservation has already been taken.
  * If you want to set the pgprot to a specific page protocol, use the
  * set_memory_xx() functions.
@@ -1925,6 +1925,52 @@ int set_memory_wb(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_wb);
 
+/*
+ * Prevent speculative access to the page by either unmapping
+ * it (if we do not require access to any part of the page) or
+ * marking it uncacheable (if we want to try to retrieve data
+ * from non-poisoned lines in the page).
+ */
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn, bool unmap)
+{
+	unsigned long decoy_addr;
+	int rc;
+
+	/* SGX pages are not in the 1:1 map */
+	if (arch_is_platform_page(pfn << PAGE_SHIFT))
+		return 0;
+	/*
+	 * We would like to just call:
+	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
+	 * but doing that would radically increase the odds of a
+	 * speculative access to the poison page because we'd have
+	 * the virtual address of the kernel 1:1 mapping sitting
+	 * around in registers.
+	 * Instead we get tricky. We create a non-canonical address
+	 * that looks just like the one we want, but has bit 63 flipped.
+	 * This relies on set_memory_XX() properly sanitizing any __pa()
+	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+	 */
+	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+
+	if (unmap)
+		rc = set_memory_np(decoy_addr, 1);
+	else
+		rc = set_memory_uc(decoy_addr, 1);
+	if (rc)
+		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+	return rc;
+}
+
+/* Restore full speculative operation to the pfn. */
+int clear_mce_nospec(unsigned long pfn)
+{
+	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
+}
+EXPORT_SYMBOL_GPL(clear_mce_nospec);
+#endif /* CONFIG_X86_64 */
+
 int set_memory_x(unsigned long addr, int numpages)
 {
 	if (!(__supported_pte_mask & _PAGE_NX))
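
The heart of the relocated code is the decoy-address trick: instead of handing set_memory_np()/set_memory_uc() the real 1:1-map virtual address of the poisoned page, set_mce_nospec() builds an alias with bit 63 flipped, so the genuine address never sits in a register where it could be speculatively dereferenced; set_memory_XX() later masks the bogus high bit away via __PHYSICAL_MASK or PTE_PFN_MASK. For example, with the default 4-level, non-KASLR direct map at 0xffff888000000000 and pfn 0x12345, decoy_addr works out to 0x7fff888012345000: bit 63 is clear while bits 62..47 are set, so the address is non-canonical and would fault if dereferenced, yet masking still recovers the correct physical address. Below is a hypothetical caller sketch; the surrounding function names and the whole_page flag are illustrative assumptions, and only the set_mce_nospec()/clear_mce_nospec() calls come from this patch.

#include <linux/set_memory.h>
#include <linux/printk.h>

/* Hypothetical error-handling path, not taken from this patch. */
static void handle_poisoned_page(unsigned long pfn, bool whole_page)
{
	/*
	 * Drop the page from the 1:1 map entirely when the whole page is
	 * unusable; otherwise leave it mapped uncacheable so data in the
	 * non-poisoned cachelines can still be recovered.
	 */
	if (set_mce_nospec(pfn, whole_page))
		pr_warn("pfn 0x%lx is still mapped in the 1:1 map\n", pfn);
}

/* Once the page has been retired or repaired and is safe to use again. */
static void recover_page(unsigned long pfn)
{
	/* Restore a normal, cacheable write-back mapping. */
	clear_mce_nospec(pfn);
}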