Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/Makefile               |  3
-rw-r--r--  arch/x86/mm/amdtopology.c          |  2
-rw-r--r--  arch/x86/mm/fault.c                |  2
-rw-r--r--  arch/x86/mm/init_64.c              |  8
-rw-r--r--  arch/x86/mm/ioremap.c              |  5
-rw-r--r--  arch/x86/mm/mem_encrypt.c          | 13
-rw-r--r--  arch/x86/mm/mem_encrypt_amd.c      | 74
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c |  8
-rw-r--r--  arch/x86/mm/mmio-mod.c             |  2
-rw-r--r--  arch/x86/mm/numa_emulation.c       |  4
-rw-r--r--  arch/x86/mm/pat/memtype.c          |  2
-rw-r--r--  arch/x86/mm/pat/set_memory.c       | 11
-rw-r--r--  arch/x86/mm/pgprot.c               | 35
-rw-r--r--  arch/x86/mm/pgtable.c              | 13
-rw-r--r--  arch/x86/mm/pti.c                  |  2
-rw-r--r--  arch/x86/mm/setup_nx.c             | 62
16 files changed, 150 insertions, 96 deletions
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fe3d3061fc11..f8220fd2c169 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -20,13 +20,12 @@ CFLAGS_REMOVE_mem_encrypt_identity.o	= -pg
 endif
 
 obj-y				:= init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
-				   pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
+				   pgtable.o physaddr.o tlb.o cpu_entry_area.o maccess.o pgprot.o
 
 obj-y				+= pat/
 
 # Make sure __phys_addr has no stackprotector
 CFLAGS_physaddr.o		:= -fno-stack-protector
-CFLAGS_setup_nx.o		:= -fno-stack-protector
 CFLAGS_mem_encrypt_identity.o	:= -fno-stack-protector
 
 CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 058b2f36b3a6..b3ca7d23e4b0 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -154,7 +154,7 @@ int __init amd_numa_init(void)
 		node_set(nodeid, numa_nodes_parsed);
 	}
 
-	if (!nodes_weight(numa_nodes_parsed))
+	if (nodes_empty(numa_nodes_parsed))
 		return -ENOENT;
 
 	/*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d0074c6ed31a..fad8faa29d04 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -149,7 +149,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 	unsigned char opcode;
 
 	if (user_mode(regs)) {
-		if (get_user(opcode, instr))
+		if (get_user(opcode, (unsigned char __user *) instr))
 			break;
 	} else {
 		if (get_kernel_nofault(opcode, instr))
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 96d34ebb20a9..cb290a2f0747 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -110,7 +110,6 @@ int force_personality32;
 /*
  * noexec32=on|off
  * Control non executable heap for 32bit processes.
- * To control the stack too use noexec=off
  *
  * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
  * off	PROT_READ implies PROT_EXEC
@@ -902,6 +901,8 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end
 
 static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
+	const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
+
 	vmemmap_flush_unused_pmd();
 
 	/*
@@ -914,8 +915,7 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
 	 * Mark with PAGE_UNUSED the unused parts of the new memmap range
 	 */
 	if (!IS_ALIGNED(start, PMD_SIZE))
-		memset((void *)start, PAGE_UNUSED,
-		       start - ALIGN_DOWN(start, PMD_SIZE));
+		memset((void *)page, PAGE_UNUSED, start - page);
 
 	/*
 	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
@@ -1269,7 +1269,7 @@ static struct kcore_list kcore_vsyscall;
 
 static void __init register_page_bootmem_info(void)
 {
-#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
+#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP)
 	int i;
 
 	for_each_online_node(i)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 17a492c27306..1ad0228f8ceb 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -242,10 +242,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 	 * If the page being mapped is in memory and SEV is active then
 	 * make sure the memory encryption attribute is enabled in the
 	 * resulting mapping.
+	 * In TDX guests, memory is marked private by default. If encryption
+	 * is not requested (using encrypted), explicitly set decrypt
+	 * attribute in all IOREMAPPED memory.
 	 */
 	prot = PAGE_KERNEL_IO;
 	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
 		prot = pgprot_encrypted(prot);
+	else
+		prot = pgprot_decrypted(prot);
 
 	switch (pcm) {
 	case _PAGE_CACHE_MODE_UC:
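
The ioremap.c hunk above makes "decrypted" the explicit default whenever the caller does not ask for encryption. A minimal standalone sketch of that selection follows; the simplified pgprot_t typedef, the C-bit position, and the main() driver are illustrative assumptions, while pgprot_encrypted()/pgprot_decrypted() in the kernel really do just set or clear the encryption mask bit in the protection value.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	typedef uint64_t pgprot_t;

	/* Example C-bit position; the real one comes from CPUID at boot. */
	static const pgprot_t sme_me_mask = 1ULL << 47;

	static pgprot_t pgprot_encrypted(pgprot_t prot) { return prot | sme_me_mask; }
	static pgprot_t pgprot_decrypted(pgprot_t prot) { return prot & ~sme_me_mask; }

	int main(void)
	{
		pgprot_t prot = 0x163;		/* stand-in for PAGE_KERNEL_IO */
		bool encrypted = false;		/* caller did not request encryption */
		bool iores_map_encrypted = false;	/* resource not flagged encrypted */

		/* Mirrors the __ioremap_caller() logic: decrypted is now explicit. */
		if (iores_map_encrypted || encrypted)
			prot = pgprot_encrypted(prot);
		else
			prot = pgprot_decrypted(prot);

		printf("prot = %#llx\n", (unsigned long long)prot);
		return 0;
	}
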
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 50d209939c66..11350e2fd736 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -42,7 +42,14 @@ bool force_dma_unencrypted(struct device *dev)
 
 static void print_mem_encrypt_feature_info(void)
 {
-	pr_info("AMD Memory Encryption Features active:");
+	pr_info("Memory Encryption Features active:");
+
+	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
+		pr_cont(" Intel TDX\n");
+		return;
+	}
+
+	pr_cont(" AMD");
 
 	/* Secure Memory Encryption */
 	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
@@ -62,6 +69,10 @@ static void print_mem_encrypt_feature_info(void)
 	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		pr_cont(" SEV-ES");
 
+	/* Secure Nested Paging */
+	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		pr_cont(" SEV-SNP");
+
 	pr_cont("\n");
 }
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 6169053c2854..e8f7953fda83 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -31,6 +31,7 @@
 #include <asm/processor-flags.h>
 #include <asm/msr.h>
 #include <asm/cmdline.h>
+#include <asm/sev.h>
 
 #include "mm_internal.h"
 
@@ -48,6 +49,36 @@ EXPORT_SYMBOL(sme_me_mask);
 static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
 
 /*
+ * SNP-specific routine which needs to additionally change the page state from
+ * private to shared before copying the data from the source to destination and
+ * restore after the copy.
+ */
+static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
+				     unsigned long paddr, bool decrypt)
+{
+	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
+
+	if (decrypt) {
+		/*
+		 * @paddr needs to be accessed decrypted, mark the page shared in
+		 * the RMP table before copying it.
+		 */
+		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);
+
+		memcpy(dst, src, sz);
+
+		/* Restore the page state after the memcpy. */
+		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
+	} else {
+		/*
+		 * @paddr needs to be accessed encrypted, no need for the page state
+		 * change.
+		 */
+		memcpy(dst, src, sz);
+	}
+}
+
+/*
  * This routine does not change the underlying encryption setting of the
  * page(s) that map this memory. It assumes that eventually the memory is
  * meant to be accessed as either encrypted or decrypted but the contents
@@ -95,8 +126,13 @@ static void __init __sme_early_enc_dec(resource_size_t paddr,
 		 * Use a temporary buffer, of cache-line multiple size, to
 		 * avoid data corruption as documented in the APM.
 		 */
-		memcpy(sme_early_buffer, src, len);
-		memcpy(dst, sme_early_buffer, len);
+		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
+			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
+			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
+		} else {
+			memcpy(sme_early_buffer, src, len);
+			memcpy(dst, sme_early_buffer, len);
+		}
 
 		early_memunmap(dst, len);
 		early_memunmap(src, len);
@@ -280,11 +316,24 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 
 static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
+	/*
+	 * To maintain the security guarantees of SEV-SNP guests, make sure
+	 * to invalidate the memory before encryption attribute is cleared.
+	 */
+	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
+		snp_set_memory_shared(vaddr, npages);
 }
 
 /* Return true unconditionally: return value doesn't matter for the SEV side */
 static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
 {
+	/*
+	 * After memory is mapped encrypted in the page table, validate it
+	 * so that it is consistent with the page table updates.
+	 */
+	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
+		snp_set_memory_private(vaddr, npages);
+
 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		enc_dec_hypercall(vaddr, npages, enc);
 
@@ -322,14 +371,28 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 	clflush_cache_range(__va(pa), size);
 
 	/* Encrypt/decrypt the contents in-place */
-	if (enc)
+	if (enc) {
 		sme_early_encrypt(pa, size);
-	else
+	} else {
 		sme_early_decrypt(pa, size);
 
+		/*
+		 * On SNP, the page state change in the RMP table must happen
+		 * before the page table updates.
+		 */
+		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
+	}
+
 	/* Change the page encryption mask. */
 	new_pte = pfn_pte(pfn, new_prot);
 	set_pte_atomic(kpte, new_pte);
+
+	/*
+	 * If page is set encrypted in the page table, then update the RMP table to
+	 * add this page as private.
+	 */
+	if (enc)
+		early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
 }
 
 static int __init early_set_memory_enc_dec(unsigned long vaddr,
@@ -432,9 +495,6 @@ void __init sme_early_init(void)
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-		swiotlb_force = SWIOTLB_FORCE;
-
 	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
 	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
 	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
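
The prepare/finish hunks above encode an ordering invariant: a private->shared transition is recorded in the RMP table before the PTE loses its encryption bit, and a shared->private transition is validated only after the PTE has gained it, so the RMP table and page tables never disagree in an exploitable direction. A standalone sketch of that ordering, with stub functions standing in for the real RMP and page-table operations (the stubs and main() are assumptions, not kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	static void snp_set_memory_shared(unsigned long vaddr, int npages)
	{ printf("RMP: %d page(s) at %#lx -> shared\n", npages, vaddr); }

	static void snp_set_memory_private(unsigned long vaddr, int npages)
	{ printf("RMP: %d page(s) at %#lx -> private\n", npages, vaddr); }

	static void update_page_table(unsigned long vaddr, int npages, bool enc)
	{ printf("PTE: %d page(s) at %#lx -> C-bit %s\n", npages, vaddr, enc ? "set" : "clear"); }

	static void set_memory_enc_dec(unsigned long vaddr, int npages, bool enc)
	{
		/* prepare step: invalidate before the C-bit is cleared */
		if (!enc)
			snp_set_memory_shared(vaddr, npages);

		update_page_table(vaddr, npages, enc);

		/* finish step: validate only after the C-bit is set */
		if (enc)
			snp_set_memory_private(vaddr, npages);
	}

	int main(void)
	{
		set_memory_enc_dec(0xffff888000000000UL, 4, false);	/* decrypt */
		set_memory_enc_dec(0xffff888000000000UL, 4, true);	/* encrypt */
		return 0;
	}
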
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index b43bc24d2bb6..f415498d3175 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -45,6 +45,7 @@
 #include <asm/sections.h>
 #include <asm/cmdline.h>
 #include <asm/coco.h>
+#include <asm/sev.h>
 
 #include "mm_internal.h"
 
@@ -509,8 +510,11 @@ void __init sme_enable(struct boot_params *bp)
 	bool active_by_default;
 	unsigned long me_mask;
 	char buffer[16];
+	bool snp;
 	u64 msr;
 
+	snp = snp_init(bp);
+
 	/* Check for the SME/SEV support leaf */
 	eax = 0x80000000;
 	ecx = 0;
@@ -542,6 +546,10 @@ void __init sme_enable(struct boot_params *bp)
 	sev_status   = __rdmsr(MSR_AMD64_SEV);
 	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
 
+	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
+	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+		snp_abort();
+
 	/* Check if memory encryption is enabled */
 	if (feature_mask == AMD_SME_BIT) {
 		/*
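
The sme_enable() hunk adds a consistency check: if early init found an SEV-SNP CC blob but the SEV status MSR does not report SNP as enabled, boot must not continue. A small model of that check; the MSR bit positions match the kernel's MSR_AMD64_SEV layout, while the fake rdmsr_sev_status() and main() are assumptions for the sketch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define MSR_AMD64_SEV_ENABLED		(1ULL << 0)
	#define MSR_AMD64_SEV_ES_ENABLED	(1ULL << 1)
	#define MSR_AMD64_SEV_SNP_ENABLED	(1ULL << 2)

	static uint64_t rdmsr_sev_status(void)
	{
		/* Pretend the hypervisor reported SEV and SEV-ES but not SNP. */
		return MSR_AMD64_SEV_ENABLED | MSR_AMD64_SEV_ES_ENABLED;
	}

	static void snp_abort(void)
	{
		fprintf(stderr, "SEV-SNP CC blob present but SNP not enabled\n");
		exit(1);
	}

	int main(void)
	{
		bool snp = true;	/* snp_init() found a CC blob */
		uint64_t sev_status = rdmsr_sev_status();

		if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
			snp_abort();
		return 0;
	}
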
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 933a2ebad471..c3317f0650d8 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -400,7 +400,7 @@ static void leave_uniprocessor(void)
 	int cpu;
 	int err;
 
-	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
+	if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
 		return;
 	pr_notice("Re-enabling CPUs...\n");
 	for_each_cpu(cpu, downed_cpus) {
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 1a02b791d273..9a9305367fdd 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -123,7 +123,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 	 * Continue to fill physical nodes with fake nodes until there is no
 	 * memory left on any of them.
 	 */
-	while (nodes_weight(physnode_mask)) {
+	while (!nodes_empty(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
 			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
 			u64 start, limit, end;
@@ -270,7 +270,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
 	 * Fill physical nodes with fake nodes of size until there is no memory
 	 * left on any of them.
 	 */
-	while (nodes_weight(physnode_mask)) {
+	while (!nodes_empty(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
 			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
 			u64 start, limit, end;
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 4ba2a3ee4bce..d5ef64ddd35e 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -101,7 +101,7 @@ int pat_debug_enable;
 static int __init pat_debug_setup(char *str)
 {
 	pat_debug_enable = 1;
-	return 0;
+	return 1;
 }
 __setup("debugpat", pat_debug_setup);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 44f0d4260bd8..1abd5438f126 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -638,17 +638,6 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 }
 EXPORT_SYMBOL_GPL(lookup_address);
 
-/*
- * Lookup the page table entry for a virtual address in a given mm. Return a
- * pointer to the entry and the level of the mapping.
- */
-pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
-			    unsigned int *level)
-{
-	return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
-}
-EXPORT_SYMBOL_GPL(lookup_address_in_mm);
-
 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
 				  unsigned int *level)
 {
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
new file mode 100644
index 000000000000..763742782286
--- /dev/null
+++ b/arch/x86/mm/pgprot.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	unsigned long val = pgprot_val(protection_map[vm_flags &
+				      (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	/*
+	 * Take the 4 protection key bits out of the vma->vm_flags value and
+	 * turn them in to the bits that we can put in to a pte.
+	 *
+	 * Only override these if Protection Keys are available (which is only
+	 * on 64-bit).
+	 */
+	if (vm_flags & VM_PKEY_BIT0)
+		val |= _PAGE_PKEY_BIT0;
+	if (vm_flags & VM_PKEY_BIT1)
+		val |= _PAGE_PKEY_BIT1;
+	if (vm_flags & VM_PKEY_BIT2)
+		val |= _PAGE_PKEY_BIT2;
+	if (vm_flags & VM_PKEY_BIT3)
+		val |= _PAGE_PKEY_BIT3;
+#endif
+
+	val = __sme_set(val);
+	if (val & _PAGE_PRESENT)
+		val &= __supported_pte_mask;
+	return __pgprot(val);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
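
The new pgprot.c replaces the generic vm_get_page_prot() with an x86 version: index a 16-entry protection table with the low VM_* bits, then OR in the protection-key bits. A userspace model of the indexing scheme follows; the table contents and the VM_* bit values here are placeholders, only the lookup structure mirrors the kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define VM_READ		0x1UL
	#define VM_WRITE	0x2UL
	#define VM_EXEC		0x4UL
	#define VM_SHARED	0x8UL

	/* index = vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) */
	static const uint64_t protection_map[16] = {
		[VM_READ]			= 0x025,	/* placeholder pgprot values */
		[VM_READ | VM_WRITE]		= 0x027,
		[VM_READ | VM_EXEC]		= 0x005,
		[VM_READ | VM_WRITE | VM_SHARED] = 0x067,
		/* remaining entries left 0 for the sketch */
	};

	static uint64_t vm_get_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}

	int main(void)
	{
		printf("prot(VM_READ|VM_WRITE) = %#llx\n",
		       (unsigned long long)vm_get_page_prot(VM_READ | VM_WRITE));
		return 0;
	}
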
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 3481b35cb4ec..a932d7712d85 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -608,6 +608,16 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 
 	return young;
 }
+
+pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+			 pmd_t *pmdp)
+{
+	/*
+	 * No flush is necessary. Once an invalid PTE is established, the PTE's
+	 * access and dirty bits cannot be updated.
+	 */
+	return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
+}
 #endif
 
 /**
@@ -676,9 +686,8 @@ int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
  *
  * No 512GB pages yet -- always return 0
  */
-int p4d_clear_huge(p4d_t *p4d)
+void p4d_clear_huge(p4d_t *p4d)
 {
-	return 0;
 }
 #endif
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 5d5c7bb50ce9..ffe3b3a087fe 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -540,7 +540,7 @@ static inline bool pti_kernel_image_global_ok(void)
 	 * cases where RANDSTRUCT is in use to help keep the layout a
 	 * secret.
 	 */
-	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
+	if (IS_ENABLED(CONFIG_RANDSTRUCT))
 		return false;
 
 	return true;
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
deleted file mode 100644
index ed5667f5169f..000000000000
--- a/arch/x86/mm/setup_nx.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/pgtable.h>
-
-#include <asm/proto.h>
-#include <asm/cpufeature.h>
-
-static int disable_nx;
-
-/*
- * noexec = on|off
- *
- * Control non-executable mappings for processes.
- *
- * on      Enable
- * off     Disable
- */
-static int __init noexec_setup(char *str)
-{
-	if (!str)
-		return -EINVAL;
-	if (!strncmp(str, "on", 2)) {
-		disable_nx = 0;
-	} else if (!strncmp(str, "off", 3)) {
-		disable_nx = 1;
-	}
-	x86_configure_nx();
-	return 0;
-}
-early_param("noexec", noexec_setup);
-
-void x86_configure_nx(void)
-{
-	if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
-		__supported_pte_mask |= _PAGE_NX;
-	else
-		__supported_pte_mask &= ~_PAGE_NX;
-}
-
-void __init x86_report_nx(void)
-{
-	if (!boot_cpu_has(X86_FEATURE_NX)) {
-		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
-		       "missing in CPU!\n");
-	} else {
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-		if (disable_nx) {
-			printk(KERN_INFO "NX (Execute Disable) protection: "
-			       "disabled by kernel command line option\n");
-		} else {
-			printk(KERN_INFO "NX (Execute Disable) protection: "
-			       "active\n");
-		}
-#else
-		/* 32bit non-PAE kernel, NX cannot be used */
-		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
-		       "cannot be enabled: non-PAE kernel!\n");
-#endif
-	}
-}
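
With setup_nx.c removed, per-PTE NX handling reduces to the mask filtering visible in pgprot.c above: any bit absent from __supported_pte_mask (such as _PAGE_NX on hardware without NX) is stripped from present protections. A toy model of that filtering; _PAGE_NX really is bit 63 on x86-64, while the rest of the program is an illustrative assumption:

	#include <stdint.h>
	#include <stdio.h>

	#define _PAGE_PRESENT	(1ULL << 0)
	#define _PAGE_NX	(1ULL << 63)

	static uint64_t __supported_pte_mask = ~0ULL;	/* assume an NX-capable CPU */

	static uint64_t filter_pgprot(uint64_t val)
	{
		/* Mirrors the tail of vm_get_page_prot(): strip unsupported bits. */
		if (val & _PAGE_PRESENT)
			val &= __supported_pte_mask;
		return val;
	}

	int main(void)
	{
		uint64_t prot = _PAGE_PRESENT | _PAGE_NX;

		printf("NX CPU:     %#llx\n", (unsigned long long)filter_pgprot(prot));

		__supported_pte_mask &= ~_PAGE_NX;	/* model a non-NX CPU */
		printf("non-NX CPU: %#llx\n", (unsigned long long)filter_pgprot(prot));
		return 0;
	}
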