author     Thomas Hellstrom <thellstrom@vmware.com>         2020-03-04 14:45:26 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-04-17 11:50:01 +0300
commit     aa04e8d359d7cbeae94300f0cc3a43d28329cebe (patch)
tree       7cac1fd2b62d08ff0cbbd8174215d42ef7e7ae56 /arch/x86/include/asm
parent     ce7a61a0d57d2dd613941d8aef82a5b54fb2e396 (diff)
x86: Don't let pgprot_modify() change the page encryption bit
[ Upstream commit 6db73f17c5f155dbcfd5e48e621c706270b84df0 ]

When SEV or SME is enabled and active, vm_get_page_prot() typically
returns with the encryption bit set. This means that users of
pgprot_modify(..., vm_get_page_prot()) (mprotect_fixup(), do_mmap()) end
up with a value of vma->vm_page_prot that is not consistent with the
intended protection of the PTEs.

This is also important for fault handlers that rely on the VMA
vm_page_prot to set the page protection. Fix this by not allowing
pgprot_modify() to change the encryption bit, similar to how it's done
for PAT bits.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lkml.kernel.org/r/20200304114527.3636-2-thomas_os@shipmail.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
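For readers skimming the patch, the effect of the change can be modeled outside
the kernel. The following is a minimal user-space sketch, not kernel code: the
bit positions and the stripped-down masks (_PAGE_CHG_MASK_OLD and
_PAGE_CHG_MASK_NEW) are invented for the demonstration, since the real
_PAGE_ENC value is derived from the CPU's SME/SEV configuration at boot.

    /* Illustrative user-space model of the pgprot_modify() change. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pgprotval_t;

    #define _PAGE_RW   (1ULL << 1)   /* illustrative */
    #define _PAGE_ENC  (1ULL << 51)  /* illustrative stand-in for the SME/SEV C-bit */

    #define _PAGE_CHG_MASK_OLD  0ULL         /* pretend mask before the patch: no _PAGE_ENC */
    #define _PAGE_CHG_MASK_NEW  (_PAGE_ENC)  /* pretend mask after the patch */

    /* Same shape as the patched pgprot_modify(): keep the masked bits from
     * oldprot, take everything else from newprot.  With the empty "old" mask
     * the masking is a no-op, which matches the pre-patch behaviour. */
    static pgprotval_t modify(pgprotval_t oldprot, pgprotval_t newprot,
                              pgprotval_t chg_mask)
    {
            pgprotval_t preservebits = oldprot & chg_mask;
            pgprotval_t addbits = newprot & ~chg_mask;

            return preservebits | addbits;
    }

    int main(void)
    {
            /* A mapping deliberately left decrypted: no encryption bit. */
            pgprotval_t oldprot = _PAGE_RW;
            /* vm_get_page_prot() under active SME/SEV: encryption bit set. */
            pgprotval_t newprot = _PAGE_RW | _PAGE_ENC;

            printf("pre-patch mask:  enc=%d\n",
                   !!(modify(oldprot, newprot, _PAGE_CHG_MASK_OLD) & _PAGE_ENC));
            printf("post-patch mask: enc=%d\n",
                   !!(modify(oldprot, newprot, _PAGE_CHG_MASK_NEW) & _PAGE_ENC));
            return 0;
    }

Built with any C compiler, this prints enc=1 for the pre-patch mask (the
encryption bit from the freshly generated default protection leaks into a
mapping that was intentionally decrypted) and enc=0 for the post-patch mask,
where the bit is preserved from the VMA's existing protection.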
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/pgtable.h        | 7
-rw-r--r--  arch/x86/include/asm/pgtable_types.h  | 2
2 files changed, 6 insertions, 3 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 0bc530c4eb13..1e6bb4c25334 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -624,12 +624,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
         return __pmd(val);
 }
 
-/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+/*
+ * mprotect needs to preserve PAT and encryption bits when updating
+ * vm_page_prot
+ */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 {
         pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
-        pgprotval_t addbits = pgprot_val(newprot);
+        pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
         return __pgprot(preservebits | addbits);
 }
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b5e49e6bac63..8267dd426b15 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -123,7 +123,7 @@
  */
 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
-                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 /*
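
For context, the callers named in the commit message (mprotect_fixup(),
do_mmap()) reach pgprot_modify() through a helper of roughly the following
shape. This is a paraphrased sketch, not a verbatim quote of mm/mmap.c, and is
only meant to show where oldprot and newprot come from in those paths.

    static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
    {
            /*
             * vm_get_page_prot() builds the default protection for vm_flags;
             * with SME/SEV active that value carries the encryption bit.
             * With _PAGE_ENC now part of _PAGE_CHG_MASK, pgprot_modify()
             * keeps the bit from oldprot (the VMA's current vm_page_prot)
             * rather than taking it from the freshly generated default.
             */
            return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
    }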