author     Brijesh Singh <brijesh.singh@amd.com>   2022-02-23 07:35:28 +0300
committer  Borislav Petkov <bp@suse.de>            2022-02-23 21:14:29 +0300
commit     1e8c5971c249893ac33ca983c32bafcf5d50c727 (patch)
tree       f20c6e8c3533923f1e057be90f6c26da7303cd2b /arch/x86/mm/pat
parent     b577f542f93cbba57f8d6185ef1fb13a41ddf162 (diff)
download   linux-1e8c5971c249893ac33ca983c32bafcf5d50c727.tar.xz
x86/mm/cpa: Generalize __set_memory_enc_pgtable()
The kernel provides infrastructure to set or clear the encryption mask
from the pages for AMD SEV, but TDX requires a few tweaks:

- TDX and SEV have different requirements for cache and TLB flushing.

- TDX has its own routine to notify the VMM about a page encryption
  status change.

Modify __set_memory_enc_pgtable() and make it flexible enough to cover
both AMD SEV and Intel TDX. The AMD-specific behavior is isolated in
the callbacks under x86_platform.guest. TDX will provide its own
version of said callbacks.

  [ bp: Beat into submission. ]

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: https://lore.kernel.org/r/20220223043528.2093214-1-brijesh.singh@amd.com
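For reference, the four callbacks used in the hunks below hang off
x86_platform.guest. A minimal sketch of what that callback table might
look like follows; the member names are taken from the call sites in
the diff, while the struct name and exact prototypes here are
assumptions inferred from how the callbacks are invoked:

/*
 * Illustrative sketch only: member names come from the call sites in
 * __set_memory_enc_pgtable(); the struct name and signatures are assumed.
 */
struct x86_guest_enc_ops {
        /* Called before the page attributes are changed. */
        void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
        /* Called after a successful change; returning false reports failure. */
        bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
        /* Does this transition require a TLB flush? */
        bool (*enc_tlb_flush_required)(bool enc);
        /* Does this transition require a cache flush? */
        bool (*enc_cache_flush_required)(void);
};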
Diffstat (limited to 'arch/x86/mm/pat')
-rw-r--r--  arch/x86/mm/pat/set_memory.c | 20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 1441db69cea5..3b75262cfb27 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2008,10 +2008,12 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	kmap_flush_unused();
 	vm_unmap_aliases();
 
-	/*
-	 * Before changing the encryption attribute, we need to flush caches.
-	 */
-	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	/* Flush the caches as needed before changing the encryption attribute. */
+	if (x86_platform.guest.enc_tlb_flush_required(enc))
+		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
+
+	/* Notify hypervisor that we are about to set/clr encryption attribute. */
+	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
@@ -2024,11 +2026,11 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
-	/*
-	 * Notify hypervisor that a given memory range is mapped encrypted
-	 * or decrypted.
-	 */
-	notify_range_enc_status_changed(addr, numpages, enc);
+	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
+	if (!ret) {
+		if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+			ret = -EIO;
+	}
 
 	return ret;
 }
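For illustration, a platform that keeps the previous AMD SEV behavior
could wire the callbacks roughly as below. This is a sketch under
assumptions, not the mem_encrypt_amd.c change from this series (that
file is outside this diffstat); only notify_range_enc_status_changed()
and the X86_FEATURE_SME_COHERENT check are taken from the removed lines
above, and the amd_* helper names are hypothetical.

/* Hypothetical AMD SEV-style callbacks (names assumed for illustration). */
static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
        /* SEV needs no preparatory hypervisor call before the attribute change. */
}

static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
        /* Same notification the old code issued unconditionally. */
        notify_range_enc_status_changed(vaddr, npages, enc);
        return true;
}

static bool amd_enc_tlb_flush_required(bool enc)
{
        /* The old code always flushed here, so keep doing it. */
        return true;
}

static bool amd_enc_cache_flush_required(void)
{
        /* Mirrors the removed !this_cpu_has(X86_FEATURE_SME_COHERENT) check. */
        return !this_cpu_has(X86_FEATURE_SME_COHERENT);
}

With callbacks like these registered, e.g. during platform setup:

        x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
        x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
        x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
        x86_platform.guest.enc_cache_flush_required  = amd_enc_cache_flush_required;

the generalized __set_memory_enc_pgtable() behaves as before on SEV,
while TDX can plug in its own prepare/finish and flush policies.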