Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/ioremap.c      |  6
-rw-r--r--   arch/x86/mm/mem_encrypt.c  | 21
2 files changed, 11 insertions, 16 deletions
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a7250fa3d45f..b59a5cbc6bc5 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -92,7 +92,7 @@ static unsigned int __ioremap_check_ram(struct resource *res)
  */
 static unsigned int __ioremap_check_encrypted(struct resource *res)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return 0;
 
 	switch (res->desc) {
@@ -112,7 +112,7 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
  */
 static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	if (!IS_ENABLED(CONFIG_EFI))
@@ -556,7 +556,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
 	case E820_TYPE_NVS:
 	case E820_TYPE_UNUSABLE:
 		/* For SEV, these areas are encrypted */
-		if (sev_active())
+		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 			break;
 		fallthrough;
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 2163485a74e1..932007a6913b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -194,7 +194,7 @@ void __init sme_early_init(void)
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
@@ -203,7 +203,7 @@ void __init sev_setup_arch(void)
 	phys_addr_t total_mem = memblock_phys_mem_size();
 	unsigned long size;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	/*
@@ -364,8 +364,8 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 /*
  * SME and SEV are very similar but they are not the same, so there are
  * times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this. When a
- * distinction isn't needed, the mem_encrypt_active() function can be used.
+ * cc_platform_has() function is used for this. When a distinction isn't
+ * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
  *
  * The trampoline code is a good example for this requirement. Before
  * paging is activated, SME will access all memory as decrypted, but SEV
@@ -373,11 +373,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
  * up under SME the trampoline area cannot be encrypted, whereas under SEV
  * the trampoline area must be encrypted.
  */
-bool sev_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ENABLED;
-}
-EXPORT_SYMBOL_GPL(sev_active);
 
 /* Needs to be called from non-instrumentable code */
 bool noinstr sev_es_active(void)
@@ -391,7 +386,7 @@ bool force_dma_unencrypted(struct device *dev)
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return true;
 
 	/*
@@ -450,7 +445,7 @@ static void print_mem_encrypt_feature_info(void)
 	}
 
 	/* Secure Encrypted Virtualization */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		pr_cont(" SEV");
 
 	/* Encrypted Register State */
@@ -473,7 +468,7 @@ void __init mem_encrypt_init(void)
 	 * With SEV, we need to unroll the rep string I/O instructions,
 	 * but SEV-ES supports them through the #VC handler.
 	 */
-	if (sev_active() && !sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && !sev_es_active())
 		static_branch_enable(&sev_enable_key);
 
 	print_mem_encrypt_feature_info();
@@ -481,6 +476,6 @@ void __init mem_encrypt_init(void)
 
 int arch_has_restricted_virtio_memory_access(void)
 {
-	return sev_active();
+	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
 }
 EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
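For context, the sketch below (not part of this diff) illustrates the call-site pattern the change converts to: callers query a confidential-computing attribute via cc_platform_has() from <linux/cc_platform.h> instead of the vendor-specific sev_active() helper. The function name example_report_mem_encrypt() is hypothetical and used only for illustration.

#include <linux/cc_platform.h>
#include <linux/printk.h>

/* Illustrative sketch only -- not part of this patch. */
static void example_report_mem_encrypt(void)
{
	/* True in an SEV guest, where guest memory encryption is active. */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pr_info("SEV guest memory encryption active\n");

	/* True whenever any memory encryption (SME or SEV) is active,
	 * for callers that do not need to distinguish between the two.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_info("memory encryption active\n");
}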