Diffstat (limited to 'arch/x86/coco/sev/shared.c')
 -rw-r--r--  arch/x86/coco/sev/shared.c | 49
 1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/coco/sev/shared.c
index 71de53194089..16b799f37d6c 100644
--- a/arch/x86/coco/sev/shared.c
+++ b/arch/x86/coco/sev/shared.c
@@ -1243,6 +1243,24 @@ static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svs
__pval_terminate(pfn, action, page_size, ret, svsm_ret);
}
+static inline void sev_evict_cache(void *va, int npages)
+{
+ volatile u8 val __always_unused;
+ u8 *bytes = va;
+ int page_idx;
+
+ /*
+ * For SEV guests, a read from the first/last cache-lines of a 4K page
+ * using the guest key is sufficient to cause a flush of all cache-lines
+ * associated with that 4K page without incurring all the overhead of a
+ * full CLFLUSH sequence.
+ */
+ for (page_idx = 0; page_idx < npages; page_idx++) {
+ val = bytes[page_idx * PAGE_SIZE];
+ val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+ }
+}
+
static void svsm_pval_4k_page(unsigned long paddr, bool validate)
{
struct svsm_pvalidate_call *pc;
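
The sev_evict_cache() helper added in the hunk above is the whole mitigation primitive: no CLFLUSH loop, just one read from the first byte and one from the last byte of each 4K page, which is enough for an SEV guest to have that page's cache lines evicted. The following is a standalone userspace sketch of the same access pattern, not part of the patch; PAGE_SZ, evict_pages_sketch() and the demo buffer are illustrative names.

#include <stdlib.h>

#define PAGE_SZ 4096	/* assumed 4K page size for the sketch */

/*
 * Mirror of the sev_evict_cache() access pattern: touch the first and
 * last byte of every page so both boundary cache lines are read.
 */
static void evict_pages_sketch(unsigned char *bytes, int npages)
{
	volatile unsigned char sink;

	for (int i = 0; i < npages; i++) {
		sink = bytes[i * PAGE_SZ];		 /* first cache line */
		sink = bytes[i * PAGE_SZ + PAGE_SZ - 1]; /* last cache line  */
	}
	(void)sink;
}

int main(void)
{
	unsigned char *buf = aligned_alloc(PAGE_SZ, 4 * PAGE_SZ);

	if (!buf)
		return 1;

	evict_pages_sketch(buf, 4);	/* touch all four pages */
	free(buf);
	return 0;
}
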
@@ -1267,6 +1285,7 @@ static void svsm_pval_4k_page(unsigned long paddr, bool validate)
pc->entry[0].page_size = RMP_PG_SIZE_4K;
pc->entry[0].action = validate;
pc->entry[0].ignore_cf = 0;
+ pc->entry[0].rsvd = 0;
pc->entry[0].pfn = paddr >> PAGE_SHIFT;
/* Protocol 0, Call ID 1 */
@@ -1295,6 +1314,13 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool val
if (ret)
__pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
}
+
+ /*
+ * If validating memory (making it private) and affected by the
+ * cache-coherency vulnerability, perform the cache eviction mitigation.
+ */
+ if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+ sev_evict_cache((void *)vaddr, 1);
}
static void pval_pages(struct snp_psc_desc *desc)
@@ -1348,6 +1374,7 @@ static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
pe->page_size = RMP_PG_SIZE_4K;
pe->action = action;
pe->ignore_cf = 0;
+ pe->rsvd = 0;
pe->pfn = pfn;
pe++;
@@ -1378,6 +1405,7 @@ static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int d
pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
pe->ignore_cf = 0;
+ pe->rsvd = 0;
pe->pfn = e->gfn;
pe++;
@@ -1479,10 +1507,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
static void pvalidate_pages(struct snp_psc_desc *desc)
{
+ struct psc_entry *e;
+ unsigned int i;
+
if (snp_vmpl)
svsm_pval_pages(desc);
else
pval_pages(desc);
+
+ /*
+ * If not affected by the cache-coherency vulnerability there is no need
+ * to perform the cache eviction mitigation.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+ return;
+
+ for (i = 0; i <= desc->hdr.end_entry; i++) {
+ e = &desc->entries[i];
+
+ /*
+ * If validating memory (making it private) perform the cache
+ * eviction mitigation.
+ */
+ if (e->operation == SNP_PAGE_STATE_PRIVATE)
+ sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+ }
}
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
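
For reference, the pvalidate_pages() change above gates the mitigation twice: the whole loop is skipped when the CPU advertises X86_FEATURE_COHERENCY_SFW_NO, and within the loop only entries that transition to the private state are evicted, 512 4K pages' worth for a 2M entry. Below is a self-contained sketch of that gating; the struct, the feature stub and the function names are illustrative stand-ins, not the kernel's.

#include <stdbool.h>
#include <stdlib.h>

#define PAGE_SZ 4096

enum pg_state { PG_SHARED, PG_PRIVATE };

struct pg_entry {
	enum pg_state operation;	/* requested page state        */
	bool two_meg;			/* 2M entry vs. 4K entry       */
	unsigned char *kaddr;		/* mapped address of the entry */
};

/* stand-in for cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO) */
static bool coherency_sfw_no(void)
{
	return false;			/* assume an affected CPU */
}

/* same first/last-byte read pattern as sev_evict_cache() in the patch */
static void evict_pages(unsigned char *bytes, int npages)
{
	volatile unsigned char sink;

	for (int i = 0; i < npages; i++) {
		sink = bytes[i * PAGE_SZ];
		sink = bytes[i * PAGE_SZ + PAGE_SZ - 1];
	}
	(void)sink;
}

static void evict_after_validate(struct pg_entry *e, int nr)
{
	if (coherency_sfw_no())
		return;			/* unaffected: no mitigation needed */

	for (int i = 0; i < nr; i++) {
		if (e[i].operation == PG_PRIVATE)
			evict_pages(e[i].kaddr, e[i].two_meg ? 512 : 1);
	}
}

int main(void)
{
	unsigned char *buf = aligned_alloc(PAGE_SZ, 2 * PAGE_SZ);

	if (!buf)
		return 1;

	struct pg_entry e[2] = {
		{ PG_PRIVATE, false, buf },		/* gets evicted */
		{ PG_SHARED,  false, buf + PAGE_SZ },	/* skipped      */
	};

	evict_after_validate(e, 2);
	free(buf);
	return 0;
}
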