-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/memory.h  29
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c     51
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/setup.c            6
3 files changed, 53 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index a1754aecf8f8..eb0c2ebd1743 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -16,8 +16,7 @@
* accessible by another component;
* 10: The page is accessible but not owned by the component;
* The storage of this state depends on the component: either in the
- * hyp_vmemmap for the host state or in PTE software bits for the hypervisor
- * and guests.
+ * hyp_vmemmap for the host and hyp states or in PTE software bits for guests.
*/
enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL,
@@ -25,13 +24,14 @@ enum pkvm_page_state {
PKVM_PAGE_SHARED_BORROWED = BIT(1),
/*
- * 'Meta-states' are not stored directly in PTE SW bits for hyp and
- * guest states, but inferred from the context (e.g. invalid PTE
- * entries). For the host, meta-states are stored directly in the
+ * 'Meta-states' are not stored directly in PTE SW bits for guest
+ * states, but inferred from the context (e.g. invalid PTE entries).
+ * For the host and hyp, meta-states are stored directly in the
* struct hyp_page.
*/
PKVM_NOPAGE = BIT(0) | BIT(1),
};
+#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1))
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
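Note: guest stage-2 entries keep carrying this two-bit state in the PTE software bits covered by PKVM_PAGE_STATE_PROT_MASK, packed and unpacked by helpers like pkvm_mkstate() above. The standalone sketch below models that encoding; the software-bit positions and the PKVM_PAGE_SHARED_OWNED value are assumptions for the model, not quotes of the kernel headers.

/* Model only: the SW-bit positions here are assumed; this is not kernel code. */
#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1ULL << (n))

enum pkvm_page_state {
	PKVM_PAGE_OWNED           = 0ULL,
	PKVM_PAGE_SHARED_OWNED    = BIT(0),	/* assumed value, not in the hunk */
	PKVM_PAGE_SHARED_BORROWED = BIT(1),
	PKVM_NOPAGE               = BIT(0) | BIT(1),
};
#define PKVM_PAGE_STATE_MASK		(BIT(0) | BIT(1))

/* Stand-ins for the two PTE software bits (positions assumed for the model). */
#define PROT_SW0			BIT(55)
#define PROT_SW1			BIT(56)
#define PAGE_STATE_PROT_MASK		(PROT_SW0 | PROT_SW1)
#define PAGE_STATE_PROT_SHIFT		55

static uint64_t mkstate(uint64_t prot, enum pkvm_page_state state)
{
	/* Clear the software bits, then stash the 2-bit state in them. */
	prot &= ~PAGE_STATE_PROT_MASK;
	return prot | ((uint64_t)state << PAGE_STATE_PROT_SHIFT);
}

static enum pkvm_page_state getstate(uint64_t prot)
{
	return (prot & PAGE_STATE_PROT_MASK) >> PAGE_STATE_PROT_SHIFT;
}

int main(void)
{
	uint64_t prot = 0x7ULL;		/* some base permission bits */
	uint64_t enc = mkstate(prot, PKVM_PAGE_SHARED_BORROWED);

	/* Encoding must round-trip and leave the permission bits untouched. */
	assert(getstate(enc) == PKVM_PAGE_SHARED_BORROWED);
	assert((enc & ~PAGE_STATE_PROT_MASK) == prot);

	/* Every state, including the PKVM_NOPAGE meta-state, fits the mask. */
	assert((PKVM_NOPAGE & ~PKVM_PAGE_STATE_MASK) == 0);
	return 0;
}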
@@ -52,7 +52,14 @@ struct hyp_page {
u8 order;
/* Host state. Guarded by the host stage-2 lock. */
- unsigned __host_state : 8;
+ unsigned __host_state : 4;
+
+ /*
+ * Complement of the hyp state. Guarded by the hyp stage-1 lock. We use
+ * the complement so that the initial 0 in __hyp_state_comp (due to the
+ * entire vmemmap starting off zeroed) encodes PKVM_NOPAGE.
+ */
+ unsigned __hyp_state_comp : 4;
u32 host_share_guest_count;
};
@@ -99,6 +106,16 @@ static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
hyp_phys_to_page(phys)->__host_state = state;
}
+static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
+{
+ return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
+}
+
+static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
+{
+ hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
+}
+
/*
* Refcounting for 'struct hyp_page'.
* hyp_pool::lock must be held if atomic access to the refcount is required.
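Note: the complement encoding is the point of the new __hyp_state_comp field. The hyp_vmemmap starts out zero-filled, so storing state ^ PKVM_PAGE_STATE_MASK makes an untouched entry decode to PKVM_NOPAGE without any initialisation pass, and XOR with the mask is its own inverse, so every state round-trips. A minimal userspace model of the accessors follows; the bit-field widths come from the hunks above, while the neighbouring fields and the PKVM_PAGE_SHARED_OWNED value are assumptions.

/* Model only: a cut-down struct hyp_page plus the complement accessors. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BIT(n)	(1U << (n))

enum pkvm_page_state {
	PKVM_PAGE_OWNED           = 0,
	PKVM_PAGE_SHARED_OWNED    = BIT(0),	/* assumed value */
	PKVM_PAGE_SHARED_BORROWED = BIT(1),
	PKVM_NOPAGE               = BIT(0) | BIT(1),
};
#define PKVM_PAGE_STATE_MASK	(BIT(0) | BIT(1))

struct hyp_page {			/* reduced model of the real struct */
	uint16_t refcount;
	uint8_t order;
	unsigned __host_state : 4;
	unsigned __hyp_state_comp : 4;	/* stores the complement, so 0 == NOPAGE */
	uint32_t host_share_guest_count;
};

static enum pkvm_page_state get_hyp_state(struct hyp_page *p)
{
	return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
}

static void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
{
	p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
}

int main(void)
{
	struct hyp_page page;

	/* The vmemmap is zero-initialised, so a fresh entry must already
	 * read back as "the hypervisor has no page here". */
	memset(&page, 0, sizeof(page));
	assert(get_hyp_state(&page) == PKVM_NOPAGE);

	/* XOR with the mask is its own inverse, so any state round-trips. */
	set_hyp_state(&page, PKVM_PAGE_SHARED_BORROWED);
	assert(get_hyp_state(&page) == PKVM_PAGE_SHARED_BORROWED);
	set_hyp_state(&page, PKVM_PAGE_OWNED);
	assert(get_hyp_state(&page) == PKVM_PAGE_OWNED);
	return 0;
}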
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index e9060e6205cb..25ff84c053c1 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -649,24 +649,24 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return 0;
}
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
+static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- if (!kvm_pte_valid(pte))
- return PKVM_NOPAGE;
+ phys_addr_t end = phys + size;
- return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ for (; phys < end; phys += PAGE_SIZE)
+ set_hyp_state(phys, state);
}
-static int __hyp_check_page_state_range(u64 addr, u64 size,
- enum pkvm_page_state state)
+static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- struct check_walk_data d = {
- .desired = state,
- .get_page_state = hyp_get_page_state,
- };
+ phys_addr_t end = phys + size;
+
+ for (; phys < end; phys += PAGE_SIZE) {
+ if (get_hyp_state(phys) != state)
+ return -EPERM;
+ }
- hyp_assert_lock_held(&pkvm_pgd_lock);
- return check_page_state_range(&pkvm_pgtable, addr, size, &d);
+ return 0;
}
static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
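Note: with the hyp state held in the vmemmap, the two helpers above become a linear walk over struct hyp_page entries keyed by physical address, replacing the previous hyp stage-1 PTE walk through check_page_state_range(). Below is a toy, array-backed version of that pattern; physical addresses start at zero, leading underscores are dropped from the names, and every field except the state is stripped out, so treat it as a model rather than the hypervisor's layout.

/* Model only: a toy vmemmap indexed by PFN, not the hypervisor's layout. */
#include <assert.h>
#include <errno.h>
#include <stdint.h>

#define BIT(n)		(1U << (n))
#define PAGE_SIZE	4096ULL
#define NR_PAGES	16

enum pkvm_page_state {
	PKVM_PAGE_OWNED           = 0,
	PKVM_PAGE_SHARED_OWNED    = BIT(0),	/* assumed value */
	PKVM_PAGE_SHARED_BORROWED = BIT(1),
	PKVM_NOPAGE               = BIT(0) | BIT(1),
};
#define PKVM_PAGE_STATE_MASK	(BIT(0) | BIT(1))

struct hyp_page {
	unsigned __hyp_state_comp : 4;		/* other fields omitted */
};

static struct hyp_page vmemmap[NR_PAGES];	/* zero-filled, like hyp_vmemmap */

static struct hyp_page *hyp_phys_to_page(uint64_t phys)
{
	return &vmemmap[phys / PAGE_SIZE];
}

static enum pkvm_page_state get_hyp_state(uint64_t phys)
{
	return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
}

static void set_hyp_state(uint64_t phys, enum pkvm_page_state state)
{
	hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
}

static void hyp_set_page_state_range(uint64_t phys, uint64_t size,
				     enum pkvm_page_state state)
{
	uint64_t end = phys + size;

	for (; phys < end; phys += PAGE_SIZE)
		set_hyp_state(phys, state);
}

static int hyp_check_page_state_range(uint64_t phys, uint64_t size,
				      enum pkvm_page_state state)
{
	uint64_t end = phys + size;

	for (; phys < end; phys += PAGE_SIZE) {
		if (get_hyp_state(phys) != state)
			return -EPERM;	/* one mismatch fails the whole range */
	}
	return 0;
}

int main(void)
{
	/* Fresh entries read back as PKVM_NOPAGE thanks to the complement. */
	assert(hyp_check_page_state_range(0, 4 * PAGE_SIZE, PKVM_NOPAGE) == 0);

	hyp_set_page_state_range(0, 2 * PAGE_SIZE, PKVM_PAGE_SHARED_BORROWED);
	assert(hyp_check_page_state_range(0, 2 * PAGE_SIZE,
					  PKVM_PAGE_SHARED_BORROWED) == 0);
	/* A range with even one non-matching page is rejected. */
	assert(hyp_check_page_state_range(0, 3 * PAGE_SIZE,
					  PKVM_PAGE_SHARED_BORROWED) == -EPERM);
	return 0;
}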
@@ -694,7 +694,6 @@ int __pkvm_host_share_hyp(u64 pfn)
{
u64 phys = hyp_pfn_to_phys(pfn);
void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
u64 size = PAGE_SIZE;
int ret;
@@ -705,13 +704,13 @@ int __pkvm_host_share_hyp(u64 pfn)
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
}
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
unlock:
@@ -734,7 +733,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
if (hyp_page_count((void *)virt)) {
@@ -742,6 +741,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
goto unlock;
}
+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
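Note: taken together, the share and unshare hunks keep the two views of a page in lock-step: sharing requires host-owned/hyp-absent and leaves the pair as PKVM_PAGE_SHARED_OWNED / PKVM_PAGE_SHARED_BORROWED, and unsharing reverses it. The sketch below models only that state bookkeeping; page-table updates, locking and the hyp_page_count() -EBUSY check are deliberately left out, and PKVM_PAGE_SHARED_OWNED's value is assumed.

/* Model only: the paired state transitions, with all mapping work elided. */
#include <assert.h>
#include <errno.h>

#define BIT(n)	(1U << (n))

enum pkvm_page_state {
	PKVM_PAGE_OWNED           = 0,
	PKVM_PAGE_SHARED_OWNED    = BIT(0),	/* assumed value */
	PKVM_PAGE_SHARED_BORROWED = BIT(1),
	PKVM_NOPAGE               = BIT(0) | BIT(1),
};

struct page_model {
	enum pkvm_page_state host;	/* stands in for __host_state */
	enum pkvm_page_state hyp;	/* stands in for the decoded hyp state */
};

static int host_share_hyp(struct page_model *p)
{
	/* Donor must own the page outright, borrower must not have it yet. */
	if (p->host != PKVM_PAGE_OWNED || p->hyp != PKVM_NOPAGE)
		return -EPERM;

	p->hyp = PKVM_PAGE_SHARED_BORROWED;	/* set_hyp_state() + map PAGE_HYP */
	p->host = PKVM_PAGE_SHARED_OWNED;	/* __host_set_page_state_range() */
	return 0;
}

static int host_unshare_hyp(struct page_model *p)
{
	if (p->host != PKVM_PAGE_SHARED_OWNED ||
	    p->hyp != PKVM_PAGE_SHARED_BORROWED)
		return -EPERM;

	p->hyp = PKVM_NOPAGE;			/* set_hyp_state() + unmap */
	p->host = PKVM_PAGE_OWNED;
	return 0;
}

int main(void)
{
	struct page_model p = { PKVM_PAGE_OWNED, PKVM_NOPAGE };

	assert(host_share_hyp(&p) == 0);
	assert(host_share_hyp(&p) == -EPERM);	/* double-share is refused */
	assert(host_unshare_hyp(&p) == 0);
	assert(p.host == PKVM_PAGE_OWNED && p.hyp == PKVM_NOPAGE);
	return 0;
}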
@@ -757,7 +757,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
u64 phys = hyp_pfn_to_phys(pfn);
u64 size = PAGE_SIZE * nr_pages;
void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
int ret;
host_lock_component();
@@ -767,13 +766,13 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
}
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
unlock:
@@ -793,7 +792,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
host_lock_component();
hyp_lock_component();
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
@@ -802,6 +801,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
goto unlock;
}
+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
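Note: the donation hunks are the full-transfer analogue of sharing: the hyp side flips between PKVM_NOPAGE and PKVM_PAGE_OWNED, while the host side is handed over via host_stage2_set_owner_locked() rather than a shared-state annotation. The sketch below models those transitions only; representing the owner-id change as the host state simply becoming PKVM_NOPAGE is a simplifying assumption.

/* Model only: ownership hand-over, with page-table and owner-id work elided. */
#include <assert.h>
#include <errno.h>

#define BIT(n)	(1U << (n))

enum pkvm_page_state {
	PKVM_PAGE_OWNED           = 0,
	PKVM_PAGE_SHARED_OWNED    = BIT(0),	/* assumed value */
	PKVM_PAGE_SHARED_BORROWED = BIT(1),
	PKVM_NOPAGE               = BIT(0) | BIT(1),
};

struct page_model {
	enum pkvm_page_state host;
	enum pkvm_page_state hyp;
};

static int host_donate_hyp(struct page_model *p)
{
	/* Only a page the host exclusively owns can be donated. */
	if (p->host != PKVM_PAGE_OWNED || p->hyp != PKVM_NOPAGE)
		return -EPERM;

	p->hyp = PKVM_PAGE_OWNED;	/* set_hyp_state() + map PAGE_HYP */
	p->host = PKVM_NOPAGE;		/* host_stage2_set_owner_locked(PKVM_ID_HYP) */
	return 0;
}

static int hyp_donate_host(struct page_model *p)
{
	if (p->hyp != PKVM_PAGE_OWNED)
		return -EPERM;

	p->hyp = PKVM_NOPAGE;		/* set_hyp_state() + unmap */
	p->host = PKVM_PAGE_OWNED;	/* host_stage2_set_owner_locked(PKVM_ID_HOST) */
	return 0;
}

int main(void)
{
	struct page_model p = { PKVM_PAGE_OWNED, PKVM_NOPAGE };

	assert(host_donate_hyp(&p) == 0);
	assert(host_donate_hyp(&p) == -EPERM);	/* the host no longer owns it */
	assert(hyp_donate_host(&p) == 0);
	assert(p.host == PKVM_PAGE_OWNED && p.hyp == PKVM_NOPAGE);
	return 0;
}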
@@ -816,19 +816,18 @@ int hyp_pin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
+ u64 phys = __hyp_pa(start);
u64 size = end - start;
int ret;
host_lock_component();
hyp_lock_component();
- ret = __host_check_page_state_range(__hyp_pa(start), size,
- PKVM_PAGE_SHARED_OWNED);
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(start, size,
- PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
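Note: hyp_pin_shared_mem() only proceeds once both views agree the range is shared (host PKVM_PAGE_SHARED_OWNED, hyp PKVM_PAGE_SHARED_BORROWED). The per-page refcount it then takes lies outside this hunk, but it is what the hyp_page_count() test in __pkvm_host_unshare_hyp() trips over. The small model below assumes pinning is nothing more than a per-page counter that blocks unsharing.

/* Model only: pinning as a per-page counter that blocks unsharing. */
#include <assert.h>
#include <errno.h>

struct page_model {
	int shared;		/* host SHARED_OWNED / hyp SHARED_BORROWED */
	unsigned int pins;	/* stands in for the hyp_page refcount */
};

static int hyp_pin_shared_mem(struct page_model *p)
{
	/* Refuse to pin anything that is not currently shared with the hyp. */
	if (!p->shared)
		return -EPERM;
	p->pins++;
	return 0;
}

static void hyp_unpin_shared_mem(struct page_model *p)
{
	p->pins--;
}

static int host_unshare_hyp(struct page_model *p)
{
	if (!p->shared)
		return -EPERM;
	if (p->pins)		/* mirrors the hyp_page_count() check */
		return -EBUSY;
	p->shared = 0;
	return 0;
}

int main(void)
{
	struct page_model p = { .shared = 1, .pins = 0 };

	assert(hyp_pin_shared_mem(&p) == 0);
	assert(host_unshare_hyp(&p) == -EBUSY);	/* pinned pages stay shared */
	hyp_unpin_shared_mem(&p);
	assert(host_unshare_hyp(&p) == 0);
	return 0;
}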
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 1a414288fe8c..955c431af5d0 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -194,16 +194,20 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
/*
* Adjust the host stage-2 mappings to match the ownership attributes
- * configured in the hypervisor stage-1.
+ * configured in the hypervisor stage-1, and make sure to propagate them
+ * to the hyp_vmemmap state.
*/
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
+ set_hyp_state(phys, PKVM_PAGE_OWNED);
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
+ set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
break;
case PKVM_PAGE_SHARED_BORROWED:
+ set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
break;
default: