Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h  |  10
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |  11
-rw-r--r--  arch/arm/kvm/mmu.c              |  69
-rw-r--r--  arch/arm/xen/enlighten.c        | 104
4 files changed, 63 insertions, 131 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 4cf48c3aca13..405aa1883307 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -269,6 +269,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+ return false;
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+ pgd_t *hyp_pgd,
+ pgd_t *merged_hyp_pgd,
+ unsigned long hyp_idmap_start) { }
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
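
On 32-bit ARM both helpers are deliberately trivial: the HYP idmap always sits inside the VA range covered by the runtime HYP tables, so the new extended-idmap branches in mmu.c fall through to the plain paths. For reference only, a rough sketch of what the non-trivial arm64 counterpart of the first helper amounts to (idmap_t0sz, TCR_T0SZ and VA_BITS are arm64 definitions, not part of this diff):

/*
 * Illustrative sketch, not part of this patch: arm64 needs the extended
 * idmap whenever idmap_t0sz had to be widened beyond what the configured
 * kernel VA size covers, i.e. the idmap lies outside the HYP VA range.
 */
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}
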
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7a301be9ac67..8b60fde5ce48 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -11,7 +11,7 @@
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif
-
+
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
@@ -23,7 +23,7 @@
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(32); \
+ . = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
@@ -343,8 +343,11 @@ SECTIONS
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
/*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
* The above comment applies as well.
*/
-ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+ "HYP init code too big or misaligned")
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 15b050d46fc9..1d5accbd3dcf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -35,9 +35,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
+static pgd_t *merged_hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
@@ -405,9 +405,6 @@ void free_boot_hyp_pgd(void)
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- free_page((unsigned long)init_bounce_page);
- init_bounce_page = NULL;
-
mutex_unlock(&kvm_hyp_pgd_mutex);
}
@@ -438,6 +435,11 @@ void free_hyp_pgds(void)
free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL;
}
+ if (merged_hyp_pgd) {
+ clear_page(merged_hyp_pgd);
+ free_page((unsigned long)merged_hyp_pgd);
+ merged_hyp_pgd = NULL;
+ }
mutex_unlock(&kvm_hyp_pgd_mutex);
}
@@ -1622,12 +1624,18 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
phys_addr_t kvm_mmu_get_httbr(void)
{
- return virt_to_phys(hyp_pgd);
+ if (__kvm_cpu_uses_extended_idmap())
+ return virt_to_phys(merged_hyp_pgd);
+ else
+ return virt_to_phys(hyp_pgd);
}
phys_addr_t kvm_mmu_get_boot_httbr(void)
{
- return virt_to_phys(boot_hyp_pgd);
+ if (__kvm_cpu_uses_extended_idmap())
+ return virt_to_phys(merged_hyp_pgd);
+ else
+ return virt_to_phys(boot_hyp_pgd);
}
phys_addr_t kvm_get_idmap_vector(void)
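
With these accessors, callers do not need to know whether a merged table is in use: when the extended idmap is active, both return the physical address of the same merged pgd page, so the boot and runtime HYP translations live under a single HTTBR. A condensed sketch of the existing per-CPU consumer (simplified from arch/arm/kvm/arm.c, which this diff does not touch):

static void cpu_init_hyp_mode(void *dummy)
{
	phys_addr_t boot_pgd_ptr = kvm_mmu_get_boot_httbr();
	phys_addr_t pgd_ptr = kvm_mmu_get_httbr();
	unsigned long stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
	unsigned long hyp_stack_ptr = stack_page + PAGE_SIZE;
	unsigned long vector_ptr = (unsigned long)__kvm_hyp_vector;

	/* Switch from the HYP stub to the KVM init vector, then install
	 * the page tables and stack for this CPU's HYP context. */
	__hyp_set_vectors(kvm_get_idmap_vector());
	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
}
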
@@ -1643,39 +1651,11 @@ int kvm_mmu_init(void)
hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
- if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
- /*
- * Our init code is crossing a page boundary. Allocate
- * a bounce page, copy the code over and use that.
- */
- size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
- phys_addr_t phys_base;
-
- init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
- if (!init_bounce_page) {
- kvm_err("Couldn't allocate HYP init bounce page\n");
- err = -ENOMEM;
- goto out;
- }
-
- memcpy(init_bounce_page, __hyp_idmap_text_start, len);
- /*
- * Warning: the code we just copied to the bounce page
- * must be flushed to the point of coherency.
- * Otherwise, the data may be sitting in L2, and HYP
- * mode won't be able to observe it as it runs with
- * caches off at that point.
- */
- kvm_flush_dcache_to_poc(init_bounce_page, len);
-
- phys_base = kvm_virt_to_phys(init_bounce_page);
- hyp_idmap_vector += phys_base - hyp_idmap_start;
- hyp_idmap_start = phys_base;
- hyp_idmap_end = phys_base + len;
-
- kvm_info("Using HYP init bounce page @%lx\n",
- (unsigned long)phys_base);
- }
+ /*
+ * We rely on the linker script to ensure at build time that the HYP
+ * init code does not cross a page boundary.
+ */
+ BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
@@ -1698,6 +1678,17 @@ int kvm_mmu_init(void)
goto out;
}
+ if (__kvm_cpu_uses_extended_idmap()) {
+ merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!merged_hyp_pgd) {
+ kvm_err("Failed to allocate extra HYP pgd\n");
+ goto out;
+ }
+ __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
+ hyp_idmap_start);
+ return 0;
+ }
+
/* Map the very same page at the trampoline VA */
err = __create_hyp_mappings(boot_hyp_pgd,
TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
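
kvm_mmu_init() now allocates one extra zeroed page and lets the architecture merge the boot and runtime tables into it via __kvm_extend_hypmap(), which on 32-bit ARM is the empty stub added above. As a point of reference, the arm64 implementation this hook exists for does roughly the following (sketch only, not part of this diff; __pgd, PMD_TYPE_TABLE and VA_BITS are arm64 definitions):

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/* Entry 0 of the merged table points at the runtime HYP pgd; it is
	 * guaranteed to be free, or no extended idmap would be needed. */
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/* A second entry points at the boot pgd holding the ID mapping of
	 * the HYP init code; the two ranges never overlap, so both remain
	 * reachable through the one table programmed into HTTBR. */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
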
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 263a2044c65b..224081ccc92f 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -53,105 +53,33 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
static __read_mostly int xen_events_irq = -1;
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
- unsigned int domid)
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages)
{
- int rc;
- struct xen_add_to_physmap_range xatp = {
- .domid = DOMID_SELF,
- .foreign_domid = domid,
- .size = 1,
- .space = XENMAPSPACE_gmfn_foreign,
- };
- xen_ulong_t idx = fgmfn;
- xen_pfn_t gpfn = lpfn;
- int err = 0;
-
- set_xen_guest_handle(xatp.idxs, &idx);
- set_xen_guest_handle(xatp.gpfns, &gpfn);
- set_xen_guest_handle(xatp.errs, &err);
-
- rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
- if (rc || err) {
- pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
- rc, err, lpfn, fgmfn);
- return 1;
- }
- return 0;
-}
-
-struct remap_data {
- xen_pfn_t fgmfn; /* foreign domain's gmfn */
- pgprot_t prot;
- domid_t domid;
- struct vm_area_struct *vma;
- int index;
- struct page **pages;
- struct xen_remap_mfn_info *info;
-};
-
-static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
-{
- struct remap_data *info = data;
- struct page *page = info->pages[info->index++];
- unsigned long pfn = page_to_pfn(page);
- pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
-
- if (map_foreign_page(pfn, info->fgmfn, info->domid))
- return -EFAULT;
- set_pte_at(info->vma->vm_mm, addr, ptep, pte);
-
- return 0;
+ return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+ prot, domid, pages);
}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+/* Not used by XENFEAT_auto_translated guests. */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t mfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
+ unsigned long addr,
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
{
- int err;
- struct remap_data data;
-
- /* TBD: Batching, current sole caller only does page at a time */
- if (nr > 1)
- return -EINVAL;
-
- data.fgmfn = mfn;
- data.prot = prot;
- data.domid = domid;
- data.vma = vma;
- data.index = 0;
- data.pages = pages;
- err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
- remap_pte_fn, &data);
- return err;
+ return -ENOSYS;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
{
- int i;
-
- for (i = 0; i < nr; i++) {
- struct xen_remove_from_physmap xrp;
- unsigned long rc, pfn;
-
- pfn = page_to_pfn(pages[i]);
-
- xrp.domid = DOMID_SELF;
- xrp.gpfn = pfn;
- rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
- if (rc) {
- pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
- pfn, rc);
- return rc;
- }
- }
- return 0;
+ return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
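
The net effect of the enlighten.c changes is that the open-coded XENMEM_add_to_physmap_range and XENMEM_remove_from_physmap loops are replaced by the shared translation helpers in drivers/xen/xlate_mmu.c, while the legacy single-gfn xen_remap_domain_mfn_range() now simply reports -ENOSYS, since auto-translated guests are expected to use the array interface with per-page error reporting. A hedged caller-side sketch (the function name is made up for illustration; it assumes the array helper returns the number of pages mapped or a negative errno, with per-page results in err_ptr; privcmd is the real in-tree user):

/* Illustrative caller, not part of this diff. */
static int example_map_foreign_gfns(struct vm_area_struct *vma,
				    unsigned long addr, xen_pfn_t *gfns,
				    int nr, unsigned domid,
				    struct page **pages)
{
	int *errs = kcalloc(nr, sizeof(*errs), GFP_KERNEL);
	int rc, i;

	if (!errs)
		return -ENOMEM;

	rc = xen_remap_domain_mfn_array(vma, addr, gfns, nr, errs,
					vma->vm_page_prot, domid, pages);
	for (i = 0; i < nr; i++)
		if (errs[i])
			pr_warn("gfn %d failed to map: %d\n", i, errs[i]);

	kfree(errs);
	return rc < 0 ? rc : 0;
}
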