author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-21 02:29:37 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-21 02:29:37 +0400
commit    6db167dfc013b0e114c81077ac091ba26a69f4ed (patch)
tree      1978b8a2774e0fdb3690065625b0b5eb84475552 /arch/arm/mm
parent    32f9aab8ebd886211a7b3e552753af014c3e5225 (diff)
parent    9cb543124a2d31af42ce61a4c30765ecc8e5f1fa (diff)
download  linux-6db167dfc013b0e114c81077ac091ba26a69f4ed.tar.xz
Merge branch 'for-linus-2' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates (part two) from Russell King:

 - breakpoint and perf updates from Will Deacon
 - hypervisor boot mode updates from Will
 - support for Power State Coordination Interface via the Hypervisor
 - core ARM support for KVM

* 'for-linus-2' of git://git.linaro.org/people/rmk/linux-arm: (32 commits)
  KVM: ARM: Add maintainer entry for KVM/ARM
  KVM: ARM: Power State Coordination Interface implementation
  KVM: ARM: Handle I/O aborts
  KVM: ARM: Handle guest faults in KVM
  KVM: ARM: VFP userspace interface
  KVM: ARM: Demux CCSIDR in the userspace API
  KVM: ARM: User space API for getting/setting co-proc registers
  KVM: ARM: Emulation framework and CP15 emulation
  KVM: ARM: World-switch implementation
  KVM: ARM: Inject IRQs and FIQs from userspace
  KVM: ARM: Memory virtualization setup
  KVM: ARM: Hypervisor initialization
  KVM: ARM: Initial skeleton to compile KVM support
  ARM: Section based HYP idmap
  ARM: Add page table and page defines needed by KVM
  ARM: perf: simplify __hw_perf_event_init err handling
  ARM: perf: remove unnecessary checks for idx < 0
  ARM: perf: handle armpmu_register failing
  ARM: perf: don't pretend to support counting of L1I writes
  ARM: perf: remove redundant NULL check on cpu_pmu
  ...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig  10
-rw-r--r--  arch/arm/mm/idmap.c  55
-rw-r--r--  arch/arm/mm/mmu.c    22
3 files changed, 69 insertions, 18 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fd629d5a513..025d17328730 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,8 +629,9 @@ config ARM_THUMBEE
make use of it. Say N for code that can run on CPUs without ThumbEE.
config ARM_VIRT_EXT
- bool "Native support for the ARM Virtualization Extensions"
- depends on MMU && CPU_V7
+ bool
+ depends on MMU
+ default y if CPU_V7
help
Enable the kernel to make use of the ARM Virtualization
Extensions to install hypervisors without run-time firmware
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT
use of this feature. Refer to Documentation/arm/Booting for
details.
- It is safe to enable this option even if the kernel may not be
- booted in HYP mode, may not have support for the
- virtualization extensions, or may be booted with a
- non-compliant bootloader.
-
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
depends on !CPU_USE_DOMAINS && CPU_V7
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 99db769307ec..2dffc010cc41 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,4 +1,6 @@
+#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
@@ -6,6 +8,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
+#include <asm/virt.h>
pgd_t *idmap_pgd;
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
-static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+ const char *text_end, unsigned long prot)
{
- unsigned long prot, next;
+ unsigned long addr, end;
+ unsigned long next;
+
+ addr = virt_to_phys(text_start);
+ end = virt_to_phys(text_end);
+
+ prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
- prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
} while (pgd++, addr = next, addr != end);
}
+#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
+pgd_t *hyp_pgd;
+
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static int __init init_static_idmap_hyp(void)
+{
+ hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ if (!hyp_pgd)
+ return -ENOMEM;
+
+ pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
+ __hyp_idmap_text_start, __hyp_idmap_text_end);
+ identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+ __hyp_idmap_text_end, PMD_SECT_AP1);
+
+ return 0;
+}
+#else
+static int __init init_static_idmap_hyp(void)
+{
+ return 0;
+}
+#endif
+
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
- phys_addr_t idmap_start, idmap_end;
+ int ret;
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
- /* Add an identity mapping for the physical address of the section. */
- idmap_start = virt_to_phys((void *)__idmap_text_start);
- idmap_end = virt_to_phys((void *)__idmap_text_end);
+ pr_info("Setting up static identity map for 0x%p - 0x%p\n",
+ __idmap_text_start, __idmap_text_end);
+ identity_mapping_add(idmap_pgd, __idmap_text_start,
+ __idmap_text_end, 0);
- pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
- (long long)idmap_start, (long long)idmap_end);
- identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+ ret = init_static_idmap_hyp();
/* Flush L1 for the hardware to see this page table content */
flush_cache_louis();
- return 0;
+ return ret;
}
early_initcall(init_static_idmap);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 69bb735474fe..e95a996ab78f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
unsigned int cr_mask;
pmdval_t pmd;
pteval_t pte;
+ pteval_t pte_s2;
};
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy) policy
+#else
+#define s2_policy(policy) 0
+#endif
+
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
+ .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}
};
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
struct cachepolicy *cp;
unsigned int cr = get_cr();
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
*/
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+ s2_pgprot = cp->pte_s2;
+ hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
/*
* ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
+ s2_pgprot |= L_PTE_SHARED;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | kern_pgprot);
+ pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+ pgprot_s2_device = __pgprot(s2_device_pgprot);
+ pgprot_hyp_device = __pgprot(hyp_device_pgprot);
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;