author     Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-03-19 19:42:27 +0300
committer  Will Deacon <will.deacon@arm.com>           2015-03-23 14:35:29 +0300
commit     dd006da21646f1c86f0242eb8f527d093303127a
tree       b67ea3943e04f7d6b643b0f38c4cba7f3db42ace /arch/arm64/mm
parent     6232cfd0fa01fe392df0b18a3a06628f130b83b2
download   linux-dd006da21646f1c86f0242eb8f527d093303127a.tar.xz
arm64: mm: increase VA range of identity map
The page size and the number of translation levels, and hence the supported
virtual address range, are build-time configurables on arm64 whose optimal
values are use case dependent. However, in the current implementation, if
the system's RAM is located at a very high offset, the virtual address range
needs to reflect that merely because the identity mapping, which is only used
to enable or disable the MMU, requires the extended virtual range to map the
physical memory at an equal virtual offset.
This patch relaxes that requirement, by increasing the number of translation
levels for the identity mapping only, and only when actually needed, i.e.,
when system RAM's offset is found to be out of reach at runtime.
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
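To make the T0SZ arithmetic behind this concrete: TCR_EL1.T0SZ encodes 64 minus the width of the virtual address range translated through TTBR0, so extending the ID map's reach means programming a smaller T0SZ. Below is a minimal C sketch of the runtime decision the commit message describes; the VA_BITS value and the idmap_t0sz_for() helper are illustrative, not part of this patch.

#include <stdint.h>

#define VA_BITS		39		/* example: 4K pages, 3 translation levels */
#define TCR_T0SZ(x)	(64 - (x))	/* T0SZ field encodes 64 - VA width */

/* Hypothetical helper: pick a T0SZ wide enough that the highest physical
 * address the identity map must cover (e.g. the end of the kernel image)
 * is reachable through TTBR0. */
static uint64_t idmap_t0sz_for(uint64_t idmap_end_pa)
{
	/* bits needed to address the top of the ID map; |1 guards clz(0) */
	unsigned int pa_bits = 64 - __builtin_clzll(idmap_end_pa | 1);

	if (pa_bits <= VA_BITS)
		return TCR_T0SZ(VA_BITS);	/* default range already suffices */

	/* RAM sits too high: widen the ID map's VA range (shrink T0SZ),
	 * which is what forces the extra translation level at runtime */
	return TCR_T0SZ(pa_bits);
}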
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/mmu.c          |  7
-rw-r--r--  arch/arm64/mm/proc-macros.S  | 10
-rw-r--r--  arch/arm64/mm/proc.S         |  3
3 files changed, 19 insertions, 1 deletion
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c9267acb699c..428aaf86c95b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,6 +40,8 @@
 
 #include "mm.h"
 
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
@@ -454,6 +456,7 @@ void __init paging_init(void)
 	 */
 	cpu_set_reserved_ttbr0();
 	flush_tlb_all();
+	cpu_set_default_tcr_t0sz();
 }
 
 /*
@@ -461,8 +464,10 @@ void __init paging_init(void)
  */
 void setup_mm_for_reboot(void)
 {
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
+	cpu_set_reserved_ttbr0();
 	flush_tlb_all();
+	cpu_set_idmap_tcr_t0sz();
+	cpu_switch_mm(idmap_pg_dir, &init_mm);
 }
 
 /*
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 005d29e2977d..4c4d93c4bf65 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -52,3 +52,13 @@
 	mov	\reg, #4		// bytes per word
 	lsl	\reg, \reg, \tmp	// actual cache line size
 	.endm
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+	ldr_l	\tmpreg, idmap_t0sz
+	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 28eebfb6af76..cdd754e19b9b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume)
 	msr	cpacr_el1, x6
 	msr	ttbr0_el1, x1
 	msr	ttbr1_el1, x7
+	tcr_set_idmap_t0sz x8, x7
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
@@ -233,6 +234,8 @@ ENTRY(__cpu_setup)
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+	tcr_set_idmap_t0sz	x10, x9
+
 	/*
 	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
 	 * TCR_EL1.
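The cpu_set_default_tcr_t0sz() and cpu_set_idmap_tcr_t0sz() calls introduced in mmu.c are defined outside arch/arm64/mm and so do not appear in this diffstat. A minimal sketch of what such a helper has to do, assuming the set_tcr_t0sz name and inline-asm form; the field position matches the TCR_T0SZ_OFFSET/TCR_TxSZ_WIDTH constants used by the tcr_set_idmap_t0sz macro above.

#include <stdint.h>

#define TCR_T0SZ_OFFSET	0	/* T0SZ lives in TCR_EL1[5:0] */
#define TCR_TxSZ_WIDTH	6

/* Hypothetical sketch: replace the T0SZ field of TCR_EL1 and synchronize,
 * mirroring at runtime what the bfi in tcr_set_idmap_t0sz does at
 * boot/resume time. */
static inline void set_tcr_t0sz(uint64_t t0sz)
{
	uint64_t tcr;

	asm volatile("mrs %0, tcr_el1" : "=r" (tcr));
	tcr &= ~(((1ULL << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET);
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	asm volatile("msr tcr_el1, %0\n\tisb" : : "r" (tcr));
}

Note the ordering the patch establishes in setup_mm_for_reboot(): TTBR0 is first pointed at the reserved tables and the TLB is flushed, so the T0SZ change never takes effect while stale user-half translations are live, and only then is the ID map installed.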