author		Kristina Martsenko <kristina.martsenko@arm.com>	2017-12-13 20:07:18 +0300
committer	Catalin Marinas <catalin.marinas@arm.com>	2017-12-22 20:35:21 +0300
commit		529c4b05a3cb2f324aac347042ee6d641478e946 (patch)
tree		2fab0e91f3abe37f2b7ee29a02e0c41af6ad03d6 /arch/arm64/mm/proc.S
parent		787fd1d019b269af7912249231dfe34a5fe3e7c8 (diff)
download	linux-529c4b05a3cb2f324aac347042ee6d641478e946.tar.xz
arm64: handle 52-bit addresses in TTBR
The top 4 bits of a 52-bit physical address are positioned at bits 2..5
in the TTBR registers. Introduce a couple of macros to move the bits
there, and change all TTBR writers to use them.

Leave TTBR0 PAN code unchanged, to avoid complicating it. A system with
52-bit PA will have PAN anyway (because it's ARMv8.1 or later), and a
system without 52-bit PA can only use up to 48-bit PAs. A later patch in
this series will add a kconfig dependency to ensure PAN is configured.

In addition, when using 52-bit PA there is a special alignment
requirement on the top-level table. We don't currently have any VA_BITS
configuration that would violate the requirement, but one could be added
in the future, so add a compile-time BUG_ON to check for it.

Tested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Bob Picco <bob.picco@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
[catalin.marinas@arm.com: added TTBR_BADD_MASK_52 comment]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
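For reference, a minimal sketch of the kind of bit-shuffling macro the diff below relies on, written as an arm64 assembler macro. The exact body, the TTBR_BADDR_MASK_52 mask and the CONFIG_ARM64_PA_BITS_52 guard are assumptions based on the description above and on the phys_to_ttbr usage in the hunks below, not a verbatim copy of the in-tree definition:

	/*
	 * Sketch only: arrange a physical address for writing into a
	 * TTBRn_ELx register. With 52-bit PAs, address bits 48..51 must
	 * end up in TTBR bits 2..5; with up to 48 bits, the address can
	 * be written as-is.
	 *
	 * phys: physical address (preserved)
	 * ttbr: output register receiving the TTBR value
	 */
	.macro	phys_to_ttbr, phys, ttbr
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46	// copy bits 48..51 down into bits 2..5
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52	// keep only the baddr field, bits [47:2]
#else
	mov	\ttbr, \phys			// <= 48-bit PA: no shuffling needed
#endif
	.endm

With such a macro, the callers in the diff compute the TTBR value into a scratch register first (e.g. phys_to_ttbr x0, x2) and then write that register with msr, instead of writing the raw physical address directly.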
Diffstat (limited to 'arch/arm64/mm/proc.S')
-rw-r--r--	arch/arm64/mm/proc.S	|	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4f133cb340dc..e79db5a7576a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -138,10 +138,11 @@ ENDPROC(cpu_do_resume)
* - pgd_phys - physical address of new TTB
*/
ENTRY(cpu_do_switch_mm)
- pre_ttbr0_update_workaround x0, x2, x3
+ phys_to_ttbr x0, x2
+ pre_ttbr0_update_workaround x2, x3, x4
mmid x1, x1 // get mm->context.id
- bfi x0, x1, #48, #16 // set the ASID
- msr ttbr0_el1, x0 // set TTBR0
+ bfi x2, x1, #48, #16 // set the ASID
+ msr ttbr0_el1, x2 // set TTBR0
isb
post_ttbr0_update_workaround
ret
@@ -158,14 +159,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
adrp x1, empty_zero_page
- msr ttbr1_el1, x1
+ phys_to_ttbr x1, x3
+ msr ttbr1_el1, x3
isb
tlbi vmalle1
dsb nsh
isb
- msr ttbr1_el1, x0
+ phys_to_ttbr x0, x3
+ msr ttbr1_el1, x3
isb
restore_daif x2