author      Ard Biesheuvel <ardb@kernel.org>            2023-01-11 13:22:35 +0300
committer   Catalin Marinas <catalin.marinas@arm.com>   2023-01-24 14:51:08 +0300
commit      3dcf60bbfd284e5ebfa40c56172222425d10abf0 (patch)
tree        6af09231e1e0d8eed859f0d030fef188710a17c5
parent      32b135a7fafebe7843abe5425159fa081ae56b7c (diff)
download    linux-3dcf60bbfd284e5ebfa40c56172222425d10abf0.tar.xz
arm64: head: Clean the ID map and the HYP text to the PoC if needed
If we enter with the MMU and caches enabled, the bootloader may not have
performed any cache maintenance to the PoC. So clean the ID mapped page to
the PoC, to ensure that instruction and data accesses with the MMU off see
the correct data. For similar reasons, clean all the HYP text to the PoC as
well when entering at EL2 with the MMU and caches enabled.

Note that this means primary_entry() itself needs to be moved into the ID
map as well, as we will return from init_kernel_el() with the MMU and
caches off.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20230111102236.1430401-6-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
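(Illustrative sketch, not part of this patch: "clean to the PoC" means writing
dirty cache lines back far enough that a later access with the MMU and caches
off observes them. Conceptually, for a VA range [x0, x1) and an assumed
64-byte line size for brevity; the kernel's dcache_clean_poc() helper derives
the real line size from CTR_EL0 instead of hard-coding it.)

0:	dc	cvac, x0		// clean one line to the Point of Coherency
	add	x0, x0, #64		// assumed line size, illustration only
	cmp	x0, x1
	b.lo	0b
	dsb	sy			// complete the cleaning before the MMU goes off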
-rw-r--r--   arch/arm64/kernel/head.S    31
-rw-r--r--   arch/arm64/kernel/sleep.S    1
2 files changed, 28 insertions, 4 deletions
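(Condensed from the diff below, as a sketch of the new init_kernel_el calling
convention this patch introduces: the primary boot path passes the MMU state
recorded earlier in x19, while callers that are known to run with the MMU off
pass zero.)

	// primary boot path: x19 was set by record_mmu_state
	mov	x0, x19
	bl	init_kernel_el		// w0=cpu_boot_mode
	// secondary CPUs and cpu_resume: MMU known to be off
	mov	x0, xzr
	bl	init_kernel_el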
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d75f41920645..dc56e1d8f36e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -70,7 +70,7 @@
__EFI_PE_HEADER
- __INIT
+ .section ".idmap.text","awx"
/*
* The following callee saved general purpose registers are used on the
@@ -90,6 +90,17 @@ SYM_CODE_START(primary_entry)
bl record_mmu_state
bl preserve_boot_args
bl create_idmap
+
+ /*
+ * If we entered with the MMU and caches on, clean the ID mapped part
+ * of the primary boot code to the PoC so we can safely execute it with
+ * the MMU off.
+ */
+ cbz x19, 0f
+ adrp x0, __idmap_text_start
+ adr_l x1, __idmap_text_end
+ bl dcache_clean_poc
+0: mov x0, x19
bl init_kernel_el // w0=cpu_boot_mode
mov x20, x0
@@ -111,6 +122,7 @@ SYM_CODE_START(primary_entry)
b __primary_switch
SYM_CODE_END(primary_entry)
+ __INIT
SYM_CODE_START_LOCAL(record_mmu_state)
mrs x19, CurrentEL
cmp x19, #CurrentEL_EL2
@@ -507,10 +519,12 @@ SYM_FUNC_END(__primary_switched)
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
* booted in EL1 or EL2 respectively, with the top 32 bits containing
* potential context flags. These flags are *not* stored in __boot_cpu_mode.
+ *
+ * x0: whether we are being called from the primary boot path with the MMU on
*/
SYM_FUNC_START(init_kernel_el)
- mrs x0, CurrentEL
- cmp x0, #CurrentEL_EL2
+ mrs x1, CurrentEL
+ cmp x1, #CurrentEL_EL2
b.eq init_el2
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
@@ -525,6 +539,14 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
eret
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+ msr elr_el2, lr
+
+ // clean all HYP code to the PoC if we booted at EL2 with the MMU on
+ cbz x0, 0f
+ adrp x0, __hyp_idmap_text_start
+ adr_l x1, __hyp_text_end
+ bl dcache_clean_poc
+0:
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
isb
@@ -558,7 +580,6 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el1, x1
mov x2, xzr
2:
- msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2
orr x0, x0, x2
eret
@@ -569,6 +590,7 @@ SYM_FUNC_END(init_kernel_el)
* cores are held until we're ready for them to initialise.
*/
SYM_FUNC_START(secondary_holding_pen)
+ mov x0, xzr
bl init_kernel_el // w0=cpu_boot_mode
mrs x2, mpidr_el1
mov_q x1, MPIDR_HWID_BITMASK
@@ -586,6 +608,7 @@ SYM_FUNC_END(secondary_holding_pen)
* be used where CPUs are brought online dynamically by the kernel.
*/
SYM_FUNC_START(secondary_entry)
+ mov x0, xzr
bl init_kernel_el // w0=cpu_boot_mode
b secondary_startup
SYM_FUNC_END(secondary_entry)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 7b7c56e04834..2ae7cff1953a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -99,6 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
+ mov x0, xzr
bl init_kernel_el
mov x19, x0 // preserve boot mode
#if VA_BITS > 48
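(Note on the init_el2 hunk above: "msr elr_el2, lr" is hoisted to the top of
init_el2 because the newly added "bl dcache_clean_poc" overwrites lr, while
the final "eret" returns via elr_el2. Sketch of the resulting flow, condensed
from the diff; the elided middle is the unchanged EL2 setup:)

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr			// stash the return address first ...
	cbz	x0, 0f				// skip unless we entered with the MMU on
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	bl	dcache_clean_poc		// ... because this call clobbers lr
0:
	...					// EL2 setup as before
	eret					// return via elr_el2, so lr no longer matters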