author		Will Deacon <will.deacon@arm.com>	2014-11-04 13:40:46 +0300
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-11-14 02:45:20 +0300
commit		238962ac71910d6c20162ea5230685fead1836a4 (patch)
tree		4dcd3ffbc4dbbad6d78280ffd209dcb29962ef55 /arch/arm
parent		9ff0bb5ba60638a688a46e93df8c5009896672eb (diff)
download	linux-238962ac71910d6c20162ea5230685fead1836a4.tar.xz
ARM: 8191/1: decompressor: ensure I-side picks up relocated code
To speed up decompression, the decompressor sets up a flat, cacheable
mapping of memory. However, when there is insufficient space to hold
the page tables for this mapping, we don't bother to enable the caches
and subsequently skip all the cache maintenance hooks.

Skipping the cache maintenance before jumping to the relocated code
allows the processor to predict the branch and populate the I-cache
with stale data before the relocation loop has completed (since a
bootloader may have SCTLR.I set, which permits normal, cacheable
instruction fetches regardless of SCTLR.M).

This patch moves the cache maintenance check into the maintenance
routines themselves, allowing the v6/v7 versions to invalidate the
I-cache regardless of the MMU state.

Cc: <stable@vger.kernel.org>
Reported-by: Marc Carino <marc.ceeeee@gmail.com>
Tested-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/boot/compressed/head.S	20
1 file changed, 16 insertions(+), 4 deletions(-)
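
The sketch below is a rough C model of the control-flow change described in the
commit message, not the kernel source: the caches_off flag stands in for bit 0
of r4 (which head.S tests with "tst r4, #1"), and the helper names are made up
for this illustration of the before/after behaviour.

/*
 * Illustrative model only -- not the kernel code. caches_off stands in for
 * bit 0 of r4; the helpers are hypothetical stand-ins for the CP15
 * maintenance sequences in the diff below.
 */
#include <stdbool.h>
#include <stdio.h>

static void clean_invalidate_dcache(void)   { puts("clean+invalidate D-cache"); }
static void invalidate_icache_and_btb(void) { puts("invalidate I-cache and BTB"); }

/* Before the patch: the call site did "tst r4, #1; bleq cache_clean_flush",
 * i.e. it skipped the routine entirely when the caches were never enabled,
 * so the I-cache could keep stale copies of the pre-relocation code. */
static void cache_clean_flush_old(bool caches_off)
{
	if (caches_off)
		return;				/* nothing flushed, I-side left stale */
	clean_invalidate_dcache();
	invalidate_icache_and_btb();
}

/* After the patch: the check lives inside the routine, so the I-cache/BTB
 * invalidate can run regardless of the MMU state (a bootloader may have
 * left SCTLR.I set). */
static void cache_clean_flush_new(bool caches_off)
{
	if (!caches_off)
		clean_invalidate_dcache();	/* only when the D-cache was enabled */
	invalidate_icache_and_btb();		/* always performed on v6/v7 */
}

int main(void)
{
	puts("caches off, old behaviour:");
	cache_clean_flush_old(true);
	puts("caches off, new behaviour:");
	cache_clean_flush_new(true);
	return 0;
}

In the actual patch, only the v6/v7 routines keep the I-cache/BTB invalidate
unconditional; the other per-CPU flush routines simply return early when bit 0
of r4 is set, as the hunks below show.
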
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 413fd94b5301..68be9017593d 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -397,8 +397,7 @@ dtb_check_done:
 		add	sp, sp, r6
 #endif
 
-		tst	r4, #1
-		bleq	cache_clean_flush
+		bl	cache_clean_flush
 
 		adr	r0, BSYM(restart)
 		add	r0, r0, r6
@@ -1047,6 +1046,8 @@ cache_clean_flush:
 		b	call_cache_fn
 
 __armv4_mpu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r2, #1
 		mov	r3, #0
 		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
@@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush:
 		mov	pc, lr
 
 __fa526_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
 		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
@@ -1072,13 +1075,16 @@ __fa526_cache_flush:
 
 __armv6_mmu_cache_flush:
 		mov	r1, #0
-		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
+		tst	r4, #1
+		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
 		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
-		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
+		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
 		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
 		mov	pc, lr
 
 __armv7_mmu_cache_flush:
+		tst	r4, #1
+		bne	iflush
 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
 		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
 		mov	r10, #0
@@ -1139,6 +1145,8 @@ iflush:
 		mov	pc, lr
 
 __armv5tej_mmu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
 		bne	1b
 		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
@@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush:
 		mov	pc, lr
 
 __armv4_mmu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r2, #64*1024		@ default: 32K dcache size (*2)
 		mov	r11, #32		@ default: 32 byte line size
 		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
@@ -1179,6 +1189,8 @@ no_cache_id:
 
 __armv3_mmu_cache_flush:
 __armv3_mpu_cache_flush:
+		tst	r4, #1
+		movne	pc, lr
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr