author     Linus Walleij <linus.walleij@linaro.org>  2024-04-23 10:29:31 +0300
committer  Russell King (Oracle) <rmk+kernel@armlinux.org.uk>  2024-04-29 16:14:16 +0300
commit     1036b89580dc611cfb5dfe66af6b35452dfb272c (patch)
tree       8af0e6080d0557da7d6bfb095f23022debf50052 /arch/arm/mm/cache-v4wb.S
parent     6b0ef2792c223636a86f2c9c3fcb26502a03d5a7 (diff)
download   linux-1036b89580dc611cfb5dfe66af6b35452dfb272c.tar.xz
ARM: 9385/2: mm: Type-annotate all cache assembly routines
Tag all references to assembly functions with SYM_TYPED_FUNC_START() and
SYM_FUNC_END() so they also become CFI-safe.

When we add SYM_TYPED_FUNC_START() to an assembly function, a function
prototype signature is emitted into the object file in the word
immediately preceding the function's entry point (at entry - 4), so that
the KCFI runtime check at an indirect call site can compare it to the
expected prototype.

Example:

8011ae38:       a540670c        .word   0xa540670c

8011ae3c <v7_flush_icache_all>:
8011ae3c:       e3a00000        mov     r0, #0
8011ae40:       ee070f11        mcr     15, 0, r0, cr7, cr1, {0}
8011ae44:       e12fff1e        bx      lr

This means no "fallthrough" code can enter a SYM_TYPED_FUNC_START()
function from above it: the function prototype signature sits there, so
every such fallthrough is consistently converted to an explicit branch or
a ret lr, depending on context.

Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
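The conversion pattern applied below, shown as a minimal sketch (foo and
bar are illustrative names, not symbols from this diff; the bodies stand
in for real cache maintenance code):

@ Before: foo simply falls through into bar, relying on bar's entry
@ point directly following foo's last instruction.
ENTRY(foo)
	add	r1, r0, r1
	/* fall through */
ENTRY(bar)
	mov	r0, #0
	ret	lr
ENDPROC(bar)

@ After: SYM_TYPED_FUNC_START(bar) places a type-id word immediately
@ before bar's entry point, so a fallthrough from foo would execute
@ that word as an instruction; foo must end in an explicit branch.
SYM_TYPED_FUNC_START(foo)
	add	r1, r0, r1
	b	bar
SYM_FUNC_END(foo)

SYM_TYPED_FUNC_START(bar)
	mov	r0, #0
	ret	lr
SYM_FUNC_END(bar)

This is exactly the rewrite that v4wb_flush_kern_dcache_area and
v4wb_coherent_kern_range receive in the hunks below.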
Diffstat (limited to 'arch/arm/mm/cache-v4wb.S')
-rw-r--r--  arch/arm/mm/cache-v4wb.S  39
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index ad382cee0fdb..0d97b594e23f 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -53,11 +54,11 @@ flush_base:
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wb_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wb_flush_icache_all)
+SYM_FUNC_END(v4wb_flush_icache_all)
/*
* flush_user_cache_all()
@@ -65,14 +66,16 @@ ENDPROC(v4wb_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wb_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_all)
+ b v4wb_flush_kern_cache_all
+SYM_FUNC_END(v4wb_flush_user_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wb_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
@@ -93,6 +96,7 @@ __flush_whole_cache:
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -104,7 +108,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wb_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
@@ -121,6 +125,7 @@ ENTRY(v4wb_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_user_cache_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -131,9 +136,10 @@ ENTRY(v4wb_flush_user_cache_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area)
add r1, r0, r1
- /* fall through */
+ b v4wb_coherent_user_range
+SYM_FUNC_END(v4wb_flush_kern_dcache_area)
/*
* coherent_kern_range(start, end)
@@ -145,8 +151,9 @@ ENTRY(v4wb_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(v4wb_coherent_kern_range)
+ b v4wb_coherent_user_range
+SYM_FUNC_END(v4wb_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -158,7 +165,7 @@ ENTRY(v4wb_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wb_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -169,7 +176,7 @@ ENTRY(v4wb_coherent_user_range)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
-
+SYM_FUNC_END(v4wb_coherent_user_range)
/*
* dma_inv_range(start, end)
@@ -231,13 +238,13 @@ v4wb_dma_clean_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_map_area)
+SYM_TYPED_FUNC_START(v4wb_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq v4wb_dma_clean_range
bcs v4wb_dma_inv_range
b v4wb_dma_flush_range
-ENDPROC(v4wb_dma_map_area)
+SYM_FUNC_END(v4wb_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -245,9 +252,9 @@ ENDPROC(v4wb_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
ret lr
-ENDPROC(v4wb_dma_unmap_area)
+SYM_FUNC_END(v4wb_dma_unmap_area)
.globl v4wb_flush_kern_cache_louis
.equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
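
The closing .equ keeps v4wb_flush_kern_cache_louis as a plain alias for
v4wb_flush_kern_cache_all, so an indirect call through either name lands
on the same type-annotated entry point. For reference, the caller side of
the KCFI check the commit message describes looks roughly like this (an
illustrative sketch with made-up labels, not literal compiler output; the
hash reuses the 0xa540670c value from the example above):

@ r0 holds the function pointer about to be called indirectly
	ldr	r1, [r0, #-4]		@ load type-id word before the callee
	ldr	r2, =0xa540670c		@ expected hash for this prototype
	cmp	r1, r2			@ does the callee's type match?
	bne	.Lcfi_trap		@ mismatch: trap instead of calling
	blx	r0			@ match: make the indirect call
	b	.Ldone			@ skip the trap on the return path
.Lcfi_trap:
	udf	#0			@ CFI violation: undefined instruction trap
.Ldone: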