path: root/arch/powerpc/kernel/vdso64/cacheflush.S
author     Christophe Leroy <christophe.leroy@c-s.fr>  2020-04-28 16:16:47 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>       2020-07-15 05:04:40 +0300
commit     793d74a8c78e05d6833bfcf582e24e40bd92518f (patch)
tree       61572a7a2c34ba05a2514c80a73f4f37ffe481ce /arch/powerpc/kernel/vdso64/cacheflush.S
parent     96032f983ca32ad1d43c73da922dbc7022754c3c (diff)
download   linux-793d74a8c78e05d6833bfcf582e24e40bd92518f.tar.xz
powerpc/vdso64: Switch from __get_datapage() to get_datapage inline macro
In the same way as already done on PPC32, drop the __get_datapage() function and use the get_datapage inline macro instead.

See commit ec0895f08f99 ("powerpc/vdso32: inline __get_datapage()").

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/e13d95312e0b9792556b19b4bb8955cc1ff19fc7.1588079622.git.christophe.leroy@c-s.fr
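For context, get_datapage is an assembler macro rather than a callable function: it computes the address of the vDSO data page with PC-relative addressing, so the caller no longer has to preserve its arguments across a branch-and-link. A minimal sketch of such a macro is shown below (illustrative only; the authoritative definition lives in asm/vdso_datapage.h, and the second operand is assumed here to be an unused scratch register):

/*
 * Sketch of a get_datapage-style macro: load the address of the vDSO
 * data page into \ptr without a function call.  _vdso_datapage is the
 * label of the data page within the vDSO image; bcl 20,31 is the usual
 * link-stack-friendly way to read the current instruction address.
 */
.macro get_datapage ptr, tmp
	bcl	20, 31, .+4		/* LR = address of the next instruction */
	mflr	\ptr			/* \ptr = that address */
	addi	\ptr, \ptr, (_vdso_datapage - (. - 4))@l	/* add offset to the data page */
.endm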
Diffstat (limited to 'arch/powerpc/kernel/vdso64/cacheflush.S')
-rw-r--r--  arch/powerpc/kernel/vdso64/cacheflush.S | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
index 526f5ba2593e..cab14324242b 100644
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -8,6 +8,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
 #include <asm/asm-offsets.h>
 
 	.text
@@ -24,14 +25,12 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
   .cfi_startproc
 	mflr	r12
   .cfi_register lr,r12
-	mr	r11,r3
-	bl	V_LOCAL_FUNC(__get_datapage)
+	get_datapage	r10, r0
 	mtlr	r12
-	mr	r10,r3
 
 	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
 	addi	r5,r7,-1
-	andc	r6,r11,r5	/* round low to line bdy */
+	andc	r6,r3,r5	/* round low to line bdy */
 	subf	r8,r6,r4	/* compute length */
 	add	r8,r8,r5	/* ensure we get enough */
 	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
@@ -48,7 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
 	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
 	addi	r5,r7,-1
-	andc	r6,r11,r5	/* round low to line bdy */
+	andc	r6,r3,r5	/* round low to line bdy */
 	subf	r8,r6,r4	/* compute length */
 	add	r8,r8,r5
 	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
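Taken together, the register changes above show why r11 drops out: the old code had to copy the start address out of r3 because the __get_datapage() call handed the data page pointer back in r3, whereas the macro writes the pointer straight into r10 and leaves r3 alone. Condensed (mtlr and other bookkeeping omitted), the two flows look roughly like this:

/* Before: __get_datapage() clobbers r3, so the start address is parked in r11 */
	mr	r11,r3				/* save start address */
	bl	V_LOCAL_FUNC(__get_datapage)	/* data page pointer comes back in r3 */
	mr	r10,r3				/* move pointer where later code expects it */
	andc	r6,r11,r5			/* round saved start down to a cache-line boundary */

/* After: get_datapage writes straight into r10, r3 is still the start address */
	get_datapage	r10, r0
	andc	r6,r3,r5			/* round the original start down to a cache-line boundary */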