author    Nicholas Piggin <npiggin@gmail.com>  2022-09-26 08:56:18 +0300
committer Michael Ellerman <mpe@ellerman.id.au>  2022-09-28 12:22:13 +0300
commit    b830c8754e046f96e84da9d3b3e028c4ceef2b18 (patch)
tree      99a663084d640cf5ddbfaa76f29e2056f88954b4
parent    2f5182cffa43f31c241131a2c10a4ecd8e90fb3e (diff)
download  linux-b830c8754e046f96e84da9d3b3e028c4ceef2b18.tar.xz
powerpc/64: avoid using r13 in relocate
relocate() uses r13 in early boot, before r13 has been set up as the paca pointer. Use a different register for this so that r13 is kept unchanged until it is set to the paca pointer.

Avoid r14 as well while we're here; there is no reason not to use the volatile registers, which is a bit less surprising, and r14 could be used as another fixed register one day.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926055620.2676869-4-npiggin@gmail.com
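For context, r13 is the register powerpc64 reserves for the per-CPU paca pointer; a minimal C sketch of that convention follows (the declaration mirrors the one in arch/powerpc/include/asm/paca.h and is shown only for illustration, it is not part of this patch):

    /*
     * Sketch of the ppc64 convention motivating the change: the kernel
     * dedicates r13 to the per-CPU paca pointer, so early-boot code such
     * as relocate() must leave r13 untouched until the paca is set up.
     */
    struct paca_struct;
    register struct paca_struct *local_paca asm("r13");

Because r13 is a global fixed register from the compiler's point of view, any assembly that scribbles on it before the paca exists risks corrupting later per-CPU accesses, which is why the patch moves relocate() onto volatile registers instead.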
-rw-r--r--  arch/powerpc/kernel/reloc_64.S | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index 232e4549defe..efd52f2e7033 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -27,8 +27,8 @@ _GLOBAL(relocate)
add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */
ld r10,(p_st - 0b)(r12)
add r10,r10,r12 /* r10 has runtime addr of _stext */
- ld r13,(p_sym - 0b)(r12)
- add r13,r13,r12 /* r13 has runtime addr of .dynsym */
+ ld r4,(p_sym - 0b)(r12)
+ add r4,r4,r12 /* r4 has runtime addr of .dynsym */
/*
* Scan the dynamic section for the RELA, RELASZ and RELAENT entries.
@@ -84,16 +84,16 @@ _GLOBAL(relocate)
ld r0,16(r9) /* reloc->r_addend */
b .Lstore
.Luaddr64:
- srdi r14,r0,32 /* ELF64_R_SYM(reloc->r_info) */
+ srdi r5,r0,32 /* ELF64_R_SYM(reloc->r_info) */
clrldi r0,r0,32
cmpdi r0,R_PPC64_UADDR64
bne .Lnext
ld r6,0(r9)
ld r0,16(r9)
- mulli r14,r14,24 /* 24 == sizeof(elf64_sym) */
- add r14,r14,r13 /* elf64_sym[ELF64_R_SYM] */
- ld r14,8(r14)
- add r0,r0,r14
+ mulli r5,r5,24 /* 24 == sizeof(elf64_sym) */
+ add r5,r5,r4 /* elf64_sym[ELF64_R_SYM] */
+ ld r5,8(r5)
+ add r0,r0,r5
.Lstore:
add r0,r0,r3
stdx r0,r7,r6
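For reference, here is a hedged C sketch of what the R_PPC64_UADDR64 path above computes. The struct layouts follow the generic ELF64 definitions (24-byte symbol entries with st_value at offset 8, and r_addend at offset 16 of the rela entry, matching the mulli/ld offsets in the assembly); the function name, parameters, and the byte-wise store are illustrative, not kernel code:

    /*
     * Illustrative C model of applying one R_PPC64_UADDR64 relocation:
     * value = dynsym[ELF64_R_SYM(r_info)].st_value + r_addend + delta,
     * stored byte-wise because a UADDR64 target may be unaligned.
     */
    #include <stdint.h>
    #include <string.h>

    struct elf64_sym {
    	uint32_t st_name;
    	uint8_t  st_info;
    	uint8_t  st_other;
    	uint16_t st_shndx;
    	uint64_t st_value;	/* offset 8, loaded by "ld r5,8(r5)" */
    	uint64_t st_size;
    };					/* 24 bytes, matching "mulli r5,r5,24" */

    struct elf64_rela {
    	uint64_t r_offset;
    	uint64_t r_info;
    	int64_t  r_addend;	/* offset 16, loaded by "ld r0,16(r9)" */
    };

    #define ELF64_R_SYM(info)	((info) >> 32)	/* "srdi r5,r0,32" */

    static void apply_uaddr64(const struct elf64_rela *rel,
    			  const struct elf64_sym *dynsym,
    			  uint64_t delta, uint8_t *base)
    {
    	uint64_t val = dynsym[ELF64_R_SYM(rel->r_info)].st_value
    		       + rel->r_addend + delta;

    	memcpy(base + rel->r_offset, &val, sizeof(val));
    }

The patch only renames the registers used for the .dynsym base and the symbol index (r13/r14 become r4/r5); the relocation arithmetic sketched here is unchanged.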