author		David Gibson <david@gibson.dropbear.id.au>	2006-01-13 06:56:25 +0300
committer	Paul Mackerras <paulus@samba.org>	2006-01-13 13:16:23 +0300
commit		e58c3495e6007af59382540bb21ee941e470d88d (patch)
tree		24b559cb768bfa5cf4bdef69f2943b081a1f5afa	/arch/powerpc/kernel/fpu.S
parent		7e78e5e502d4f220d24c6f738f2fdb078ad33607 (diff)
[PATCH] powerpc: Cleanup LOADADDR etc. asm macros
This patch consolidates the variety of macros used for loading 32- or 64-bit constants in assembler (LOADADDR, LOADBASE, SET_REG_TO_*). The idea is to make the set of macros consistent across 32 and 64 bit, and to make it more obvious which is the appropriate one to use in a given situation. The new macros and their semantics are described in the comments in ppc_asm.h.

In the process, we change several places that were unnecessarily using immediate loads on ppc64 to use the GOT/TOC. Likewise, we clean up a couple of places that were clumsily subtracting PAGE_OFFSET with asm instructions, to use assemble-time arithmetic or the toreal() macro instead.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
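For context, a rough sketch of the base/offset pair of macros the hunks below switch to, assuming definitions along these lines in ppc_asm.h (reconstructed for illustration from the description above; the authoritative definitions and comments are in the patched header):

	#ifdef __powerpc64__
	/* 64-bit: symbol addresses come from the TOC/GOT via r2, so the
	 * "base" load fetches the full address and the offset is simply 0. */
	#define LOAD_REG_ADDR(reg,name)		ld	reg,name@got(r2)
	#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
	#define ADDROFF(name)			0
	#else
	/* 32-bit: the high half of the address goes into the register and
	 * the low half becomes the displacement of the later load/store. */
	#define LOAD_REG_ADDRBASE(reg,name)	lis	reg,name@ha
	#define ADDROFF(name)			name@l
	#endif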
Diffstat (limited to 'arch/powerpc/kernel/fpu.S')
-rw-r--r--	arch/powerpc/kernel/fpu.S	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index b780b42c95fc..e4362dfa37fb 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -39,9 +39,9 @@ _GLOBAL(load_up_fpu)
* to another. Instead we call giveup_fpu in switch_to.
*/
#ifndef CONFIG_SMP
- LOADBASE(r3, last_task_used_math)
+ LOAD_REG_ADDRBASE(r3, last_task_used_math)
toreal(r3)
- PPC_LL r4,OFF(last_task_used_math)(r3)
+ PPC_LL r4,ADDROFF(last_task_used_math)(r3)
PPC_LCMPI 0,r4,0
beq 1f
toreal(r4)
@@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
- PPC_STL r4,OFF(last_task_used_math)(r3)
+ PPC_STL r4,ADDROFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
@@ -113,8 +113,8 @@ _GLOBAL(giveup_fpu)
1:
#ifndef CONFIG_SMP
li r5,0
- LOADBASE(r4,last_task_used_math)
- PPC_STL r5,OFF(last_task_used_math)(r4)
+ LOAD_REG_ADDRBASE(r4,last_task_used_math)
+ PPC_STL r5,ADDROFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
blr
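As a usage note, on 32-bit the rewritten pair in the first hunk would assemble to something like the following (a sketch, assuming the macro definitions outlined above; PPC_LL is the size-appropriate load, lwz on 32-bit and ld on 64-bit):

	lis	r3,last_task_used_math@ha	/* LOAD_REG_ADDRBASE */
	lwz	r4,last_task_used_math@l(r3)	/* PPC_LL ... ADDROFF */

whereas on ppc64 the same source would become a TOC-relative load followed by a zero-displacement load:

	ld	r3,last_task_used_math@got(r2)	/* LOAD_REG_ADDRBASE */
	ld	r4,0(r3)			/* PPC_LL ... ADDROFF */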