author     Paul Mackerras <paulus@samba.org>   2006-01-14 07:06:51 +0300
committer  Paul Mackerras <paulus@samba.org>   2006-01-14 07:06:51 +0300
commit     b4e7de0f3575f4862f04921c5bd0cb5680cc8d71 (patch)
tree       b74a9a277168b35119d9dd5657ab567c21fc2111 /arch/powerpc/boot
parent     66a45dd3620ee5f913ba1af3d2dca8b9bdfa2b96 (diff)
powerpc: Avoid unaligned loads and stores in boot memcpy code
The 601 processor will generate an alignment exception for accesses which cross a page boundary. In the boot wrapper code, OF is still handling all exceptions, and it doesn't have an alignment exception handler that emulates the instruction and continues. This changes the memcpy and memmove routines in the boot wrapper to avoid doing unaligned accesses. If the source and destination are misaligned with respect to each other, we just copy one byte at a time.

Signed-off-by: Paul Mackerras <paulus@samba.org>
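As an aside for readers of this log: the policy the patch implements can be sketched in C roughly as below. This is only an illustration of the idea, not the wrapper code itself (the real routine is the hand-written assembly in arch/powerpc/boot/string.S); the function name and the 8-byte threshold mirror the assembly only loosely.

/*
 * Minimal C sketch of the copy policy, for illustration only.
 * Word-sized copies are used only when source and destination can be
 * word-aligned at the same time; otherwise every access on one side
 * would be unaligned and could cross a page boundary, which faults on
 * the 601 while Open Firmware still owns the exception vectors.
 */
void *sketch_memcpy(void *dest, const void *src, unsigned long n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (n >= 8 && ((unsigned long)d & 3) == ((unsigned long)s & 3)) {
		/* Copy leading bytes until both pointers are word-aligned. */
		while ((unsigned long)d & 3) {
			*d++ = *s++;
			n--;
		}
		/* Bulk of the copy, one aligned word at a time. */
		while (n >= 4) {
			*(unsigned int *)d = *(const unsigned int *)s;
			d += 4;
			s += 4;
			n -= 4;
		}
	}
	/* Tail bytes, or the whole copy if the pointers are mutually
	 * misaligned: byte by byte. */
	while (n--)
		*d++ = *s++;
	return dest;
}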
Diffstat (limited to 'arch/powerpc/boot')
-rw-r--r--  arch/powerpc/boot/string.S | 20 ++++++++++++++++----
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/boot/string.S b/arch/powerpc/boot/string.S
index b1eeaed7db17..ac3d43b6a324 100644
--- a/arch/powerpc/boot/string.S
+++ b/arch/powerpc/boot/string.S
@@ -107,10 +107,12 @@ memcpy:
rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
addi r6,r3,-4
addi r4,r4,-4
- beq 2f /* if less than 8 bytes to do */
+ beq 3f /* if less than 8 bytes to do */
andi. r0,r6,3 /* get dest word aligned */
mtctr r7
bne 5f
+ andi. r0,r4,3 /* check src word aligned too */
+ bne 3f
1: lwz r7,4(r4)
lwzu r8,8(r4)
stw r7,4(r6)
@@ -132,6 +134,11 @@ memcpy:
bdnz 4b
blr
5: subfic r0,r0,4
+ cmpw cr1,r0,r5
+ add r7,r0,r4
+ andi. r7,r7,3 /* will source be word-aligned too? */
+ ble cr1,3b
+ bne 3b /* do byte-by-byte if not */
mtctr r0
6: lbz r7,4(r4)
addi r4,r4,1
@@ -149,10 +156,12 @@ backwards_memcpy:
rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
add r6,r3,r5
add r4,r4,r5
- beq 2f
+ beq 3f
andi. r0,r6,3
mtctr r7
bne 5f
+ andi. r0,r4,3
+ bne 3f
1: lwz r7,-4(r4)
lwzu r8,-8(r4)
stw r7,-4(r6)
@@ -171,7 +180,12 @@ backwards_memcpy:
stbu r0,-1(r6)
bdnz 4b
blr
-5: mtctr r0
+5: cmpw cr1,r0,r5
+ subf r7,r0,r4
+ andi. r7,r7,3
+ ble cr1,3b
+ bne 3b
+ mtctr r0
6: lbzu r7,-1(r4)
stbu r7,-1(r6)
bdnz 6b
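The second half of the patch applies the same rule to backwards_memcpy, which the wrapper's memmove uses when the destination overlaps the source from above. Again purely as an illustration (the actual code is the assembly above), the policy copies downward from the end of both buffers and falls back to a byte loop when the pointers are mutually misaligned:

/*
 * Rough C equivalent of the backwards copy policy, illustrative only.
 * Used conceptually by memmove when dest overlaps src from above.
 */
void *sketch_backwards_memcpy(void *dest, const void *src, unsigned long n)
{
	unsigned char *d = (unsigned char *)dest + n;
	const unsigned char *s = (const unsigned char *)src + n;

	if (n >= 8 && ((unsigned long)d & 3) == ((unsigned long)s & 3)) {
		/* Trailing bytes first, until both pointers are word-aligned. */
		while ((unsigned long)d & 3) {
			*--d = *--s;
			n--;
		}
		/* Then copy downward one aligned word at a time. */
		while (n >= 4) {
			d -= 4;
			s -= 4;
			n -= 4;
			*(unsigned int *)d = *(const unsigned int *)s;
		}
	}
	/* Remainder, or everything if mutually misaligned: byte loop. */
	while (n--)
		*--d = *--s;
	return dest;
}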