author     Russell King <rmk+kernel@arm.linux.org.uk>  2014-03-25 23:45:31 +0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2014-04-04 01:46:34 +0400
commit     e26a9e00afc482b971afcaef1db8c9034d4d6d7c (patch)
tree       6ed907fccdafda9a6656794db0543692f7e5d46f /arch/arm/kernel/head.S
parent     83b3f64d46d1d9bf1e0be1d16f805ccdd52d31c6 (diff)
download   linux-e26a9e00afc482b971afcaef1db8c9034d4d6d7c.tar.xz
ARM: Better virt_to_page() handling
virt_to_page() is incredibly inefficient when virt-to-phys patching is
enabled.  This is because we end up with this calculation:

  page = &mem_map[asm virt_to_phys(addr) >> 12 - __pv_phys_offset >> 12]

in assembly.  The asm virt_to_phys() is equivalent to this operation:

  addr - PAGE_OFFSET + __pv_phys_offset

and we can see that because this is assembly, the compiler has no chance
to optimise some of that away.  This should reduce down to:

  page = &mem_map[(addr - PAGE_OFFSET) >> 12]

for the common cases.  Permit the compiler to make this optimisation by
giving it more of the information it needs - do this by providing a
virt_to_pfn() macro.

Another issue which makes this more complex is that __pv_phys_offset is
a 64-bit type on all platforms.  This is needlessly wasteful - if we
store the physical offset as a PFN, we can save a lot of work having to
deal with 64-bit values, which sometimes ends up producing incredibly
horrid code:

     a4c:	e3009000	movw	r9, #0
			a4c: R_ARM_MOVW_ABS_NC	__pv_phys_offset
     a50:	e3409000	movt	r9, #0		; r9 = &__pv_phys_offset
			a50: R_ARM_MOVT_ABS	__pv_phys_offset
     a54:	e3002000	movw	r2, #0
			a54: R_ARM_MOVW_ABS_NC	__pv_phys_offset
     a58:	e3402000	movt	r2, #0		; r2 = &__pv_phys_offset
			a58: R_ARM_MOVT_ABS	__pv_phys_offset
     a5c:	e5999004	ldr	r9, [r9, #4]	; r9 = high word of __pv_phys_offset
     a60:	e3001000	movw	r1, #0
			a60: R_ARM_MOVW_ABS_NC	mem_map
     a64:	e592c000	ldr	ip, [r2]	; ip = low word of __pv_phys_offset

Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
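[Editor's note: a minimal C sketch of the shape of this change, assuming
4 KiB pages and a flatmem-style pfn_to_page(); names mirror the patch but
these are simplified, not the exact mainline definitions.]

	/*
	 * Sketch only: because virt_to_pfn() is plain C arithmetic, the
	 * compiler can see that the PFN offset added here cancels the one
	 * subtracted by pfn_to_page(), reducing virt_to_page(addr) to
	 * &mem_map[(addr - PAGE_OFFSET) >> 12].
	 */
	#define PAGE_SHIFT	12
	#define PAGE_OFFSET	0xc0000000UL	/* assumed typical value */

	struct page;
	extern struct page *mem_map;
	extern unsigned long __pv_phys_pfn_offset;	/* PHYS_OFFSET >> PAGE_SHIFT */

	#define virt_to_pfn(kaddr) \
		((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
		 __pv_phys_pfn_offset)

	#define pfn_to_page(pfn)	(mem_map + ((pfn) - __pv_phys_pfn_offset))
	#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))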
Diffstat (limited to 'arch/arm/kernel/head.S')
-rw-r--r--  arch/arm/kernel/head.S | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
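[Editor's note: an illustrative calculation, not from the patch itself, of
why a 32-bit PFN suffices and the storage below can shrink from .quad to
.word: with 4 KiB pages, a 32-bit PFN spans 2^(32+12) = 16 TiB of physical
address space, well beyond LPAE's 40-bit (1 TiB) range.]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical 40-bit (LPAE) physical offset for illustration. */
		uint64_t phys_offset = 0x8000000000ULL;
		/* Shifting down by PAGE_SHIFT always fits in 32 bits. */
		uint32_t pfn_offset  = (uint32_t)(phys_offset >> 12);

		printf("phys_offset=%#llx -> pfn_offset=%#x (fits in 32 bits)\n",
		       (unsigned long long)phys_offset, (unsigned)pfn_offset);
		return 0;
	}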
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 914616e0bdcd..3aca959fee8d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -584,9 +584,10 @@ __fixup_pv_table:
subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
- add r6, r6, r3 @ adjust __pv_phys_offset address
+ add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
add r7, r7, r3 @ adjust __pv_offset address
- str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
+ mov r0, r8, lsr #12 @ convert to PFN
+ str r0, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
@@ -600,7 +601,7 @@ ENDPROC(__fixup_pv_table)
1: .long .
.long __pv_table_begin
.long __pv_table_end
-2: .long __pv_phys_offset
+2: .long __pv_phys_pfn_offset
.long __pv_offset

.text
@@ -688,11 +689,11 @@ ENTRY(fixup_pv_table)
ENDPROC(fixup_pv_table)

.data
- .globl __pv_phys_offset
- .type __pv_phys_offset, %object
-__pv_phys_offset:
- .quad 0
- .size __pv_phys_offset, . -__pv_phys_offset
+ .globl __pv_phys_pfn_offset
+ .type __pv_phys_pfn_offset, %object
+__pv_phys_pfn_offset:
+ .word 0
+ .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset

.globl __pv_offset
.type __pv_offset, %object