| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-03-25 23:45:31 +0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-04-04 01:46:34 +0400 |
| commit | e26a9e00afc482b971afcaef1db8c9034d4d6d7c (patch) | |
| tree | 6ed907fccdafda9a6656794db0543692f7e5d46f /arch/arm/include/asm/memory.h | |
| parent | 83b3f64d46d1d9bf1e0be1d16f805ccdd52d31c6 (diff) | |
| download | linux-e26a9e00afc482b971afcaef1db8c9034d4d6d7c.tar.xz | |
ARM: Better virt_to_page() handling
virt_to_page() is incredibly inefficient when virt-to-phys patching is
enabled. This is because we end up with this calculation:
page = &mem_map[asm virt_to_phys(addr) >> 12 - __pv_phys_offset >> 12]
in assembly. The asm virt_to_phys() is equivalent to this operation:
addr - PAGE_OFFSET + __pv_phys_offset
and we can see that because this is assembly, the compiler has no chance
to optimise some of that away. This should reduce down to:
page = &mem_map[(addr - PAGE_OFFSET) >> 12]
for the common cases. Permit the compiler to make this optimisation by
giving it more of the information it needs - do this by providing a
virt_to_pfn() macro.
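To see why this helps, here is a minimal userspace sketch (not kernel code; PAGE_OFFSET and the RAM base below are made-up values, and the mem_map lookup is modelled in the FLATMEM style as pfn - ARCH_PFN_OFFSET). Once virt_to_pfn() is plain C, the PHYS_PFN_OFFSET it adds cancels against ARCH_PFN_OFFSET and the compiler is free to reduce the index to (addr - PAGE_OFFSET) >> PAGE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xc0000000UL			/* assumed 3G/1G split */
#define PHYS_PFN_OFFSET	(0x80000000UL >> PAGE_SHIFT)	/* assumed RAM base */
#define ARCH_PFN_OFFSET	PHYS_PFN_OFFSET

/* Same shape as the new ARM macro: pure C, so the compiler can fold it. */
#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET)

int main(void)
{
	unsigned long addr = 0xc0123000UL;

	/* mem_map index = virt_to_pfn(addr) - ARCH_PFN_OFFSET; the two
	 * PHYS_PFN_OFFSET terms cancel, leaving (addr - PAGE_OFFSET) >> 12. */
	printf("index via pfn: %lu\n", virt_to_pfn(addr) - ARCH_PFN_OFFSET);
	printf("direct form  : %lu\n", (addr - PAGE_OFFSET) >> PAGE_SHIFT);
	return 0;
}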
Another issue which makes this more complex is that __pv_phys_offset is
a 64-bit type on all platforms. This is needlessly wasteful - if we
store the physical offset as a PFN, we can save a lot of work having
to deal with 64-bit values, which sometimes ends up producing incredibly
horrid code:
a4c: e3009000 movw r9, #0
a4c: R_ARM_MOVW_ABS_NC __pv_phys_offset
a50: e3409000 movt r9, #0 ; r9 = &__pv_phys_offset
a50: R_ARM_MOVT_ABS __pv_phys_offset
a54: e3002000 movw r2, #0
a54: R_ARM_MOVW_ABS_NC __pv_phys_offset
a58: e3402000 movt r2, #0 ; r2 = &__pv_phys_offset
a58: R_ARM_MOVT_ABS __pv_phys_offset
a5c: e5999004 ldr r9, [r9, #4] ; r9 = high word of __pv_phys_offset
a60: e3001000 movw r1, #0
a60: R_ARM_MOVW_ABS_NC mem_map
a64: e592c000 ldr ip, [r2] ; ip = low word of __pv_phys_offset
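For reference, the PFN-based form introduced by this patch (excerpted from the hunk below): the patched-in quantity becomes an unsigned long PFN, and the 64-bit-capable PHYS_OFFSET is only rebuilt by a shift when a full physical address is actually needed:

extern unsigned long __pv_phys_pfn_offset;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)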
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/include/asm/memory.h')
-rw-r--r-- | arch/arm/include/asm/memory.h | 41 |
1 file changed, 24 insertions, 17 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4bcdba0..2438d72cf4e6 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -169,9 +169,17 @@
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
  * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
  */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
 
 /*
  * Constants used to force the right instruction encodings and shifts
@@ -180,12 +188,17 @@
 #define __PV_BITS_31_24	0x81000000
 #define __PV_BITS_7_0	0x81
 
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
 extern u64 __pv_offset;
 extern void fixup_pv_table(const void *, unsigned long);
 extern const void *__pv_table_begin, *__pv_table_end;
 
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
 #define __pv_stub(from,to,instr,type)	\
 	__asm__("@ __pv_stub\n"		\
@@ -246,6 +259,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #else
 
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
@@ -257,18 +271,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 	return x - PHYS_OFFSET + PAGE_OFFSET;
 }
 
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -346,9 +353,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+					&& pfn_valid(virt_to_pfn(kaddr)))
 
 #endif
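As a usage sketch (a hypothetical caller, not part of the patch), both virt_addr_valid() and virt_to_page() now funnel through the same virt_to_pfn() helper whichever of the three configurations above is compiled in:

/* Hypothetical example, for illustration only. */
struct page *page_of(const void *kaddr)
{
	if (!virt_addr_valid(kaddr))	/* ... && pfn_valid(virt_to_pfn(kaddr)) */
		return NULL;
	return virt_to_page(kaddr);	/* pfn_to_page(virt_to_pfn(kaddr)) */
}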