From baaece224570a935210a59257b5d9073e99843ea Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Tue, 25 Jan 2011 15:53:03 +0100 Subject: ARM: 6635/2: Configure reference clock for Versatile Express timers Timers on the Versatile Express mainboard are used as system clock/event sources. The driver assumes that they are clocked with a 1MHz signal. Old V2M firmware apparently configured it by default, but on newer boards one can observe that the "sleep 1" command takes over 30 seconds to finish, as the timers are fed with 32kHz instead... This patch performs the required magic and also removes the code clearing the timers' control registers, as exactly the same operations are performed by the timer driver a few jiffies later. Signed-off-by: Pawel Moll Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/hardware/sp810.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index a101f10bb5b1..721847dc68ab 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -50,6 +50,12 @@ #define SCPCELLID2 0xFF8 #define SCPCELLID3 0xFFC +#define SCCTRL_TIMEREN0SEL_REFCLK (0 << 15) +#define SCCTRL_TIMEREN0SEL_TIMCLK (1 << 15) + +#define SCCTRL_TIMEREN1SEL_REFCLK (0 << 17) +#define SCCTRL_TIMEREN1SEL_TIMCLK (1 << 17) + static inline void sysctl_soft_reset(void __iomem *base) { /* writing any value to SCSYSSTAT reg will reset system */ -- cgit v1.2.3 From 05b112ff98070dc1f3293e8cd8e4c6f468d1084a Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 25 Jan 2011 11:18:25 +0100 Subject: ARM: 6637/1: Make the argument to virt_to_phys() "const volatile" Changing the virt_to_phys() argument to "const volatile void *" avoids compiler warnings in some situations where this function is used. Signed-off-by: Catalin Marinas Acked-by: Stephen Boyd Acked-by: Arnd Bergmann Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 23c2e8e5c0fa..d0ee74b7cf86 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -188,7 +188,7 @@ * translation for translating DMA addresses. Use the driver * DMA support - see dma-mapping.h. */ -static inline unsigned long virt_to_phys(void *x) +static inline unsigned long virt_to_phys(const volatile void *x) { return __virt_to_phys((unsigned long)(x)); } -- cgit v1.2.3 From 5756e9dd0de6d5c307773f8f734c0684b3098fdd Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 26 Jan 2011 18:34:26 +0100 Subject: ARM: 6640/1: Thumb-2: Symbol manipulation macros for function body copying In low-level board support code, there is sometimes a need to copy a function body to another location at run-time. A straightforward call to memcpy doesn't work in Thumb-2, because bit 0 of external Thumb function symbols is set to 1, indicating that the function is Thumb. Without corrective measures, this will cause an off-by-one copy, and the copy may be called using the wrong instruction set. This patch adds an fncpy() macro to help with such copies. Particular care is needed, because C doesn't guarantee any defined behaviour when casting a function pointer to any other type. This has been observed to lead to strange optimisation side-effects when doing the arithmetic which is required in order to copy/move function bodies correctly in Thumb-2.
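To make the failure mode concrete, here is a minimal sketch of the manual fix the macro encapsulates (illustrative only, not part of the patch; thumb_func stands in for any external Thumb function symbol):

    #include <string.h>
    #include <stdint.h>

    extern void thumb_func(void);  /* symbol address has bit 0 set in Thumb */

    /* Broken: copies from (real start address + 1), and callers of the
     * copy at 'dest' would enter it in the wrong instruction set. */
    void *bad_copy(void *dest, size_t size)
    {
            return memcpy(dest, (void *)thumb_func, size);
    }

    /* Fixed by hand: mask bit 0 off before copying, then OR it back into
     * the returned pointer so that calls enter in Thumb state. */
    void (*good_copy(void *dest, size_t size))(void)
    {
            uintptr_t addr = (uintptr_t)thumb_func;

            memcpy(dest, (void *)(addr & ~(uintptr_t)1), size);
            return (void (*)(void))((uintptr_t)dest | (addr & 1));
    }

fncpy() below wraps exactly this bookkeeping, together with the alignment check and the I-cache maintenance, behind empty asm() barriers that keep the pointer arithmetic out of the optimiser's reach.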
Thanks to Russell King and Nicolas Pitre for their input on this patch. Signed-off-by: Dave Martin Tested-by: Jean Pihet Tested-by: Tony Lindgren Tested-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/include/asm/fncpy.h | 94 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 arch/arm/include/asm/fncpy.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h new file mode 100644 index 000000000000..de5354746924 --- /dev/null +++ b/arch/arm/include/asm/fncpy.h @@ -0,0 +1,94 @@ +/* + * arch/arm/include/asm/fncpy.h - helper macros for function body copying + * + * Copyright (C) 2011 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * These macros are intended for use when there is a need to copy a low-level + * function body into special memory. + * + * For example, when reconfiguring the SDRAM controller, the code doing the + * reconfiguration may need to run from SRAM. + * + * NOTE: the copied function body must be entirely self-contained and + * position-independent in order for this to work properly. + * + * NOTE: in order for embedded literals and data to get referenced correctly, + * the alignment of functions must be preserved when copying. To ensure this, + * the source and destination addresses for fncpy() must be aligned to a + * multiple of 8 bytes: you will get a BUG() if this condition is not met. + * You will typically need a ".align 3" directive in the assembler where the + * function to be copied is defined, and ensure that your allocator for the + * destination buffer returns 8-byte-aligned pointers. + * + * Typical usage example: + * + * extern int f(args); + * extern uint32_t size_of_f; + * int (*copied_f)(args); + * void *sram_buffer; + * + * copied_f = fncpy(sram_buffer, &f, size_of_f); + * + * ... later, call the function: ... + * + * copied_f(args); + * + * The size of the function to be copied can't be determined from C: + * this must be determined by other means, such as adding assembler directives + * in the file where f is defined. + */ + +#ifndef __ASM_FNCPY_H +#define __ASM_FNCPY_H + +#include <linux/types.h> +#include <linux/string.h> + +#include <asm/bug.h> +#include <asm/cacheflush.h> + +/* + * Minimum alignment requirement for the source and destination addresses + * for function copying.
+ */ +#define FNCPY_ALIGN 8 + +#define fncpy(dest_buf, funcp, size) ({ \ + uintptr_t __funcp_address; \ + typeof(funcp) __result; \ + \ + asm("" : "=r" (__funcp_address) : "0" (funcp)); \ + \ + /* \ + * Ensure alignment of source and destination addresses, \ + * disregarding the function's Thumb bit: \ + */ \ + BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ + (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ + \ + memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ + flush_icache_range((unsigned long)(dest_buf), \ + (unsigned long)(dest_buf) + (size)); \ + \ + asm("" : "=r" (__result) \ + : "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \ + \ + __result; \ +}) + +#endif /* !__ASM_FNCPY_H */ -- cgit v1.2.3 From c1928022ef94662a88329e35fa0968b1be328b8e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 30 Jan 2011 11:29:40 +0000 Subject: ARM: io: ensure inb/outb() et.al. are properly ordered on ARMv6+ Ensure that the ISA/PCI IO space accessors are properly ordered on ARMv6+ architectures. These should always be ordered with respect to all other accesses. This also fixes __iormb() and __iowmb() not being visible to ioread/ iowrite if a platform defines its own MMIO accessors. Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 20e0f7c9e03e..d66605dea55a 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -95,6 +95,15 @@ static inline void __iomem *__typesafe_io(unsigned long addr) return (void __iomem *)addr; } +/* IO barriers */ +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define __iormb() rmb() +#define __iowmb() wmb() +#else +#define __iormb() do { } while (0) +#define __iowmb() do { } while (0) +#endif + /* * Now, pick up the machine-defined IO definitions */ @@ -125,17 +134,17 @@ static inline void __iomem *__typesafe_io(unsigned long addr) * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. 
*/ #ifdef __io -#define outb(v,p) __raw_writeb(v,__io(p)) -#define outw(v,p) __raw_writew((__force __u16) \ - cpu_to_le16(v),__io(p)) -#define outl(v,p) __raw_writel((__force __u32) \ - cpu_to_le32(v),__io(p)) +#define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); }) +#define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \ + cpu_to_le16(v),__io(p)); }) +#define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \ + cpu_to_le32(v),__io(p)); }) -#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __v; }) +#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; }) #define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \ - __raw_readw(__io(p))); __v; }) + __raw_readw(__io(p))); __iormb(); __v; }) #define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \ - __raw_readl(__io(p))); __v; }) + __raw_readl(__io(p))); __iormb(); __v; }) #define outsb(p,d,l) __raw_writesb(__io(p),d,l) #define outsw(p,d,l) __raw_writesw(__io(p),d,l) @@ -192,14 +201,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ cpu_to_le32(v),__mem_pci(c))) -#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE -#define __iormb() rmb() -#define __iowmb() wmb() -#else -#define __iormb() do { } while (0) -#define __iowmb() do { } while (0) -#endif - #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) -- cgit v1.2.3 From 6323f0ccedf756dfe5f46549cec69a2d6d97937b Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 16 Jan 2011 18:02:17 +0000 Subject: ARM: bitops: switch set/clear/change bitops to use ldrex/strex Switch the set/clear/change bitops to use the word-based exclusive operations, which are only present in a wider range of ARM architectures than the byte-based exclusive operations. Tested record: - Nicolas Pitre: ext3,rw,le - Sourav Poddar: nfs,le - Will Deacon: ext3,rw,le - Tony Lindgren: ext3+nfs,le Reviewed-by: Nicolas Pitre Tested-by: Sourav Poddar Tested-by: Will Deacon Tested-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/include/asm/bitops.h | 60 ++++++++++++++++--------------------------- arch/arm/kernel/armksyms.c | 18 +++++-------- arch/arm/lib/bitops.h | 38 ++++++++++++++------------- arch/arm/lib/changebit.S | 10 ++------ arch/arm/lib/clearbit.S | 11 ++------ arch/arm/lib/setbit.S | 11 ++------ arch/arm/lib/testchangebit.S | 9 +++---- arch/arm/lib/testclearbit.S | 9 +++---- arch/arm/lib/testsetbit.S | 9 +++---- 9 files changed, 63 insertions(+), 112 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 7b1bb2bbaf88..af54ed102f5f 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. */ +/* + * Native endian assembly bitops. nr = 0 -> word 0 bit 0. + */ +extern void _set_bit(int nr, volatile unsigned long * p); +extern void _clear_bit(int nr, volatile unsigned long * p); +extern void _change_bit(int nr, volatile unsigned long * p); +extern int _test_and_set_bit(int nr, volatile unsigned long * p); +extern int _test_and_clear_bit(int nr, volatile unsigned long * p); +extern int _test_and_change_bit(int nr, volatile unsigned long * p); + /* * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. 
*/ -extern void _set_bit_le(int nr, volatile unsigned long * p); -extern void _clear_bit_le(int nr, volatile unsigned long * p); -extern void _change_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_le(int nr, volatile unsigned long * p); extern int _find_first_zero_bit_le(const void * p, unsigned size); extern int _find_next_zero_bit_le(const void * p, int size, int offset); extern int _find_first_bit_le(const unsigned long *p, unsigned size); @@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); /* * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. */ -extern void _set_bit_be(int nr, volatile unsigned long * p); -extern void _clear_bit_be(int nr, volatile unsigned long * p); -extern void _change_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_be(int nr, volatile unsigned long * p); extern int _find_first_zero_bit_be(const void * p, unsigned size); extern int _find_next_zero_bit_be(const void * p, int size, int offset); extern int _find_first_bit_be(const unsigned long *p, unsigned size); @@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); /* * The __* form of bitops are non-atomic and may be reordered. */ -#define ATOMIC_BITOP_LE(name,nr,p) \ - (__builtin_constant_p(nr) ? \ - ____atomic_##name(nr, p) : \ - _##name##_le(nr,p)) - -#define ATOMIC_BITOP_BE(name,nr,p) \ - (__builtin_constant_p(nr) ? \ - ____atomic_##name(nr, p) : \ - _##name##_be(nr,p)) +#define ATOMIC_BITOP(name,nr,p) \ + (__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p)) #else -#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p) -#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p) +#define ATOMIC_BITOP(name,nr,p) _##name(nr,p) #endif -#define NONATOMIC_BITOP(name,nr,p) \ - (____nonatomic_##name(nr, p)) +/* + * Native endian atomic definitions. + */ +#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p) +#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p) +#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p) +#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p) #ifndef __ARMEB__ /* * These are the little endian, atomic definitions. */ -#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p) -#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p) -#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p) -#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_le(p,sz) @@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); #define WORD_BITOFF_TO_LE(x) ((x)) #else - /* * These are the big endian, atomic definitions. 
*/ -#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p) -#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p) -#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p) -#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) #define find_first_bit(p,sz) _find_first_bit_be(p,sz) diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index e5e1e5387678..d5d4185f0c24 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp); #endif /* bitops */ -EXPORT_SYMBOL(_set_bit_le); -EXPORT_SYMBOL(_test_and_set_bit_le); -EXPORT_SYMBOL(_clear_bit_le); -EXPORT_SYMBOL(_test_and_clear_bit_le); -EXPORT_SYMBOL(_change_bit_le); -EXPORT_SYMBOL(_test_and_change_bit_le); +EXPORT_SYMBOL(_set_bit); +EXPORT_SYMBOL(_test_and_set_bit); +EXPORT_SYMBOL(_clear_bit); +EXPORT_SYMBOL(_test_and_clear_bit); +EXPORT_SYMBOL(_change_bit); +EXPORT_SYMBOL(_test_and_change_bit); EXPORT_SYMBOL(_find_first_zero_bit_le); EXPORT_SYMBOL(_find_next_zero_bit_le); EXPORT_SYMBOL(_find_first_bit_le); EXPORT_SYMBOL(_find_next_bit_le); #ifdef __ARMEB__ -EXPORT_SYMBOL(_set_bit_be); -EXPORT_SYMBOL(_test_and_set_bit_be); -EXPORT_SYMBOL(_clear_bit_be); -EXPORT_SYMBOL(_test_and_clear_bit_be); -EXPORT_SYMBOL(_change_bit_be); -EXPORT_SYMBOL(_test_and_change_bit_be); EXPORT_SYMBOL(_find_first_zero_bit_be); EXPORT_SYMBOL(_find_next_zero_bit_be); EXPORT_SYMBOL(_find_first_bit_be); diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index bd00551fb797..a9d9d152a751 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -1,15 +1,15 @@ - -#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K) +#if __LINUX_ARM_ARCH__ >= 6 .macro bitop, instr ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned mov r2, #1 - and r3, r0, #7 @ Get bit offset - add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #31 @ Get bit offset + mov r0, r0, lsr #5 + add r1, r1, r0, lsl #2 @ Get word offset mov r3, r2, lsl r3 -1: ldrexb r2, [r1] +1: ldrex r2, [r1] \instr r2, r2, r3 - strexb r0, r2, [r1] + strex r0, r2, [r1] cmp r0, #0 bne 1b mov pc, lr @@ -18,15 +18,16 @@ .macro testop, instr, store ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned - and r3, r0, #7 @ Get bit offset mov r2, #1 - add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #31 @ Get bit offset + mov r0, r0, lsr #5 + add r1, r1, r0, lsl #2 @ Get word offset mov r3, r2, lsl r3 @ create mask smp_dmb -1: ldrexb r2, [r1] +1: ldrex r2, [r1] ands r0, r2, r3 @ save old value of bit - \instr r2, r2, r3 @ toggle bit - strexb ip, r2, [r1] + \instr r2, r2, r3 @ toggle bit + strex ip, r2, [r1] cmp ip, #0 bne 1b smp_dmb @@ -38,13 +39,14 @@ .macro bitop, instr ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned - and r2, r0, #7 + and r2, r0, #31 + mov r0, r0, lsr #5 mov r3, #1 mov r3, r3, lsl r2 save_and_disable_irqs ip - ldrb r2, [r1, r0, lsr #3] + ldr r2, [r1, r0, lsl #2] \instr r2, r2, r3 - strb r2, [r1, r0, lsr #3] + str r2, [r1, r0, lsl #2] restore_irqs ip mov pc, lr .endm @@ -60,11 +62,11 @@ .macro testop, instr, store ands ip, r1, #3 strneb r1, [ip] @ assert word-aligned - add r1, r1, r0, lsr #3 - and r3, r0, #7 - mov r0, #1 + and r3, r0, #31 + mov r0, r0, lsr #5 save_and_disable_irqs ip - ldrb r2, [r1] + ldr r2, 
[r1, r0, lsl #2]! + mov r0, #1 tst r2, r0, lsl r3 \instr r2, r2, r0, lsl r3 \store r2, [r1] diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S index 80f3115cbee2..68ed5b62e839 100644 --- a/arch/arm/lib/changebit.S +++ b/arch/arm/lib/changebit.S @@ -12,12 +12,6 @@ #include "bitops.h" .text -/* Purpose : Function to change a bit - * Prototype: int change_bit(int bit, void *addr) - */ -ENTRY(_change_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_change_bit_le) +ENTRY(_change_bit) bitop eor -ENDPROC(_change_bit_be) -ENDPROC(_change_bit_le) +ENDPROC(_change_bit) diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S index 1a63e43a1df0..4c04c3b51eeb 100644 --- a/arch/arm/lib/clearbit.S +++ b/arch/arm/lib/clearbit.S @@ -12,13 +12,6 @@ #include "bitops.h" .text -/* - * Purpose : Function to clear a bit - * Prototype: int clear_bit(int bit, void *addr) - */ -ENTRY(_clear_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_clear_bit_le) +ENTRY(_clear_bit) bitop bic -ENDPROC(_clear_bit_be) -ENDPROC(_clear_bit_le) +ENDPROC(_clear_bit) diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S index 1dd7176c4b2b..bbee5c66a23e 100644 --- a/arch/arm/lib/setbit.S +++ b/arch/arm/lib/setbit.S @@ -12,13 +12,6 @@ #include "bitops.h" .text -/* - * Purpose : Function to set a bit - * Prototype: int set_bit(int bit, void *addr) - */ -ENTRY(_set_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_set_bit_le) +ENTRY(_set_bit) bitop orr -ENDPROC(_set_bit_be) -ENDPROC(_set_bit_le) +ENDPROC(_set_bit) diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S index 5c98dc567f0f..15a4d431f229 100644 --- a/arch/arm/lib/testchangebit.S +++ b/arch/arm/lib/testchangebit.S @@ -12,9 +12,6 @@ #include "bitops.h" .text -ENTRY(_test_and_change_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_test_and_change_bit_le) - testop eor, strb -ENDPROC(_test_and_change_bit_be) -ENDPROC(_test_and_change_bit_le) +ENTRY(_test_and_change_bit) + testop eor, str +ENDPROC(_test_and_change_bit) diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S index 543d7094d18e..521b66b5b95d 100644 --- a/arch/arm/lib/testclearbit.S +++ b/arch/arm/lib/testclearbit.S @@ -12,9 +12,6 @@ #include "bitops.h" .text -ENTRY(_test_and_clear_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_test_and_clear_bit_le) - testop bicne, strneb -ENDPROC(_test_and_clear_bit_be) -ENDPROC(_test_and_clear_bit_le) +ENTRY(_test_and_clear_bit) + testop bicne, strne +ENDPROC(_test_and_clear_bit) diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S index 0b3f390401ce..1c98cc2185bb 100644 --- a/arch/arm/lib/testsetbit.S +++ b/arch/arm/lib/testsetbit.S @@ -12,9 +12,6 @@ #include "bitops.h" .text -ENTRY(_test_and_set_bit_be) - eor r0, r0, #0x18 @ big endian byte ordering -ENTRY(_test_and_set_bit_le) - testop orreq, streqb -ENDPROC(_test_and_set_bit_be) -ENDPROC(_test_and_set_bit_le) +ENTRY(_test_and_set_bit) + testop orreq, streq +ENDPROC(_test_and_set_bit) -- cgit v1.2.3 From 000d9c78eb5cd7f18e3d6a381d66e606bc9b8196 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 15 Jan 2011 16:22:12 +0000 Subject: ARM: v6k: remove CPU_32v6K dependencies in asm/spinlock.h SMP requires at least the ARMv6K extensions to be present, so if we're running on SMP, the WFE and SEV instructions must be available. However, when we run on UP, the v6K extensions may not be available, and so we don't want WFE/SEV to be in the instruction stream. 
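For context, the reduced shape of the locking pattern involved: a waiting CPU parks in WFE until the unlocking CPU's SEV wakes it. This sketch is simplified from the arch_spin_lock()/dsb_sev() code changed below, and it relies on the WFE()/SEV macros this patch introduces; it is not a drop-in replacement:

    static inline void sketch_spin_lock(arch_spinlock_t *lock)
    {
            unsigned long tmp;

            __asm__ __volatile__(
    "1:     ldrex   %0, [%1]\n"
    "       teq     %0, #0\n"
            WFE("ne")               /* lock held: doze until an event */
    "       strexeq %0, %2, [%1]\n"
    "       teqeq   %0, #0\n"
    "       bne     1b"
            : "=&r" (tmp)
            : "r" (&lock->lock), "r" (1)
            : "cc");

            smp_mb();
    }

    static inline void sketch_spin_unlock(arch_spinlock_t *lock)
    {
            smp_mb();
            lock->lock = 0;         /* the real code does this store in asm */
            dsb_sev();              /* wake any CPU sleeping in WFE */
    }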
Use the SMP alternatives infrastructure to replace these instructions with NOPs if we build for SMP but run on UP. Tested-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 17eb355707dd..da1af5240159 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,17 +5,36 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +/* + * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K + * extensions, so when running on UP, we have to patch these instructions away. + */ +#define ALT_SMP(smp, up) \ + "9998: " smp "\n" \ + " .pushsection \".alt.smp.init\", \"a\"\n" \ + " .long 9998b\n" \ + " " up "\n" \ + " .popsection\n" + +#ifdef CONFIG_THUMB2_KERNEL +#define SEV ALT_SMP("sev.w", "nop.w") +#define WFE(cond) ALT_SMP("wfe" cond ".w", "nop.w") +#else +#define SEV ALT_SMP("sev", "nop") +#define WFE(cond) ALT_SMP("wfe" cond, "nop") +#endif + static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 __asm__ __volatile__ ( "dsb\n" - "sev" + SEV ); -#elif defined(CONFIG_CPU_32v6K) +#else __asm__ __volatile__ ( "mcr p15, 0, %0, c7, c10, 4\n" - "sev" + SEV : : "r" (0) ); #endif @@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" -#ifdef CONFIG_CPU_32v6K -" wfene\n" -#endif + WFE("ne") " strexeq %0, %2, [%1]\n" " teqeq %0, #0\n" " bne 1b" @@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" -#ifdef CONFIG_CPU_32v6K -" wfene\n" -#endif + WFE("ne") " strexeq %0, %2, [%1]\n" " teq %0, #0\n" " bne 1b" @@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) "1: ldrex %0, [%2]\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]\n" -#ifdef CONFIG_CPU_32v6K -" wfemi\n" -#endif + WFE("mi") " rsbpls %0, %1, #0\n" " bmi 1b" : "=&r" (tmp), "=&r" (tmp2) -- cgit v1.2.3 From e399b1a4e1d205bdc816cb550d2064f2eb1ddc4c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:08:32 +0000 Subject: ARM: v6k: introduce CPU_V6K option Introduce a CPU_V6K configuration option for platforms to select if they have a V6K CPU core. This allows us to identify whether we need to support ARMv6 CPUs without the V6K SMP extensions at build time. Currently CPU_V6K is just an alias for CPU_V6, and all places which reference CPU_V6 are replaced by (CPU_V6 || CPU_V6K). Select CPU_V6K from platforms which are known to be V6K-only. 
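The mechanism, sketched under stated assumptions: ALT_SMP() emits the SMP instruction inline and records its address plus a UP replacement in the ".alt.smp.init" section; boot code on a UP system walks those records and patches each site. The struct and walker below are illustrative, modelled on that section layout rather than lifted from the kernel's actual fixup routine (which runs in assembly and must also flush the patched range):

    /* Layout of one ".alt.smp.init" record as emitted by ALT_SMP():
     * the ".long 9998b" address of the SMP instruction, followed by
     * the encoding of its UP replacement. */
    struct smp_alt {
            u32     *insn_addr;     /* where the SMP instruction sits */
            u32     up_insn;        /* e.g. a NOP where WFE/SEV stood */
    };

    /* Hypothetical walker: on UP, overwrite each recorded site. */
    static void __init apply_up_alternatives(struct smp_alt *start,
                                             struct smp_alt *end)
    {
            struct smp_alt *a;

            for (a = start; a < end; a++)
                    *a->insn_addr = a->up_insn;
    }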
Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/Kconfig | 10 ++++----- arch/arm/Makefile | 1 + arch/arm/boot/compressed/head.S | 2 +- arch/arm/boot/compressed/misc.c | 2 +- arch/arm/include/asm/cacheflush.h | 5 +++-- arch/arm/include/asm/proc-fns.h | 2 +- arch/arm/kernel/debug.S | 2 +- arch/arm/kernel/perf_event_v6.c | 4 ++-- arch/arm/mm/Kconfig | 47 +++++++++++++++++++++++++-------------- arch/arm/mm/Makefile | 1 + arch/arm/mm/mmap.c | 2 +- 11 files changed, 47 insertions(+), 31 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165b7eb0..95ba92ff0d41 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -24,7 +24,7 @@ config ARM select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7)) + select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) select HAVE_C_RECORDMCOUNT select HAVE_GENERIC_HARDIRQS select HAVE_SPARSE_IRQ @@ -1048,7 +1048,7 @@ config XSCALE_PMU default y config CPU_HAS_PMU - depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \ + depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \ (!ARCH_OMAP3 || OMAP3_EMU) default y bool @@ -1064,7 +1064,7 @@ endif config ARM_ERRATA_411920 bool "ARM errata: Invalidation of the Instruction Cache operation can fail" - depends on CPU_V6 + depends on CPU_V6 || CPU_V6K help Invalidation of the Instruction Cache operation can fail. This erratum is present in 1136 (before r1p4), 1156 and 1176. @@ -1361,7 +1361,7 @@ config HZ config THUMB2_KERNEL bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)" - depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL + depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL select AEABI select ARM_ASM_UNIFIED help @@ -1852,7 +1852,7 @@ config FPE_FASTFPE config VFP bool "VFP-format floating point maths" - depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON + depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON help Say Y to include VFP support code in the kernel. This is needed if your hardware includes a VFP unit. 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c22c1adfedd6..9c430525e13e 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale) tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) +tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) ifeq ($(CONFIG_AEABI),y) CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 7193884ed8b0..91f20f0b3044 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -21,7 +21,7 @@ #if defined(CONFIG_DEBUG_ICEDCC) -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) .macro loadsp, rb, tmp .endm .macro writeb, ch, rb diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index e653a6d3c8d9..4657e877bf8f 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -36,7 +36,7 @@ extern void error(char *x); #ifdef CONFIG_DEBUG_ICEDCC -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) static void icedcc_putc(int ch) { diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 3acd8fa25e34..7d0614f599a7 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,7 +116,7 @@ # define MULTI_CACHE 1 #endif -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) //# ifdef _CACHE # define MULTI_CACHE 1 //# else @@ -316,7 +316,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, * Optimized __flush_icache_all for the common cases. Note that UP ARMv7 * will fall through to use __flush_icache_all_generic. */ -#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \ +#if (defined(CONFIG_CPU_V7) && \ + (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \ defined(CONFIG_SMP_ON_UP) #define __flush_icache_preferred __cpuc_flush_icache_all #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9bc9abb..296ca47489f9 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -231,7 +231,7 @@ # endif #endif -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) # ifdef CPU_NAME # undef MULTI_CPU # define MULTI_CPU diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index a0f07521ca8a..d2d983be096d 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -25,7 +25,7 @@ .macro addruart, rp, rv .endm -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index c058bfc8532b..6fc2d228db55 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -30,7 +30,7 @@ * enable the interrupt. 
*/ -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) enum armv6_perf_types { ARMV6_PERFCTR_ICACHE_MISS = 0x0, ARMV6_PERFCTR_IBUF_STALL = 0x1, @@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void) { return NULL; } -#endif /* CONFIG_CPU_V6 */ +#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9d30c6f804b9..559e9330bb18 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -402,16 +402,18 @@ config CPU_V6 select CPU_TLB_V6 if MMU # ARMv6k -config CPU_32v6K - bool "Support ARM V6K processor extensions" if !SMP - depends on CPU_V6 || CPU_V7 - default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) - help - Say Y here if your ARMv6 processor supports the 'K' extension. - This enables the kernel to use some instructions not present - on previous processors, and as such a kernel build with this - enabled will not boot on processors with do not support these - instructions. +config CPU_V6K + bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE + select CPU_32v6 + select CPU_32v6K if !ARCH_OMAP2 + select CPU_ABRT_EV6 + select CPU_PABRT_V6 + select CPU_CACHE_V6 + select CPU_CACHE_VIPT + select CPU_CP15_MMU + select CPU_HAS_ASID if MMU + select CPU_COPY_V6 if MMU + select CPU_TLB_V6 if MMU # ARMv7 config CPU_V7 @@ -453,6 +455,17 @@ config CPU_32v6 bool select TLS_REG_EMUL if !CPU_32v6K && !MMU +config CPU_32v6K + bool "Support ARM V6K processor extensions" if !SMP + depends on CPU_V6 || CPU_V6K || CPU_V7 + default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) + help + Say Y here if your ARMv6 processor supports the 'K' extension. + This enables the kernel to use some instructions not present + on previous processors, and as such a kernel build with this + enabled will not boot on processors with do not support these + instructions. + config CPU_32v7 bool @@ -623,7 +636,7 @@ comment "Processor Features" config ARM_THUMB bool "Support Thumb user binaries" - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON + depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON default y help Say Y if you want to include kernel support for running user space @@ -681,7 +694,7 @@ config CPU_BIG_ENDIAN config CPU_ENDIAN_BE8 bool depends on CPU_BIG_ENDIAN - default CPU_V6 || CPU_V7 + default CPU_V6 || CPU_V6K || CPU_V7 help Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. @@ -747,7 +760,7 @@ config CPU_CACHE_ROUND_ROBIN config CPU_BPREDICT_DISABLE bool "Disable branch prediction" - depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 + depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 help Say Y here to disable branch prediction. If unsure, say N. 
@@ -767,7 +780,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG config DMA_CACHE_RWFO bool "Enable read/write for ownership DMA cache maintenance" - depends on CPU_V6 && SMP + depends on (CPU_V6 || CPU_V6K) && SMP default y help The Snoop Control Unit on ARM11MPCore does not detect the @@ -823,7 +836,7 @@ config CACHE_L2X0 config CACHE_PL310 bool depends on CACHE_L2X0 - default y if CPU_V7 && !CPU_V6 + default y if CPU_V7 && !(CPU_V6 || CPU_V6K) help This option enables optimisations for the PL310 cache controller. @@ -851,10 +864,10 @@ config ARM_L1_CACHE_SHIFT default 5 config ARM_DMA_MEM_BUFFERABLE - bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7 + bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7 depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \ MACH_REALVIEW_PB11MP) - default y if CPU_V6 || CPU_V7 + default y if CPU_V6 || CPU_V6K || CPU_V7 help Historically, the kernel has used strongly ordered mappings to provide DMA coherent memory. With the advent of ARMv7, mapping diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 00d74a04af3a..bca7e61928c7 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o +obj-$(CONFIG_CPU_V6K) += proc-v6.o obj-$(CONFIG_CPU_V7) += proc-v7.o AFLAGS_proc-v6.o :=-Wa,-march=armv6 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index b0a98305055c..afe209e1e1f8 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) unsigned int cache_type; int do_align = 0, aliasing = 0; -- cgit v1.2.3 From 4ed67a53591db641543d57f31c182591a429dc93 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 15:42:42 +0000 Subject: ARM: v6k: select cmpxchg code sequences according to V6 variants If CONFIG_CPU_V6 is enabled, we must avoid the byte/halfword/doubleword exclusive operations, which aren't implemented before V6K. Use the generic versions (or omit them) instead. If CONFIG_CPU_V6 is not set, but CONFIG_CPU_32v6K is enabled, we have the K extensions, so use these new instructions.
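The mechanical shape of the conversion, as a sketch (v6_only_quirk is a hypothetical example site; the real hunks above and below all follow this pattern):

    /* Before: V6K cores were covered by CONFIG_CPU_V6. */
    #ifdef CONFIG_CPU_V6
    extern void v6_only_quirk(void);
    #endif

    /* After: either option builds the shared ARMv6 code, so each
     * such guard widens to accept both variants. */
    #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
    extern void v6_only_quirk(void);
    #endif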
Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/system.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 97f6d60297d5..9a87823642d0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -347,6 +347,7 @@ void cpu_idle_wait(void); #include #if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ #ifdef CONFIG_SMP #error "SMP is not supported on this platform" @@ -365,7 +366,7 @@ void cpu_idle_wait(void); #include #endif -#else /* __LINUX_ARM_ARCH__ >= 6 */ +#else /* min ARCH >= ARMv6 */ extern void __bad_cmpxchg(volatile void *ptr, int size); @@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long oldval, res; switch (size) { -#ifdef CONFIG_CPU_32v6K +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ case 1: do { asm volatile("@ __cmpxchg1\n" @@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, : "memory", "cc"); } while (res); break; -#endif /* CONFIG_CPU_32v6K */ +#endif case 4: do { asm volatile("@ __cmpxchg4\n" @@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, unsigned long ret; switch (size) { -#ifndef CONFIG_CPU_32v6K +#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ case 1: case 2: ret = __cmpxchg_local_generic(ptr, old, new, size); break; -#endif /* !CONFIG_CPU_32v6K */ +#endif default: ret = __cmpxchg(ptr, old, new, size); } @@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, (unsigned long)(n), \ sizeof(*(ptr)))) -#ifdef CONFIG_CPU_32v6K +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ /* * Note : ARMv7-M (currently unsupported by Linux) does not support @@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, (unsigned long long)(o), \ (unsigned long long)(n))) -#else /* !CONFIG_CPU_32v6K */ +#else /* min ARCH = ARMv6 */ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif /* CONFIG_CPU_32v6K */ +#endif #endif /* __LINUX_ARM_ARCH__ >= 6 */ -- cgit v1.2.3 From 37bc618fe2689a7f8de8fac82e72b00ecea4d43d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 17 Jan 2011 16:38:56 +0000 Subject: ARM: v6k: select TLS register code according to V6 variants If CONFIG_CPU_V6 is enabled, we may or may not have the TLS register. Use the conditional code which copes with this variability. Otherwise, if CONFIG_CPU_32v6K is set, we know we have the TLS register on all supported CPUs, so use it unconditionally. 
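The byte-sized case this enables, rendered as a simplified sketch: the exclusive sequence mirrors the __cmpxchg() hunk below, while the fallback shown is illustrative (the real non-local __cmpxchg() simply omits the byte case on plain V6; it is __cmpxchg_local() that falls back to __cmpxchg_local_generic()):

    static inline unsigned long cmpxchg_u8_sketch(volatile u8 *ptr,
                                                  u8 old, u8 new)
    {
    #ifndef CONFIG_CPU_V6   /* every remaining target is >= ARMv6K */
            unsigned long oldval, res;

            do {
                    asm volatile(
                    "ldrexb   %1, [%2]\n"   /* v6K byte exclusive load */
                    "mov      %0, #0\n"
                    "teq      %1, %3\n"
                    "strexbeq %0, %4, [%2]\n"
                    : "=&r" (res), "=&r" (oldval)
                    : "r" (ptr), "Ir" (old), "r" (new)
                    : "memory", "cc");
            } while (res);

            return oldval;
    #else
            /* plain ARMv6 has no ldrexb/strexb */
            return __cmpxchg_local_generic((volatile void *)ptr, old, new, 1);
    #endif
    }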
Acked-by: Nicolas Pitre Acked-by: Tony Lindgren Tested-by: Sourav Poddar Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/tls.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index e71d6ff8d104..60843eb0f61c 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -28,15 +28,14 @@ #define tls_emu 1 #define has_tls_reg 1 #define set_tls set_tls_none -#elif __LINUX_ARM_ARCH__ >= 7 || \ - (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define tls_emu 0 -#define has_tls_reg 1 -#define set_tls set_tls_v6k -#elif __LINUX_ARM_ARCH__ == 6 +#elif defined(CONFIG_CPU_V6) #define tls_emu 0 #define has_tls_reg (elf_hwcap & HWCAP_TLS) #define set_tls set_tls_v6 +#elif defined(CONFIG_CPU_32v6K) +#define tls_emu 0 +#define has_tls_reg 1 +#define set_tls set_tls_v6k #else #define tls_emu 0 #define has_tls_reg 0 -- cgit v1.2.3 From 774c096bf9e49eebf7b5d2d9fdddf632c29ccea0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 23 Jan 2011 13:04:53 +0000 Subject: ARM: v6/v7 cache: allow cache calls to be optimized The v6 cache call optimization was disabled to allow the optional block cache operations to be subsituted on CPUs which supported those operations. However, as that functionality was removed, we no longer need to prevent this optimization being taken advantage of. The v7 cache call optimization was just a copy of the v6, so also fix that too. Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 7d0614f599a7..d9b4c42d62ff 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -116,20 +116,20 @@ # define MULTI_CACHE 1 #endif -#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) -//# ifdef _CACHE +#if defined(CONFIG_CPU_CACHE_V6) +# ifdef _CACHE # define MULTI_CACHE 1 -//# else -//# define _CACHE v6 -//# endif +# else +# define _CACHE v6 +# endif #endif -#if defined(CONFIG_CPU_V7) -//# ifdef _CACHE +#if defined(CONFIG_CPU_CACHE_V7) +# ifdef _CACHE # define MULTI_CACHE 1 -//# else -//# define _CACHE v7 -//# endif +# else +# define _CACHE v7 +# endif #endif #if !defined(_CACHE) && !defined(MULTI_CACHE) -- cgit v1.2.3 From 917692f5f7ec63de3b093c825913d68e910db282 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Wed, 9 Feb 2011 12:06:59 +0100 Subject: ARM: 6655/1: Correct WFE() in asm/spinlock.h for Thumb-2 The content for ALT_SMP() in the definition of WFE() expands to 6 bytes (IT cc ; WFEcc.W), which breaks the assumptions of the fixup code, leading to lockups when the affected code gets run. This patch works around the problem by explicitly using an IT + WFEcc.N pair. 
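The runtime distinction, sketched in C (TPIDRURO is the user read-only thread ID register added by the 'K' extensions; the kernel's real set_tls_* helpers live in assembly, so these names are illustrative):

    /* v6K and later: the register is architecturally guaranteed. */
    static inline void set_tls_hw_sketch(unsigned long tp)
    {
            asm("mcr p15, 0, %0, c13, c0, 3" : : "r" (tp));
    }

    /* A plain ARMv6 kernel may run on a core without the register,
     * so the decision is made per boot from the hardware caps: */
    static inline void set_tls_sketch(unsigned long tp)
    {
            if (elf_hwcap & HWCAP_TLS)
                    set_tls_hw_sketch(tp);
            /* else: store tp in the kuser slot at 0xffff0ff0 instead */
    }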
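The size arithmetic behind the fix, as a sketch (byte counts follow the Thumb-2 encodings; the real macro is in the hunk below):

    /*
     * ALT_SMP("wfe" cond ".w", "nop.w") could assemble to:
     *     it    cc       @ 2 bytes, inserted implicitly by the assembler
     *     wfecc.w        @ 4 bytes  -> 6 bytes, but the fixup patches 4
     *
     * Writing the narrow pair explicitly pins the footprint to 4 bytes,
     * which a single nop.w can replace:
     */
    #define WFE_NE_SKETCH                                                   \
            "it     ne\n\t" /* 2 bytes, left alone by the assembler */      \
            "wfene.n"       /* 2 bytes */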
Signed-off-by: Dave Martin Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/spinlock.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index da1af5240159..fdd3820edff8 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -18,7 +18,23 @@ #ifdef CONFIG_THUMB2_KERNEL #define SEV ALT_SMP("sev.w", "nop.w") -#define WFE(cond) ALT_SMP("wfe" cond ".w", "nop.w") +/* + * For Thumb-2, special care is needed to ensure that the conditional WFE + * instruction really does assemble to exactly 4 bytes (as required by + * the SMP_ON_UP fixup code). By itself "wfene" might cause the + * assembler to insert an extra (16-bit) IT instruction, depending on the + * presence or absence of neighbouring conditional instructions. + * + * To avoid this unpredictability, an appropriate IT is inserted explicitly: + * the assembler won't change IT instructions which are explicitly present + * in the input. + */ +#define WFE(cond) ALT_SMP( \ + "it " cond "\n\t" \ + "wfe" cond ".n", \ + \ + "nop.w" \ +) #else #define SEV ALT_SMP("sev", "nop") #define WFE(cond) ALT_SMP("wfe" cond, "nop") -- cgit v1.2.3 From 292ec42af7c6361435fe9df50cd59ec76f6741c6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 4 Feb 2011 10:36:39 +0000 Subject: ARM: pm: add function to set WFI low-power mode for SMP CPUs Add a function to set the SCU low-power mode for SMP CPUs. This centralizes this functionality rather than having to expose the SCU register definitions to each platform. Signed-off-by: Russell King --- arch/arm/include/asm/smp_scu.h | 7 +++++++ arch/arm/kernel/smp_scu.c | 23 +++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 2376835015d6..4eb6d005ffaa 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -1,7 +1,14 @@ #ifndef __ASMARM_ARCH_SCU_H #define __ASMARM_ARCH_SCU_H +#define SCU_PM_NORMAL 0 +#define SCU_PM_DORMANT 2 +#define SCU_PM_POWEROFF 3 + +#ifndef __ASSEMBLER__ unsigned int scu_get_core_count(void __iomem *); void scu_enable(void __iomem *); +int scu_power_mode(void __iomem *, unsigned int); +#endif #endif diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index 9ab4149bd983..a1e757c3439b 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base) */ flush_cache_all(); } + +/* + * Set the executing CPU's power mode as defined. This will be in + * preparation for it executing a WFI instruction. + * + * This function must be called with preemption disabled, and as it + * has the side effect of disabling coherency, caches must have been + * flushed. Interrupts must also have been disabled.
+ */ +int scu_power_mode(void __iomem *scu_base, unsigned int mode) +{ + unsigned int val; + int cpu = smp_processor_id(); + + if (mode > 3 || mode == 1 || cpu > 3) + return -EINVAL; + + val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03; + val |= mode; + __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu); + + return 0; +} -- cgit v1.2.3 From 753790e713d80b50b867fa1ed32ec0eb5e82ae8e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 6 Feb 2011 15:32:24 +0000 Subject: ARM: move cache/processor/fault glue to separate include files This allows the cache/processor/fault glue to be more easily used from assembler code. Tested on Assabet and Tegra 2. Tested-by: Colin Cross Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 133 +---------------- arch/arm/include/asm/cpu-multi32.h | 69 --------- arch/arm/include/asm/cpu-single.h | 44 ------ arch/arm/include/asm/glue-cache.h | 146 ++++++++++++++++++ arch/arm/include/asm/glue-df.h | 110 ++++++++++++++ arch/arm/include/asm/glue-pf.h | 57 +++++++ arch/arm/include/asm/glue-proc.h | 261 ++++++++++++++++++++++++++++++++ arch/arm/include/asm/glue.h | 138 ----------------- arch/arm/include/asm/proc-fns.h | 299 ++++++++----------------------------- arch/arm/kernel/asm-offsets.c | 2 + arch/arm/kernel/entry-armv.S | 3 +- 11 files changed, 644 insertions(+), 618 deletions(-) delete mode 100644 arch/arm/include/asm/cpu-multi32.h delete mode 100644 arch/arm/include/asm/cpu-single.h create mode 100644 arch/arm/include/asm/glue-cache.h create mode 100644 arch/arm/include/asm/glue-df.h create mode 100644 arch/arm/include/asm/glue-pf.h create mode 100644 arch/arm/include/asm/glue-proc.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 3acd8fa25e34..18a56640d97d 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -12,130 +12,13 @@ #include -#include +#include #include #include #include #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) -/* - * Cache Model - * =========== - */ -#undef _CACHE -#undef MULTI_CACHE - -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v3 -# endif -#endif - -#if defined(CONFIG_CPU_CACHE_V4) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v4 -# endif -#endif - -#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ - defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ - defined(CONFIG_CPU_ARM1026) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_FA526) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE fa -# endif -#endif - -#if defined(CONFIG_CPU_ARM926T) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm926 -# endif -#endif - -#if defined(CONFIG_CPU_ARM940T) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm940 -# endif -#endif - -#if defined(CONFIG_CPU_ARM946E) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE arm946 -# endif -#endif - -#if defined(CONFIG_CPU_CACHE_V4WB) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE v4wb -# endif -#endif - -#if defined(CONFIG_CPU_XSCALE) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE xscale -# endif -#endif - -#if defined(CONFIG_CPU_XSC3) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE xsc3 -# endif -#endif - -#if defined(CONFIG_CPU_MOHAWK) -# ifdef _CACHE -# define MULTI_CACHE 1 -# else -# define _CACHE mohawk -# endif -#endif - -#if 
defined(CONFIG_CPU_FEROCEON) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_V6) -//# ifdef _CACHE -# define MULTI_CACHE 1 -//# else -//# define _CACHE v6 -//# endif -#endif - -#if defined(CONFIG_CPU_V7) -//# ifdef _CACHE -# define MULTI_CACHE 1 -//# else -//# define _CACHE v7 -//# endif -#endif - -#if !defined(_CACHE) && !defined(MULTI_CACHE) -#error Unknown cache maintainence model -#endif - /* * This flag is used to indicate that the page pointed to by a pte is clean * and does not require cleaning before returning it to the user. @@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache; * visible to the CPU. */ #define dmac_map_area cpu_cache.dma_map_area -#define dmac_unmap_area cpu_cache.dma_unmap_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else -#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) -#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) -#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) -#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) -#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) -#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) -#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) - extern void __cpuc_flush_icache_all(void); extern void __cpuc_flush_kern_all(void); extern void __cpuc_flush_user_all(void); @@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) -#define dmac_flush_range __glue(_CACHE,_dma_flush_range) - extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h deleted file mode 100644 index e2b5b0b2116a..000000000000 --- a/arch/arm/include/asm/cpu-multi32.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * arch/arm/include/asm/cpu-multi32.h - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include - -struct mm_struct; - -/* - * Don't change this structure - ASM code - * relies on it. - */ -extern struct processor { - /* MISC - * get data abort address/flags - */ - void (*_data_abort)(unsigned long pc); - /* - * Retrieve prefetch fault address - */ - unsigned long (*_prefetch_abort)(unsigned long lr); - /* - * Set up any processor specifics - */ - void (*_proc_init)(void); - /* - * Disable any processor specifics - */ - void (*_proc_fin)(void); - /* - * Special stuff for a reset - */ - void (*reset)(unsigned long addr) __attribute__((noreturn)); - /* - * Idle the processor - */ - int (*_do_idle)(void); - /* - * Processor architecture specific - */ - /* - * clean a virtual address range from the - * D-cache without flushing the cache. - */ - void (*dcache_clean_area)(void *addr, int size); - - /* - * Set the page table - */ - void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); - /* - * Set a possibly extended PTE. Non-extended PTEs should - * ignore 'ext'. 
- */ - void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); -} processor; - -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_reset(addr) processor.reset(addr) -#define cpu_do_idle() processor._do_idle() -#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) -#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) -#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h deleted file mode 100644 index f073a6d2a406..000000000000 --- a/arch/arm/include/asm/cpu-single.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * arch/arm/include/asm/cpu-single.h - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -/* - * Single CPU - */ -#ifdef __STDC__ -#define __catify_fn(name,x) name##x -#else -#define __catify_fn(name,x) name/**/x -#endif -#define __cpu_fn(name,x) __catify_fn(name,x) - -/* - * If we are supporting multiple CPUs, then we must use a table of - * function pointers for this lot. Otherwise, we can optimise the - * table away. - */ -#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init) -#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) -#define cpu_reset __cpu_fn(CPU_NAME,_reset) -#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) -#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area) -#define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm) -#define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext) - -#include - -struct mm_struct; - -/* declare all the functions as extern */ -extern void cpu_proc_init(void); -extern void cpu_proc_fin(void); -extern int cpu_do_idle(void); -extern void cpu_dcache_clean_area(void *, int); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); -extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); -extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h new file mode 100644 index 000000000000..0591d35001e5 --- /dev/null +++ b/arch/arm/include/asm/glue-cache.h @@ -0,0 +1,146 @@ +/* + * arch/arm/include/asm/glue-cache.h + * + * Copyright (C) 1999-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_GLUE_CACHE_H +#define ASM_GLUE_CACHE_H + +#include + +/* + * Cache Model + * =========== + */ +#undef _CACHE +#undef MULTI_CACHE + +#if defined(CONFIG_CPU_CACHE_V3) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v3 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4 +# endif +#endif + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ + defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ + defined(CONFIG_CPU_ARM1026) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_FA526) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE fa +# endif +#endif + +#if defined(CONFIG_CPU_ARM926T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm926 +# endif +#endif + +#if defined(CONFIG_CPU_ARM940T) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm940 +# endif +#endif + +#if defined(CONFIG_CPU_ARM946E) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE arm946 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4WB) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE v4wb +# endif +#endif + +#if defined(CONFIG_CPU_XSCALE) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xscale +# endif +#endif + +#if defined(CONFIG_CPU_XSC3) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE xsc3 +# endif +#endif + +#if defined(CONFIG_CPU_MOHAWK) +# ifdef _CACHE +# define MULTI_CACHE 1 +# else +# define _CACHE mohawk +# endif +#endif + +#if defined(CONFIG_CPU_FEROCEON) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_V6) +//# ifdef _CACHE +# define MULTI_CACHE 1 +//# else +//# define _CACHE v6 +//# endif +#endif + +#if defined(CONFIG_CPU_V7) +//# ifdef _CACHE +# define MULTI_CACHE 1 +//# else +//# define _CACHE v7 +//# endif +#endif + +#if !defined(_CACHE) && !defined(MULTI_CACHE) +#error Unknown cache maintainence model +#endif + +#ifndef MULTI_CACHE +#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) +#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) +#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) +#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) +#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) +#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) + +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) +#define dmac_flush_range __glue(_CACHE,_dma_flush_range) +#endif + +#endif diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h new file mode 100644 index 000000000000..354d571e8bcc --- /dev/null +++ b/arch/arm/include/asm/glue-df.h @@ -0,0 +1,110 @@ +/* + * arch/arm/include/asm/glue-df.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_GLUE_DF_H +#define ASM_GLUE_DF_H + +#include + +/* + * Data Abort Model + * ================ + * + * We have the following to choose from: + * arm6 - ARM6 style + * arm7 - ARM7 style + * v4_early - ARMv4 without Thumb early abort handler + * v4t_late - ARMv4 with Thumb late abort handler + * v4t_early - ARMv4 with Thumb early abort handler + * v5tej_early - ARMv5 with Thumb and Java early abort handler + * xscale - ARMv5 with Thumb with Xscale extensions + * v6_early - ARMv6 generic early abort handler + * v7_early - ARMv7 generic early abort handler + */ +#undef CPU_DABORT_HANDLER +#undef MULTI_DABORT + +#if defined(CONFIG_CPU_ARM610) +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER cpu_arm6_data_abort +# endif +#endif + +#if defined(CONFIG_CPU_ARM710) +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER cpu_arm7_data_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_LV4T +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v4t_late_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV4 +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v4_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV4T +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v4t_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV5TJ +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v5tj_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV5T +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v5t_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV6 +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v6_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV7 +# ifdef CPU_DABORT_HANDLER +# define MULTI_DABORT 1 +# else +# define CPU_DABORT_HANDLER v7_early_abort +# endif +#endif + +#ifndef CPU_DABORT_HANDLER +#error Unknown data abort handler type +#endif + +#endif diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h new file mode 100644 index 000000000000..d385f37c13f0 --- /dev/null +++ b/arch/arm/include/asm/glue-pf.h @@ -0,0 +1,57 @@ +/* + * arch/arm/include/asm/glue-pf.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef ASM_GLUE_PF_H +#define ASM_GLUE_PF_H + +#include + +/* + * Prefetch Abort Model + * ================ + * + * We have the following to choose from: + * legacy - no IFSR, no IFAR + * v6 - ARMv6: IFSR, no IFAR + * v7 - ARMv7: IFSR and IFAR + */ + +#undef CPU_PABORT_HANDLER +#undef MULTI_PABORT + +#ifdef CONFIG_CPU_PABRT_LEGACY +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER legacy_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V6 +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER v6_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V7 +# ifdef CPU_PABORT_HANDLER +# define MULTI_PABORT 1 +# else +# define CPU_PABORT_HANDLER v7_pabort +# endif +#endif + +#ifndef CPU_PABORT_HANDLER +#error Unknown prefetch abort handler type +#endif + +#endif diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h new file mode 100644 index 000000000000..e3bf443f2d18 --- /dev/null +++ b/arch/arm/include/asm/glue-proc.h @@ -0,0 +1,261 @@ +/* + * arch/arm/include/asm/glue-proc.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASM_GLUE_PROC_H +#define ASM_GLUE_PROC_H + +#include + +/* + * Work out if we need multiple CPU support + */ +#undef MULTI_CPU +#undef CPU_NAME + +/* + * CPU_NAME - the prefix for CPU related functions + */ + +#ifdef CONFIG_CPU_ARM610 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm6 +# endif +#endif + +#ifdef CONFIG_CPU_ARM7TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM710 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7 +# endif +#endif + +#ifdef CONFIG_CPU_ARM720T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm720 +# endif +#endif + +#ifdef CONFIG_CPU_ARM740T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm740 +# endif +#endif + +#ifdef CONFIG_CPU_ARM9TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm9tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM920T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm920 +# endif +#endif + +#ifdef CONFIG_CPU_ARM922T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm922 +# endif +#endif + +#ifdef CONFIG_CPU_FA526 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_fa526 +# endif +#endif + +#ifdef CONFIG_CPU_ARM925T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm925 +# endif +#endif + +#ifdef CONFIG_CPU_ARM926T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm926 +# endif +#endif + +#ifdef CONFIG_CPU_ARM940T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm940 +# endif +#endif + +#ifdef CONFIG_CPU_ARM946E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm946 +# endif +#endif + +#ifdef CONFIG_CPU_SA110 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa110 +# 
endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa1100 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020e +# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1022 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1026 +# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xscale +# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xsc3 +# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_mohawk +# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon +# endif +#endif + +#ifdef CONFIG_CPU_V6 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v6 +# endif +#endif + +#ifdef CONFIG_CPU_V7 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7 +# endif +#endif + +#ifndef MULTI_CPU +#define cpu_proc_init __glue(CPU_NAME,_proc_init) +#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) +#define cpu_reset __glue(CPU_NAME,_reset) +#define cpu_do_idle __glue(CPU_NAME,_do_idle) +#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) +#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) +#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) +#endif + +#endif diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h index 234a3fc1c78e..0ec35d1698aa 100644 --- a/arch/arm/include/asm/glue.h +++ b/arch/arm/include/asm/glue.h @@ -15,7 +15,6 @@ */ #ifdef __KERNEL__ - #ifdef __STDC__ #define ____glue(name,fn) name##fn #else @@ -23,141 +22,4 @@ #endif #define __glue(name,fn) ____glue(name,fn) - - -/* - * Data Abort Model - * ================ - * - * We have the following to choose from: - * arm6 - ARM6 style - * arm7 - ARM7 style - * v4_early - ARMv4 without Thumb early abort handler - * v4t_late - ARMv4 with Thumb late abort handler - * v4t_early - ARMv4 with Thumb early abort handler - * v5tej_early - ARMv5 with Thumb and Java early abort handler - * xscale - ARMv5 with Thumb with Xscale extensions - * v6_early - ARMv6 generic early abort handler - * v7_early - ARMv7 generic early abort handler - */ -#undef CPU_DABORT_HANDLER -#undef MULTI_DABORT - -#if defined(CONFIG_CPU_ARM610) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm6_data_abort -# endif -#endif - -#if defined(CONFIG_CPU_ARM710) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm7_data_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_LV4T -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4t_late_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4T -# ifdef CPU_DABORT_HANDLER -# 
define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v4t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5TJ -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v5tj_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5T -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v5t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV6 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v6_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV7 -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER v7_early_abort -# endif -#endif - -#ifndef CPU_DABORT_HANDLER -#error Unknown data abort handler type -#endif - -/* - * Prefetch Abort Model - * ================ - * - * We have the following to choose from: - * legacy - no IFSR, no IFAR - * v6 - ARMv6: IFSR, no IFAR - * v7 - ARMv7: IFSR and IFAR - */ - -#undef CPU_PABORT_HANDLER -#undef MULTI_PABORT - -#ifdef CONFIG_CPU_PABRT_LEGACY -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER legacy_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V6 -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER v6_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V7 -# ifdef CPU_PABORT_HANDLER -# define MULTI_PABORT 1 -# else -# define CPU_PABORT_HANDLER v7_pabort -# endif -#endif - -#ifndef CPU_PABORT_HANDLER -#error Unknown prefetch abort handler type -#endif - #endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9bc9abb..69802150be22 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -13,248 +13,77 @@ #ifdef __KERNEL__ +#include +#include -/* - * Work out if we need multiple CPU support - */ -#undef MULTI_CPU -#undef CPU_NAME +#ifndef __ASSEMBLY__ + +struct mm_struct; /* - * CPU_NAME - the prefix for CPU related functions + * Don't change this structure - ASM code relies on it. 
*/ - -#ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm6 -# endif -#endif - -#ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7 -# endif -#endif - -#ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm720 -# endif -#endif - -#ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm740 -# endif -#endif - -#ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm9tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm920 -# endif -#endif - -#ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm922 -# endif -#endif - -#ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_fa526 -# endif -#endif - -#ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm925 -# endif -#endif - -#ifdef CONFIG_CPU_ARM926T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm926 -# endif -#endif - -#ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm940 -# endif -#endif - -#ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm946 -# endif -#endif - -#ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa110 -# endif -#endif - -#ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa1100 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020e -# endif -#endif - -#ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1022 -# endif -#endif - -#ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1026 -# endif -#endif - -#ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xscale -# endif -#endif - -#ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xsc3 -# endif -#endif - -#ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_mohawk -# endif -#endif - -#ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_feroceon -# endif -#endif - -#ifdef CONFIG_CPU_V6 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v6 -# endif -#endif - -#ifdef CONFIG_CPU_V7 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v7 -# endif -#endif - -#ifndef __ASSEMBLY__ +extern struct processor 
{
+	/* MISC
+	 * get data abort address/flags
+	 */
+	void (*_data_abort)(unsigned long pc);
+	/*
+	 * Retrieve prefetch fault address
+	 */
+	unsigned long (*_prefetch_abort)(unsigned long lr);
+	/*
+	 * Set up any processor specifics
+	 */
+	void (*_proc_init)(void);
+	/*
+	 * Disable any processor specifics
+	 */
+	void (*_proc_fin)(void);
+	/*
+	 * Special stuff for a reset
+	 */
+	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	/*
+	 * Idle the processor
+	 */
+	int (*_do_idle)(void);
+	/*
+	 * Processor architecture specific
+	 */
+	/*
+	 * clean a virtual address range from the
+	 * D-cache without flushing the cache.
+	 */
+	void (*dcache_clean_area)(void *addr, int size);
+
+	/*
+	 * Set the page table
+	 */
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	/*
+	 * Set a possibly extended PTE.  Non-extended PTEs should
+	 * ignore 'ext'.
+	 */
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+} processor;

 #ifndef MULTI_CPU
-#include
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 #else
-#include
+#define cpu_proc_init()			processor._proc_init()
+#define cpu_proc_fin()			processor._proc_fin()
+#define cpu_reset(addr)			processor.reset(addr)
+#define cpu_do_idle()			processor._do_idle()
+#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
+#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
 #endif

 #include
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 82da66172132..5302a917271b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2b46fea36c9f..e8d885676807 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -16,7 +16,8 @@
  */
 #include
-#include
+#include
+#include
 #include
 #include
 #include
--
cgit v1.2.3

From f4117ac9e237b74afdf5e001d5ea26a4d15e9847 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 4 Jan 2011 18:07:14 +0000
Subject: ARM: P2V: separate PHYS_OFFSET from platform definitions

This uncouples PHYS_OFFSET from the platform definitions, thereby
facilitating run-time computation of the physical memory offset.
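To illustrate the mechanical part of the change (the values are taken
from the platform hunks below), each mach/memory.h now defines only the
platform constant, and the core header maps the generic name onto it:

	/* arch/arm/mach-versatile/include/mach/memory.h */
	#define PLAT_PHYS_OFFSET	UL(0x00000000)

	/* arch/arm/include/asm/memory.h */
	#define PHYS_OFFSET		PLAT_PHYS_OFFSET

With the generic name owned by core code, a later patch can turn
PHYS_OFFSET into a run-time value without touching the platform
headers again.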
Acked-by: Nicolas Pitre Acked-by: Viresh Kumar Acked-by: H Hartley Sweeten Acked-by: Magnus Damm Acked-by: Tony Lindgren Acked-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Wan ZongShun Acked-by: Kukjin Kim Acked-by: Eric Miao Acked-by: Jiandong Zheng Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 ++ arch/arm/kernel/tcm.c | 2 +- arch/arm/mach-aaec2000/include/mach/memory.h | 2 +- arch/arm/mach-at91/include/mach/memory.h | 2 +- arch/arm/mach-bcmring/include/mach/hardware.h | 2 +- arch/arm/mach-bcmring/include/mach/memory.h | 2 +- arch/arm/mach-clps711x/include/mach/memory.h | 2 +- arch/arm/mach-cns3xxx/include/mach/memory.h | 2 +- arch/arm/mach-davinci/include/mach/memory.h | 4 ++-- arch/arm/mach-dove/include/mach/memory.h | 2 +- arch/arm/mach-ebsa110/include/mach/memory.h | 2 +- arch/arm/mach-ep93xx/include/mach/memory.h | 10 +++++----- arch/arm/mach-footbridge/include/mach/memory.h | 2 +- arch/arm/mach-gemini/include/mach/memory.h | 4 ++-- arch/arm/mach-h720x/include/mach/memory.h | 2 +- arch/arm/mach-integrator/include/mach/memory.h | 2 +- arch/arm/mach-iop13xx/include/mach/memory.h | 2 +- arch/arm/mach-iop32x/include/mach/memory.h | 2 +- arch/arm/mach-iop33x/include/mach/memory.h | 2 +- arch/arm/mach-ixp2000/include/mach/memory.h | 2 +- arch/arm/mach-ixp23xx/include/mach/memory.h | 2 +- arch/arm/mach-ixp4xx/include/mach/memory.h | 2 +- arch/arm/mach-kirkwood/include/mach/memory.h | 2 +- arch/arm/mach-ks8695/include/mach/memory.h | 2 +- arch/arm/mach-lh7a40x/include/mach/memory.h | 2 +- arch/arm/mach-loki/include/mach/memory.h | 2 +- arch/arm/mach-lpc32xx/include/mach/memory.h | 2 +- arch/arm/mach-mmp/include/mach/memory.h | 2 +- arch/arm/mach-msm/board-msm7x30.c | 2 +- arch/arm/mach-msm/include/mach/memory.h | 10 +++++----- arch/arm/mach-mv78xx0/include/mach/memory.h | 2 +- arch/arm/mach-mx3/mach-kzm_arm11_01.c | 2 +- arch/arm/mach-netx/include/mach/memory.h | 2 +- arch/arm/mach-nomadik/include/mach/memory.h | 2 +- arch/arm/mach-ns9xxx/include/mach/memory.h | 2 +- arch/arm/mach-nuc93x/include/mach/memory.h | 2 +- arch/arm/mach-orion5x/include/mach/memory.h | 2 +- arch/arm/mach-pnx4008/include/mach/memory.h | 2 +- arch/arm/mach-pxa/include/mach/memory.h | 2 +- arch/arm/mach-realview/include/mach/memory.h | 4 ++-- arch/arm/mach-rpc/include/mach/memory.h | 2 +- arch/arm/mach-s3c2400/include/mach/memory.h | 2 +- arch/arm/mach-s3c2410/include/mach/memory.h | 2 +- arch/arm/mach-s3c24a0/include/mach/memory.h | 2 +- arch/arm/mach-s3c64xx/include/mach/memory.h | 2 +- arch/arm/mach-s5p6442/include/mach/memory.h | 2 +- arch/arm/mach-s5p64x0/include/mach/memory.h | 2 +- arch/arm/mach-s5pc100/include/mach/memory.h | 2 +- arch/arm/mach-s5pv210/include/mach/memory.h | 2 +- arch/arm/mach-s5pv310/include/mach/memory.h | 2 +- arch/arm/mach-sa1100/include/mach/memory.h | 2 +- arch/arm/mach-shark/include/mach/memory.h | 2 +- arch/arm/mach-shmobile/include/mach/memory.h | 2 +- arch/arm/mach-tegra/include/mach/memory.h | 2 +- arch/arm/mach-u300/include/mach/memory.h | 6 +++--- arch/arm/mach-u300/u300.c | 2 +- arch/arm/mach-ux500/include/mach/memory.h | 2 +- arch/arm/mach-versatile/include/mach/memory.h | 2 +- arch/arm/mach-vexpress/include/mach/memory.h | 2 +- arch/arm/mach-w90x900/include/mach/memory.h | 2 +- arch/arm/plat-mxc/include/mach/memory.h | 18 +++++++++--------- arch/arm/plat-omap/include/plat/memory.h | 4 ++-- arch/arm/plat-spear/include/plat/memory.h | 2 +- arch/arm/plat-stmp3xxx/include/mach/memory.h | 2 +- arch/arm/plat-tcc/include/mach/memory.h | 2 +- 65 files changed, 88 
insertions(+), 86 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index d0ee74b7cf86..2efec578a62e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -24,6 +24,8 @@ */ #define UL(x) _AC(x, UL) +#define PHYS_OFFSET PLAT_PHYS_OFFSET + #ifdef CONFIG_MMU /* diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 26685c2f7a49..f5cf660eefcc 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -15,7 +15,7 @@ #include /* memcpy */ #include #include -#include +#include #include "tcm.h" static struct gen_pool *tcm_pool; diff --git a/arch/arm/mach-aaec2000/include/mach/memory.h b/arch/arm/mach-aaec2000/include/mach/memory.h index 4f93c567a35a..4a10bf0bd369 100644 --- a/arch/arm/mach-aaec2000/include/mach/memory.h +++ b/arch/arm/mach-aaec2000/include/mach/memory.h @@ -12,6 +12,6 @@ #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0xf0000000) +#define PLAT_PHYS_OFFSET UL(0xf0000000) #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-at91/include/mach/memory.h b/arch/arm/mach-at91/include/mach/memory.h index 14f4ef4b6a9e..c2cfe5040642 100644 --- a/arch/arm/mach-at91/include/mach/memory.h +++ b/arch/arm/mach-at91/include/mach/memory.h @@ -23,6 +23,6 @@ #include -#define PHYS_OFFSET (AT91_SDRAM_BASE) +#define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE) #endif diff --git a/arch/arm/mach-bcmring/include/mach/hardware.h b/arch/arm/mach-bcmring/include/mach/hardware.h index 447eb340c611..8bf3564fba50 100644 --- a/arch/arm/mach-bcmring/include/mach/hardware.h +++ b/arch/arm/mach-bcmring/include/mach/hardware.h @@ -31,7 +31,7 @@ * *_SIZE is the size of the region * *_BASE is the virtual address */ -#define RAM_START PHYS_OFFSET +#define RAM_START PLAT_PHYS_OFFSET #define RAM_SIZE (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED) #define RAM_BASE PAGE_OFFSET diff --git a/arch/arm/mach-bcmring/include/mach/memory.h b/arch/arm/mach-bcmring/include/mach/memory.h index 114f942bb4f3..15162e4c75f9 100644 --- a/arch/arm/mach-bcmring/include/mach/memory.h +++ b/arch/arm/mach-bcmring/include/mach/memory.h @@ -23,7 +23,7 @@ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. */ -#define PHYS_OFFSET CFG_GLOBAL_RAM_BASE +#define PLAT_PHYS_OFFSET CFG_GLOBAL_RAM_BASE /* * Maximum DMA memory allowed is 14M diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h index f45c8e892cb5..3a032a67725c 100644 --- a/arch/arm/mach-clps711x/include/mach/memory.h +++ b/arch/arm/mach-clps711x/include/mach/memory.h @@ -23,7 +23,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) #if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12) diff --git a/arch/arm/mach-cns3xxx/include/mach/memory.h b/arch/arm/mach-cns3xxx/include/mach/memory.h index 3b6b769b7a27..dc16c5c5d86b 100644 --- a/arch/arm/mach-cns3xxx/include/mach/memory.h +++ b/arch/arm/mach-cns3xxx/include/mach/memory.h @@ -13,7 +13,7 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define __phys_to_bus(x) ((x) + PHYS_OFFSET) #define __bus_to_phys(x) ((x) - PHYS_OFFSET) diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h index 22eb97c1c30b..78822723f382 100644 --- a/arch/arm/mach-davinci/include/mach/memory.h +++ b/arch/arm/mach-davinci/include/mach/memory.h @@ -26,9 +26,9 @@ #if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx) #error Cannot enable DaVinci and DA8XX platforms concurrently #elif defined(CONFIG_ARCH_DAVINCI_DA8XX) -#define PHYS_OFFSET DA8XX_DDR_BASE +#define PLAT_PHYS_OFFSET DA8XX_DDR_BASE #else -#define PHYS_OFFSET DAVINCI_DDR_BASE +#define PLAT_PHYS_OFFSET DAVINCI_DDR_BASE #endif #define DDR2_SDRCR_OFFSET 0xc diff --git a/arch/arm/mach-dove/include/mach/memory.h b/arch/arm/mach-dove/include/mach/memory.h index d66872074946..bbc93fee6c75 100644 --- a/arch/arm/mach-dove/include/mach/memory.h +++ b/arch/arm/mach-dove/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h index 0ca66d080c69..8e49066ad850 100644 --- a/arch/arm/mach-ebsa110/include/mach/memory.h +++ b/arch/arm/mach-ebsa110/include/mach/memory.h @@ -19,7 +19,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) /* * Cache flushing area - SRAM diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h index 554064e90307..c9400cf0051c 100644 --- a/arch/arm/mach-ep93xx/include/mach/memory.h +++ b/arch/arm/mach-ep93xx/include/mach/memory.h @@ -6,15 +6,15 @@ #define __ASM_ARCH_MEMORY_H #if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) #elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xd0000000) +#define PLAT_PHYS_OFFSET UL(0xd0000000) #elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xe0000000) +#define PLAT_PHYS_OFFSET UL(0xe0000000) #elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) -#define PHYS_OFFSET UL(0xf0000000) +#define PLAT_PHYS_OFFSET UL(0xf0000000) #else #error "Kconfig bug: No EP93xx PHYS_OFFSET set" #endif diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h index 8d64f4574087..5c6df377f969 100644 --- a/arch/arm/mach-footbridge/include/mach/memory.h +++ b/arch/arm/mach-footbridge/include/mach/memory.h @@ -62,7 +62,7 @@ extern unsigned long __bus_to_pfn(unsigned long); /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define FLUSH_BASE_PHYS 0x50000000 diff --git a/arch/arm/mach-gemini/include/mach/memory.h b/arch/arm/mach-gemini/include/mach/memory.h index 2d14d5bf1f9f..a50915f764d8 100644 --- a/arch/arm/mach-gemini/include/mach/memory.h +++ b/arch/arm/mach-gemini/include/mach/memory.h @@ -11,9 +11,9 @@ #define __MACH_MEMORY_H #ifdef CONFIG_GEMINI_MEM_SWAP -# define PHYS_OFFSET UL(0x00000000) +# define PLAT_PHYS_OFFSET UL(0x00000000) #else -# define PHYS_OFFSET UL(0x10000000) +# define PLAT_PHYS_OFFSET UL(0x10000000) #endif #endif /* __MACH_MEMORY_H */ diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h index ef4c1e26f18e..9d3687651462 100644 --- a/arch/arm/mach-h720x/include/mach/memory.h +++ b/arch/arm/mach-h720x/include/mach/memory.h @@ -7,7 +7,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x40000000) +#define PLAT_PHYS_OFFSET UL(0x40000000) /* * This is the maximum DMA address that can be DMAd to. * There should not be more than (0xd0000000 - 0xc0000000) diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h index 991f24d2c115..334d5e271889 100644 --- a/arch/arm/mach-integrator/include/mach/memory.h +++ b/arch/arm/mach-integrator/include/mach/memory.h @@ -23,7 +23,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define BUS_OFFSET UL(0x80000000) #define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET) diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h index 3ad455318868..1afa99ef97fa 100644 --- a/arch/arm/mach-iop13xx/include/mach/memory.h +++ b/arch/arm/mach-iop13xx/include/mach/memory.h @@ -6,7 +6,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-iop32x/include/mach/memory.h b/arch/arm/mach-iop32x/include/mach/memory.h index c30f6450ad50..169cc239f76c 100644 --- a/arch/arm/mach-iop32x/include/mach/memory.h +++ b/arch/arm/mach-iop32x/include/mach/memory.h @@ -8,6 +8,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xa0000000) +#define PLAT_PHYS_OFFSET UL(0xa0000000) #endif diff --git a/arch/arm/mach-iop33x/include/mach/memory.h b/arch/arm/mach-iop33x/include/mach/memory.h index a30a96aa6d2d..8e1daf7006b6 100644 --- a/arch/arm/mach-iop33x/include/mach/memory.h +++ b/arch/arm/mach-iop33x/include/mach/memory.h @@ -8,6 +8,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h index 98e3471be15b..5f0c4fd4076a 100644 --- a/arch/arm/mach-ixp2000/include/mach/memory.h +++ b/arch/arm/mach-ixp2000/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #include diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h index 6ef65d813f16..6cf0704e946a 100644 --- a/arch/arm/mach-ixp23xx/include/mach/memory.h +++ b/arch/arm/mach-ixp23xx/include/mach/memory.h @@ -17,7 +17,7 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET (0x00000000) +#define PLAT_PHYS_OFFSET (0x00000000) #define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0) diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h index 0136eaa29224..6d388c9d0e20 100644 --- a/arch/arm/mach-ixp4xx/include/mach/memory.h +++ b/arch/arm/mach-ixp4xx/include/mach/memory.h @@ -12,7 +12,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #if !defined(__ASSEMBLY__) && defined(CONFIG_PCI) diff --git a/arch/arm/mach-kirkwood/include/mach/memory.h b/arch/arm/mach-kirkwood/include/mach/memory.h index 45431e131465..4600b44e3ad3 100644 --- a/arch/arm/mach-kirkwood/include/mach/memory.h +++ b/arch/arm/mach-kirkwood/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h index bace9a681adc..f7e1b9bce345 100644 --- a/arch/arm/mach-ks8695/include/mach/memory.h +++ b/arch/arm/mach-ks8695/include/mach/memory.h @@ -18,7 +18,7 @@ /* * Physical SRAM offset. */ -#define PHYS_OFFSET KS8695_SDRAM_PA +#define PLAT_PHYS_OFFSET KS8695_SDRAM_PA #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-lh7a40x/include/mach/memory.h b/arch/arm/mach-lh7a40x/include/mach/memory.h index edb8f5faf5d5..f77bde80fe41 100644 --- a/arch/arm/mach-lh7a40x/include/mach/memory.h +++ b/arch/arm/mach-lh7a40x/include/mach/memory.h @@ -17,7 +17,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) /* * Sparsemem version of the above diff --git a/arch/arm/mach-loki/include/mach/memory.h b/arch/arm/mach-loki/include/mach/memory.h index 2ed7e6e732c2..66366657a875 100644 --- a/arch/arm/mach-loki/include/mach/memory.h +++ b/arch/arm/mach-loki/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-lpc32xx/include/mach/memory.h b/arch/arm/mach-lpc32xx/include/mach/memory.h index 044e1acecbe6..a647dd624afa 100644 --- a/arch/arm/mach-lpc32xx/include/mach/memory.h +++ b/arch/arm/mach-lpc32xx/include/mach/memory.h @@ -22,6 +22,6 @@ /* * Physical DRAM offset of bank 0 */ -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif diff --git a/arch/arm/mach-mmp/include/mach/memory.h b/arch/arm/mach-mmp/include/mach/memory.h index bdb21d70714c..d68b50a2d6a0 100644 --- a/arch/arm/mach-mmp/include/mach/memory.h +++ b/arch/arm/mach-mmp/include/mach/memory.h @@ -9,6 +9,6 @@ #ifndef __ASM_MACH_MEMORY_H #define __ASM_MACH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif /* __ASM_MACH_MEMORY_H */ diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c index 6f3b9735e970..decbf80b429e 100644 --- a/arch/arm/mach-msm/board-msm7x30.c +++ b/arch/arm/mach-msm/board-msm7x30.c @@ -26,11 +26,11 @@ #include #include +#include #include #include #include -#include #include #include diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h index 070e17d237f1..176875df241f 100644 --- a/arch/arm/mach-msm/include/mach/memory.h +++ b/arch/arm/mach-msm/include/mach/memory.h @@ -18,15 +18,15 @@ /* physical offset of RAM */ #if 
defined(CONFIG_ARCH_QSD8X50) && defined(CONFIG_MSM_SOC_REV_A) -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #elif defined(CONFIG_ARCH_QSD8X50) -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #elif defined(CONFIG_ARCH_MSM7X30) -#define PHYS_OFFSET UL(0x00200000) +#define PLAT_PHYS_OFFSET UL(0x00200000) #elif defined(CONFIG_ARCH_MSM8X60) -#define PHYS_OFFSET UL(0x40200000) +#define PLAT_PHYS_OFFSET UL(0x40200000) #else -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) #endif #endif diff --git a/arch/arm/mach-mv78xx0/include/mach/memory.h b/arch/arm/mach-mv78xx0/include/mach/memory.h index e663042d307f..a648c51f2e42 100644 --- a/arch/arm/mach-mv78xx0/include/mach/memory.h +++ b/arch/arm/mach-mv78xx0/include/mach/memory.h @@ -5,6 +5,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-mx3/mach-kzm_arm11_01.c b/arch/arm/mach-mx3/mach-kzm_arm11_01.c index a5f3eb24e4d5..df1a6ce8e3e1 100644 --- a/arch/arm/mach-mx3/mach-kzm_arm11_01.c +++ b/arch/arm/mach-mx3/mach-kzm_arm11_01.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -36,7 +37,6 @@ #include #include #include -#include #include "devices-imx31.h" #include "devices.h" diff --git a/arch/arm/mach-netx/include/mach/memory.h b/arch/arm/mach-netx/include/mach/memory.h index 9a363f297f90..59561496c36e 100644 --- a/arch/arm/mach-netx/include/mach/memory.h +++ b/arch/arm/mach-netx/include/mach/memory.h @@ -20,7 +20,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif diff --git a/arch/arm/mach-nomadik/include/mach/memory.h b/arch/arm/mach-nomadik/include/mach/memory.h index 1e5689d98ecd..d3325211ba6a 100644 --- a/arch/arm/mach-nomadik/include/mach/memory.h +++ b/arch/arm/mach-nomadik/include/mach/memory.h @@ -23,6 +23,6 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-ns9xxx/include/mach/memory.h b/arch/arm/mach-ns9xxx/include/mach/memory.h index 6107193adbfe..5c65aee6e7a9 100644 --- a/arch/arm/mach-ns9xxx/include/mach/memory.h +++ b/arch/arm/mach-ns9xxx/include/mach/memory.h @@ -19,6 +19,6 @@ #define NS9XXX_CS2STAT_LENGTH UL(0x1000) #define NS9XXX_CS3STAT_LENGTH UL(0x1000) -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-nuc93x/include/mach/memory.h b/arch/arm/mach-nuc93x/include/mach/memory.h index 323ab0db3f7d..ef9864b002a6 100644 --- a/arch/arm/mach-nuc93x/include/mach/memory.h +++ b/arch/arm/mach-nuc93x/include/mach/memory.h @@ -16,6 +16,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-orion5x/include/mach/memory.h b/arch/arm/mach-orion5x/include/mach/memory.h index 52a2955d0f87..6769917882fe 100644 --- a/arch/arm/mach-orion5x/include/mach/memory.h +++ b/arch/arm/mach-orion5x/include/mach/memory.h @@ -7,6 +7,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-pnx4008/include/mach/memory.h b/arch/arm/mach-pnx4008/include/mach/memory.h index 0e8770081058..1275db61cee5 100644 --- a/arch/arm/mach-pnx4008/include/mach/memory.h +++ b/arch/arm/mach-pnx4008/include/mach/memory.h @@ -16,6 +16,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h index 92361a66b223..7f68724dcc27 100644 --- a/arch/arm/mach-pxa/include/mach/memory.h +++ b/arch/arm/mach-pxa/include/mach/memory.h @@ -15,7 +15,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0xa0000000) +#define PLAT_PHYS_OFFSET UL(0xa0000000) #if !defined(__ASSEMBLY__) && defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) void cmx2xx_pci_adjust_zones(unsigned long *size, unsigned long *holes); diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h index 5dafc157b276..e05fc2c4c080 100644 --- a/arch/arm/mach-realview/include/mach/memory.h +++ b/arch/arm/mach-realview/include/mach/memory.h @@ -24,9 +24,9 @@ * Physical DRAM offset. */ #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET -#define PHYS_OFFSET UL(0x70000000) +#define PLAT_PHYS_OFFSET UL(0x70000000) #else -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif #if !defined(__ASSEMBLY__) && defined(CONFIG_ZONE_DMA) diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h index 78191bf25192..18a221093bf5 100644 --- a/arch/arm/mach-rpc/include/mach/memory.h +++ b/arch/arm/mach-rpc/include/mach/memory.h @@ -21,7 +21,7 @@ /* * Physical DRAM offset. 
*/ -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) /* * Cache flushing area - ROM diff --git a/arch/arm/mach-s3c2400/include/mach/memory.h b/arch/arm/mach-s3c2400/include/mach/memory.h index cf5901ffd385..3f33670dd012 100644 --- a/arch/arm/mach-s3c2400/include/mach/memory.h +++ b/arch/arm/mach-s3c2400/include/mach/memory.h @@ -15,6 +15,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x0C000000) +#define PLAT_PHYS_OFFSET UL(0x0C000000) #endif diff --git a/arch/arm/mach-s3c2410/include/mach/memory.h b/arch/arm/mach-s3c2410/include/mach/memory.h index 6f1e5871ae4b..f92b97b89c0c 100644 --- a/arch/arm/mach-s3c2410/include/mach/memory.h +++ b/arch/arm/mach-s3c2410/include/mach/memory.h @@ -11,6 +11,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x30000000) +#define PLAT_PHYS_OFFSET UL(0x30000000) #endif diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h index 7d74fd5c8d66..7d208a71b172 100644 --- a/arch/arm/mach-s3c24a0/include/mach/memory.h +++ b/arch/arm/mach-s3c24a0/include/mach/memory.h @@ -11,7 +11,7 @@ #ifndef __ASM_ARCH_24A0_MEMORY_H #define __ASM_ARCH_24A0_MEMORY_H __FILE__ -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) #define __virt_to_bus(x) __virt_to_phys(x) #define __bus_to_virt(x) __phys_to_virt(x) diff --git a/arch/arm/mach-s3c64xx/include/mach/memory.h b/arch/arm/mach-s3c64xx/include/mach/memory.h index 42cc54e2ee30..4760cdae1eb6 100644 --- a/arch/arm/mach-s3c64xx/include/mach/memory.h +++ b/arch/arm/mach-s3c64xx/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x50000000) +#define PLAT_PHYS_OFFSET UL(0x50000000) #define CONSISTENT_DMA_SIZE SZ_8M diff --git a/arch/arm/mach-s5p6442/include/mach/memory.h b/arch/arm/mach-s5p6442/include/mach/memory.h index 9ddd877ba2ea..cfe259dded33 100644 --- a/arch/arm/mach-s5p6442/include/mach/memory.h +++ b/arch/arm/mach-s5p6442/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #define CONSISTENT_DMA_SIZE SZ_8M #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-s5p64x0/include/mach/memory.h b/arch/arm/mach-s5p64x0/include/mach/memory.h index 1b036b0a24ce..365a6eb4b88f 100644 --- a/arch/arm/mach-s5p64x0/include/mach/memory.h +++ b/arch/arm/mach-s5p64x0/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H __FILE__ -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #define CONSISTENT_DMA_SIZE SZ_8M #endif /* __ASM_ARCH_MEMORY_H */ diff --git a/arch/arm/mach-s5pc100/include/mach/memory.h b/arch/arm/mach-s5pc100/include/mach/memory.h index 4b60d18179f7..bda4e79fd5fc 100644 --- a/arch/arm/mach-s5pc100/include/mach/memory.h +++ b/arch/arm/mach-s5pc100/include/mach/memory.h @@ -13,6 +13,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #endif diff --git a/arch/arm/mach-s5pv210/include/mach/memory.h b/arch/arm/mach-s5pv210/include/mach/memory.h index d503e0c4ce4f..7b5fcf0da0c4 100644 --- a/arch/arm/mach-s5pv210/include/mach/memory.h +++ b/arch/arm/mach-s5pv210/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET 
UL(0x20000000) #define CONSISTENT_DMA_SIZE (SZ_8M + SZ_4M + SZ_2M) /* diff --git a/arch/arm/mach-s5pv310/include/mach/memory.h b/arch/arm/mach-s5pv310/include/mach/memory.h index 1dffb4823245..470b01bf8614 100644 --- a/arch/arm/mach-s5pv310/include/mach/memory.h +++ b/arch/arm/mach-s5pv310/include/mach/memory.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H __FILE__ -#define PHYS_OFFSET UL(0x40000000) +#define PLAT_PHYS_OFFSET UL(0x40000000) /* Maximum of 256MiB in one bank */ #define MAX_PHYSMEM_BITS 32 diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h index 128a1dfa96b9..a44da6a2916c 100644 --- a/arch/arm/mach-sa1100/include/mach/memory.h +++ b/arch/arm/mach-sa1100/include/mach/memory.h @@ -12,7 +12,7 @@ /* * Physical DRAM offset is 0xc0000000 on the SA1100 */ -#define PHYS_OFFSET UL(0xc0000000) +#define PLAT_PHYS_OFFSET UL(0xc0000000) #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h index d9c4812f1c31..9afb17000008 100644 --- a/arch/arm/mach-shark/include/mach/memory.h +++ b/arch/arm/mach-shark/include/mach/memory.h @@ -15,7 +15,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x08000000) +#define PLAT_PHYS_OFFSET UL(0x08000000) #ifndef __ASSEMBLY__ diff --git a/arch/arm/mach-shmobile/include/mach/memory.h b/arch/arm/mach-shmobile/include/mach/memory.h index 377584e57e03..ad00c3c258f4 100644 --- a/arch/arm/mach-shmobile/include/mach/memory.h +++ b/arch/arm/mach-shmobile/include/mach/memory.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_MEMORY_H #define __ASM_MACH_MEMORY_H -#define PHYS_OFFSET UL(CONFIG_MEMORY_START) +#define PLAT_PHYS_OFFSET UL(CONFIG_MEMORY_START) #define MEM_SIZE UL(CONFIG_MEMORY_SIZE) /* DMA memory at 0xf6000000 - 0xffdfffff */ diff --git a/arch/arm/mach-tegra/include/mach/memory.h b/arch/arm/mach-tegra/include/mach/memory.h index 6151bab62af2..537db3aa81a7 100644 --- a/arch/arm/mach-tegra/include/mach/memory.h +++ b/arch/arm/mach-tegra/include/mach/memory.h @@ -22,7 +22,7 @@ #define __MACH_TEGRA_MEMORY_H /* physical offset of RAM */ -#define PHYS_OFFSET UL(0) +#define PLAT_PHYS_OFFSET UL(0) #endif diff --git a/arch/arm/mach-u300/include/mach/memory.h b/arch/arm/mach-u300/include/mach/memory.h index bf134bcc129d..888e2e351ee1 100644 --- a/arch/arm/mach-u300/include/mach/memory.h +++ b/arch/arm/mach-u300/include/mach/memory.h @@ -15,17 +15,17 @@ #ifdef CONFIG_MACH_U300_DUAL_RAM -#define PHYS_OFFSET UL(0x48000000) +#define PLAT_PHYS_OFFSET UL(0x48000000) #define BOOT_PARAMS_OFFSET (PHYS_OFFSET + 0x100) #else #ifdef CONFIG_MACH_U300_2MB_ALIGNMENT_FIX -#define PHYS_OFFSET (0x28000000 + \ +#define PLAT_PHYS_OFFSET (0x28000000 + \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE - \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024) #else -#define PHYS_OFFSET (0x28000000 + \ +#define PLAT_PHYS_OFFSET (0x28000000 + \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE + \ (CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024) #endif diff --git a/arch/arm/mach-u300/u300.c b/arch/arm/mach-u300/u300.c index 07c35a846424..48b3b7f39966 100644 --- a/arch/arm/mach-u300/u300.c +++ b/arch/arm/mach-u300/u300.c @@ -19,9 +19,9 @@ #include #include #include -#include #include #include +#include static void __init u300_reserve(void) { diff --git a/arch/arm/mach-ux500/include/mach/memory.h b/arch/arm/mach-ux500/include/mach/memory.h index 510571a59e25..2ef697a67006 100644 --- a/arch/arm/mach-ux500/include/mach/memory.h +++ b/arch/arm/mach-ux500/include/mach/memory.h @@ -12,7 
+12,7 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #define BUS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-versatile/include/mach/memory.h b/arch/arm/mach-versatile/include/mach/memory.h index 79aeab86b903..dacc9d8e4e6a 100644 --- a/arch/arm/mach-versatile/include/mach/memory.h +++ b/arch/arm/mach-versatile/include/mach/memory.h @@ -23,6 +23,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/mach-vexpress/include/mach/memory.h b/arch/arm/mach-vexpress/include/mach/memory.h index be28232ae639..5b7fcd439d87 100644 --- a/arch/arm/mach-vexpress/include/mach/memory.h +++ b/arch/arm/mach-vexpress/include/mach/memory.h @@ -20,6 +20,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x60000000) +#define PLAT_PHYS_OFFSET UL(0x60000000) #endif diff --git a/arch/arm/mach-w90x900/include/mach/memory.h b/arch/arm/mach-w90x900/include/mach/memory.h index 971b80702c27..f02905ba7746 100644 --- a/arch/arm/mach-w90x900/include/mach/memory.h +++ b/arch/arm/mach-w90x900/include/mach/memory.h @@ -18,6 +18,6 @@ #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif diff --git a/arch/arm/plat-mxc/include/mach/memory.h b/arch/arm/plat-mxc/include/mach/memory.h index 83861408133f..5d51cbb98893 100644 --- a/arch/arm/plat-mxc/include/mach/memory.h +++ b/arch/arm/plat-mxc/include/mach/memory.h @@ -23,23 +23,23 @@ #if !defined(CONFIG_RUNTIME_PHYS_OFFSET) # if defined CONFIG_ARCH_MX1 -# define PHYS_OFFSET MX1_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX1_PHYS_OFFSET # elif defined CONFIG_MACH_MX21 -# define PHYS_OFFSET MX21_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX21_PHYS_OFFSET # elif defined CONFIG_ARCH_MX25 -# define PHYS_OFFSET MX25_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX25_PHYS_OFFSET # elif defined CONFIG_MACH_MX27 -# define PHYS_OFFSET MX27_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX27_PHYS_OFFSET # elif defined CONFIG_ARCH_MX3 -# define PHYS_OFFSET MX3x_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX3x_PHYS_OFFSET # elif defined CONFIG_ARCH_MXC91231 -# define PHYS_OFFSET MXC91231_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MXC91231_PHYS_OFFSET # elif defined CONFIG_ARCH_MX50 -# define PHYS_OFFSET MX50_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX50_PHYS_OFFSET # elif defined CONFIG_ARCH_MX51 -# define PHYS_OFFSET MX51_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX51_PHYS_OFFSET # elif defined CONFIG_ARCH_MX53 -# define PHYS_OFFSET MX53_PHYS_OFFSET +# define PLAT_PHYS_OFFSET MX53_PHYS_OFFSET # endif #endif diff --git a/arch/arm/plat-omap/include/plat/memory.h b/arch/arm/plat-omap/include/plat/memory.h index f8d922fb5584..e6720aa2d553 100644 --- a/arch/arm/plat-omap/include/plat/memory.h +++ b/arch/arm/plat-omap/include/plat/memory.h @@ -37,9 +37,9 @@ * Physical DRAM offset. 
*/ #if defined(CONFIG_ARCH_OMAP1) -#define PHYS_OFFSET UL(0x10000000) +#define PLAT_PHYS_OFFSET UL(0x10000000) #else -#define PHYS_OFFSET UL(0x80000000) +#define PLAT_PHYS_OFFSET UL(0x80000000) #endif /* diff --git a/arch/arm/plat-spear/include/plat/memory.h b/arch/arm/plat-spear/include/plat/memory.h index 27a4aba77343..7e3599e1104e 100644 --- a/arch/arm/plat-spear/include/plat/memory.h +++ b/arch/arm/plat-spear/include/plat/memory.h @@ -15,6 +15,6 @@ #define __PLAT_MEMORY_H /* Physical DRAM offset */ -#define PHYS_OFFSET UL(0x00000000) +#define PLAT_PHYS_OFFSET UL(0x00000000) #endif /* __PLAT_MEMORY_H */ diff --git a/arch/arm/plat-stmp3xxx/include/mach/memory.h b/arch/arm/plat-stmp3xxx/include/mach/memory.h index 7b875a07a1a7..61fa54882e12 100644 --- a/arch/arm/plat-stmp3xxx/include/mach/memory.h +++ b/arch/arm/plat-stmp3xxx/include/mach/memory.h @@ -17,6 +17,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x40000000) +#define PLAT_PHYS_OFFSET UL(0x40000000) #endif diff --git a/arch/arm/plat-tcc/include/mach/memory.h b/arch/arm/plat-tcc/include/mach/memory.h index cd91ba8a670b..28a6e0cd13b3 100644 --- a/arch/arm/plat-tcc/include/mach/memory.h +++ b/arch/arm/plat-tcc/include/mach/memory.h @@ -13,6 +13,6 @@ /* * Physical DRAM offset. */ -#define PHYS_OFFSET UL(0x20000000) +#define PLAT_PHYS_OFFSET UL(0x20000000) #endif -- cgit v1.2.3 From dc21af99fadcfa0ae65b52fd0895f85824f0c288 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:09:43 +0000 Subject: ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching This idea came from Nicolas, Eric Miao produced an initial version, which was then rewritten into this. Patch the physical to virtual translations at runtime. As we modify the code, this makes it incompatible with XIP kernels, but allows us to achieve this with minimal loss of performance. As many translations are of the form: physical = virtual + (PHYS_OFFSET - PAGE_OFFSET) virtual = physical - (PHYS_OFFSET - PAGE_OFFSET) we generate an 'add' instruction for __virt_to_phys(), and a 'sub' instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET - PAGE_OFFSET) by comparing the address prior to MMU initialization with where it should be once the MMU has been initialized, and place this constant into the above add/sub instructions. Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for the C-mode PHYS_OFFSET variable definition to use. At present, we are unable to support Realview with Sparsemem enabled as this uses a complex mapping function, and MSM as this requires a constant which will not fit in our math instruction. Add a module version magic string for this feature to prevent incompatible modules being loaded. Tested-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/Kconfig | 13 +++++++++ arch/arm/include/asm/memory.h | 55 ++++++++++++++++++++++++++-------- arch/arm/include/asm/module.h | 15 ++++++++-- arch/arm/kernel/armksyms.c | 4 +++ arch/arm/kernel/head.S | 68 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/module.c | 23 ++++++++++++++- arch/arm/kernel/vmlinux.lds.S | 4 +++ 7 files changed, 167 insertions(+), 15 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165b7eb0..4147f76e7988 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -191,6 +191,19 @@ config VECTORS_BASE help The base address of exception vectors. 
+config ARM_PATCH_PHYS_VIRT + bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)" + depends on EXPERIMENTAL + depends on !XIP_KERNEL && !THUMB2_KERNEL && MMU + depends on !ARCH_MSM + depends on !ARCH_REALVIEW || !SPARSEMEM + help + Patch phys-to-virt translation functions at runtime according to + the position of the kernel in system memory. + + This can only be used with non-XIP, non-Thumb2, MMU kernels where + the base of physical memory is at a 16MB boundary. + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2efec578a62e..7197879e1cb7 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -24,8 +24,6 @@ */ #define UL(x) _AC(x, UL) -#define PHYS_OFFSET PLAT_PHYS_OFFSET - #ifdef CONFIG_MMU /* @@ -134,16 +132,6 @@ #define DTCM_OFFSET UL(0xfffe8000) #endif -/* - * Physical vs virtual RAM address space conversion. These are - * private definitions which should NOT be used outside memory.h - * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. - */ -#ifndef __virt_to_phys -#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) -#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) -#endif - /* * Convert a physical address to a Page Frame Number and back */ @@ -158,6 +146,49 @@ #ifndef __ASSEMBLY__ +/* + * Physical vs virtual RAM address space conversion. These are + * private definitions which should NOT be used outside memory.h + * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. + */ +#ifndef __virt_to_phys +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + +extern unsigned long __pv_phys_offset; +#define PHYS_OFFSET __pv_phys_offset + +#define __pv_stub(from,to,instr) \ + __asm__("@ __pv_stub\n" \ + "1: " instr " %0, %1, %2\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "=r" (to) \ + : "r" (from), "I" (0x81000000)) + +static inline unsigned long __virt_to_phys(unsigned long x) +{ + unsigned long t; + __pv_stub(x, t, "add"); + return t; +} + +static inline unsigned long __phys_to_virt(unsigned long x) +{ + unsigned long t; + __pv_stub(x, t, "sub"); + return t; +} +#else +#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) +#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) +#endif +#endif + +#ifndef PHYS_OFFSET +#define PHYS_OFFSET PLAT_PHYS_OFFSET +#endif + /* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 12c8e680cbff..d072c21332ee 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -25,8 +25,19 @@ struct mod_arch_specific { }; /* - * Include the ARM architecture version. 
+ * Add the ARM architecture version to the version magic string */ -#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " " +#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " " + +/* Add __virt_to_phys patching state as well */ +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +#define MODULE_ARCH_VERMAGIC_P2V "p2v8 " +#else +#define MODULE_ARCH_VERMAGIC_P2V "" +#endif + +#define MODULE_ARCH_VERMAGIC \ + MODULE_ARCH_VERMAGIC_ARMVSN \ + MODULE_ARCH_VERMAGIC_P2V #endif /* _ASM_ARM_MODULE_H */ diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index e5e1e5387678..9615423c37dd 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -170,3 +170,7 @@ EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif + +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +EXPORT_SYMBOL(__pv_phys_offset); +#endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 03a588b6e15c..1db8ead2e331 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -97,6 +97,9 @@ ENTRY(stext) bl __vet_atags #ifdef CONFIG_SMP_ON_UP bl __fixup_smp +#endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + bl __fixup_pv_table #endif bl __create_page_tables @@ -438,4 +441,69 @@ smp_on_up: #endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + +/* __fixup_pv_table - patch the stub instructions with the delta between + * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and + * can be expressed by an immediate shifter operand. The stub instruction + * has a form of '(add|sub) rd, rn, #imm'. + */ + __HEAD +__fixup_pv_table: + adr r0, 1f + ldmia r0, {r3-r5, r7} + sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET + add r4, r4, r3 @ adjust table start address + add r5, r5, r3 @ adjust table end address + str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset + mov r6, r3, lsr #24 @ constant for add/sub instructions + teq r3, r6, lsl #24 @ must be 16MiB aligned + bne __error + str r6, [r7, #4] @ save to __pv_offset + b __fixup_a_pv_table +ENDPROC(__fixup_pv_table) + + .align +1: .long . + .long __pv_table_begin + .long __pv_table_end +2: .long __pv_phys_offset + + .text +__fixup_a_pv_table: + b 3f +2: ldr ip, [r7, r3] + bic ip, ip, #0x000000ff + orr ip, ip, r6 + str ip, [r7, r3] +3: cmp r4, r5 + ldrcc r7, [r4], #4 @ use branch for delay slot + bcc 2b + mov pc, lr +ENDPROC(__fixup_a_pv_table) + +ENTRY(fixup_pv_table) + stmfd sp!, {r4 - r7, lr} + ldr r2, 2f @ get address of __pv_phys_offset + mov r3, #0 @ no offset + mov r4, r0 @ r0 = table start + add r5, r0, r1 @ r1 = table size + ldr r6, [r2, #4] @ get __pv_offset + bl __fixup_a_pv_table + ldmfd sp!, {r4 - r7, pc} +ENDPROC(fixup_pv_table) + + .align +2: .long __pv_phys_offset + + .data + .globl __pv_phys_offset + .type __pv_phys_offset, %object +__pv_phys_offset: + .long 0 + .size __pv_phys_offset, . 
- __pv_phys_offset +__pv_offset: + .long 0 +#endif + #include "head-common.S" diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 2cfe8161b478..c5679f6d9f64 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -268,12 +268,28 @@ struct mod_unwind_map { const Elf_Shdr *txt_sec; }; +static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, + const Elf_Shdr *sechdrs, const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + + return NULL; +} + +extern void fixup_pv_table(const void *, unsigned long); + int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + const Elf_Shdr *s = NULL; #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; struct mod_unwind_map maps[ARM_SEC_MAX]; int i; @@ -314,6 +330,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, maps[i].unw_sec->sh_size, maps[i].txt_sec->sh_addr, maps[i].txt_sec->sh_size); +#endif +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT + s = find_mod_section(hdr, sechdrs, ".pv_table"); + if (s) + fixup_pv_table((void *)s->sh_addr, s->sh_size); #endif return 0; } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f2031..45b5651777ee 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -57,6 +57,10 @@ SECTIONS __smpalt_end = .; #endif + __pv_table_begin = .; + *(.pv_table) + __pv_table_end = .; + INIT_SETUP(16) INIT_CALLS -- cgit v1.2.3 From cada3c0841e1deaec4c0f92654610b028dc683ff Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 4 Jan 2011 19:39:29 +0000 Subject: ARM: P2V: extend to 16-bit translation offsets MSM's memory is aligned to 2MB, which is more than we can do with our existing method as we're limited to the upper 8 bits. Extend this by using two instructions to 16 bits, automatically selected when MSM is enabled. Acked-by: Tony Lindgren Reviewed-by: Nicolas Pitre Tested-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/Kconfig | 5 ++++- arch/arm/include/asm/memory.h | 21 +++++++++++++++++---- arch/arm/include/asm/module.h | 4 ++++ arch/arm/kernel/head.S | 15 ++++++++++++++- 4 files changed, 39 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4147f76e7988..b357c29e7dfc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -195,7 +195,6 @@ config ARM_PATCH_PHYS_VIRT bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)" depends on EXPERIMENTAL depends on !XIP_KERNEL && !THUMB2_KERNEL && MMU - depends on !ARCH_MSM depends on !ARCH_REALVIEW || !SPARSEMEM help Patch phys-to-virt translation functions at runtime according to @@ -204,6 +203,10 @@ config ARM_PATCH_PHYS_VIRT This can only be used with non-XIP, non-Thumb2, MMU kernels where the base of physical memory is at a 16MB boundary. 
+config ARM_PATCH_PHYS_VIRT_16BIT + def_bool y + depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 7197879e1cb7..2398b3fc0268 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -154,29 +154,42 @@ #ifndef __virt_to_phys #ifdef CONFIG_ARM_PATCH_PHYS_VIRT +/* + * Constants used to force the right instruction encodings and shifts + * so that all we need to do is modify the 8-bit constant field. + */ +#define __PV_BITS_31_24 0x81000000 +#define __PV_BITS_23_16 0x00810000 + extern unsigned long __pv_phys_offset; #define PHYS_OFFSET __pv_phys_offset -#define __pv_stub(from,to,instr) \ +#define __pv_stub(from,to,instr,type) \ __asm__("@ __pv_stub\n" \ "1: " instr " %0, %1, %2\n" \ " .pushsection .pv_table,\"a\"\n" \ " .long 1b\n" \ " .popsection\n" \ : "=r" (to) \ - : "r" (from), "I" (0x81000000)) + : "r" (from), "I" (type)) static inline unsigned long __virt_to_phys(unsigned long x) { unsigned long t; - __pv_stub(x, t, "add"); + __pv_stub(x, t, "add", __PV_BITS_31_24); +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + __pv_stub(t, t, "add", __PV_BITS_23_16); +#endif return t; } static inline unsigned long __phys_to_virt(unsigned long x) { unsigned long t; - __pv_stub(x, t, "sub"); + __pv_stub(x, t, "sub", __PV_BITS_31_24); +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + __pv_stub(t, t, "sub", __PV_BITS_23_16); +#endif return t; } #else diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index d072c21332ee..a2b775b81cfa 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -31,7 +31,11 @@ struct mod_arch_specific { /* Add __virt_to_phys patching state as well */ #ifdef CONFIG_ARM_PATCH_PHYS_VIRT +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT +#define MODULE_ARCH_VERMAGIC_P2V "p2v16 " +#else #define MODULE_ARCH_VERMAGIC_P2V "p2v8 " +#endif #else #define MODULE_ARCH_VERMAGIC_P2V "" #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 1db8ead2e331..a94dd99d54c3 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -456,8 +456,13 @@ __fixup_pv_table: add r4, r4, r3 @ adjust table start address add r5, r5, r3 @ adjust table end address str r8, [r7, r3]! @ save computed PHYS_OFFSET to __pv_phys_offset +#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT mov r6, r3, lsr #24 @ constant for add/sub instructions teq r3, r6, lsl #24 @ must be 16MiB aligned +#else + mov r6, r3, lsr #16 @ constant for add/sub instructions + teq r3, r6, lsl #16 @ must be 64kiB aligned +#endif bne __error str r6, [r7, #4] @ save to __pv_offset b __fixup_a_pv_table @@ -471,10 +476,18 @@ ENDPROC(__fixup_pv_table) .text __fixup_a_pv_table: +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT + and r0, r6, #255 @ offset bits 23-16 + mov r6, r6, lsr #8 @ offset bits 31-24 +#else + mov r0, #0 @ just in case... +#endif b 3f 2: ldr ip, [r7, r3] bic ip, ip, #0x000000ff - orr ip, ip, r6 + tst ip, #0x400 @ rotate shift tells us LS or MS byte + orrne ip, ip, r6 @ mask in offset bits 31-24 + orreq ip, ip, r0 @ mask in offset bits 23-16 str ip, [r7, r3] 3: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot -- cgit v1.2.3 From 3a6b1676c6f27f7fad1a3d6fab5a95f90b1e7402 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Feb 2011 17:28:28 +0100 Subject: ARM: 6675/1: use phys_addr_t instead of unsigned long in conversion code The unsigned long datatype is not sufficient for mapping physical addresses >= 4GB. 
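As a minimal illustration of that failure mode (a standalone sketch, not kernel code; the fixed-width types stand in for a 32-bit kernel's unsigned long and for a 64-bit phys_addr_t):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x100000;	/* first page frame at the 4GB boundary */

	/* old __pfn_to_phys(): the shift is evaluated in 32 bits and wraps */
	uint32_t bad = (uint32_t)(pfn << PAGE_SHIFT);	/* 0x0 */

	/* patched __pfn_to_phys(): widen to a 64-bit physical type first */
	uint64_t good = (uint64_t)pfn << PAGE_SHIFT;	/* 0x100000000 */

	printf("truncated: %#x, widened: %#llx\n", bad, (unsigned long long)good);
	return 0;
}

Widening the PFN before the shift, as the patched __pfn_to_phys() does with its phys_addr_t cast, preserves addresses at or above 4GB.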
This patch ensures that the address conversion code in asm/memory.h casts to the correct type when handling physical addresses. The internal v2p macros only deal with lowmem addresses, so these do not need to be modified. Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 2398b3fc0268..431077c5a867 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -15,6 +15,7 @@ #include #include +#include <linux/types.h> #include #include @@ -135,8 +136,8 @@ /* * Convert a physical address to a Page Frame Number and back */ -#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) /* * Convert a page to/from a physical address @@ -234,12 +235,12 @@ static inline unsigned long __phys_to_virt(unsigned long x) * translation for translating DMA addresses. Use the driver * DMA support - see dma-mapping.h. */ -static inline unsigned long virt_to_phys(const volatile void *x) +static inline phys_addr_t virt_to_phys(const volatile void *x) { return __virt_to_phys((unsigned long)(x)); } -static inline void *phys_to_virt(unsigned long x) +static inline void *phys_to_virt(phys_addr_t x) { return (void *)(__phys_to_virt((unsigned long)(x))); } -- cgit v1.2.3 From 885028e4ba4caf49d565c96481e1a05220ecb517 Mon Sep 17 00:00:00 2001 From: Srinidhi Kasagar Date: Thu, 17 Feb 2011 07:03:51 +0100 Subject: ARM: 6741/1: errata: pl310 cache sync operation may be faulty The effect of the cache sync operation is to drain the store buffer and wait for all internal buffers to be empty. Under normal conditions, the store buffer is able to merge normal memory writes within its 32-byte data buffers. Due to this erratum, present in r3p0, the effect of the cache sync operation on the store buffer still remains when the operation completes. This means that the store buffer is always asked to drain and this prevents it from merging any further writes, which can severely affect write traffic performance, especially for Normal Non-cacheable memory. The proposed workaround is to replace the normal offset of the cache sync operation (0x730) by another offset targeting an unmapped PL310 register 0x740. Signed-off-by: srinidhi kasagar Acked-by: Linus Walleij Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/Kconfig | 15 +++++++++++++++ arch/arm/include/asm/hardware/cache-l2x0.h | 1 + arch/arm/mm/cache-l2x0.c | 6 ++++++ 3 files changed, 22 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 26d45e5b636b..ba9fc213f344 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1177,6 +1177,21 @@ config ARM_ERRATA_743622 visible impact on the overall performance or power consumption of the processor. +config ARM_ERRATA_753970 + bool "ARM errata: cache sync operation may be faulty" + depends on CACHE_PL310 + help + This option enables the workaround for the 753970 PL310 (r3p0) erratum. + + Under some conditions the effect of the cache sync operation on + the store buffer still remains when the operation completes. + This means that the store buffer is always asked to drain and + this prevents it from merging any further writes.
The workaround + is to replace the normal offset of the cache sync operation (0x730) + by another offset targeting an unmapped PL310 register 0x740. + This has the same effect as the cache sync operation: store buffer + drain and waiting for all internal buffers to be empty. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5aeec1e1735c..16bd48031583 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -36,6 +36,7 @@ #define L2X0_RAW_INTR_STAT 0x21C #define L2X0_INTR_CLEAR 0x220 #define L2X0_CACHE_SYNC 0x730 +#define L2X0_DUMMY_REG 0x740 #define L2X0_INV_LINE_PA 0x770 #define L2X0_INV_WAY 0x77C #define L2X0_CLEAN_LINE_PA 0x7B0 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb95866..f2ce38e085d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask) static inline void cache_sync(void) { void __iomem *base = l2x0_base; + +#ifdef CONFIG_ARM_ERRATA_753970 + /* write to an unmapped register */ + writel_relaxed(0, base + L2X0_DUMMY_REG); +#else writel_relaxed(0, base + L2X0_CACHE_SYNC); +#endif cache_wait(base + L2X0_CACHE_SYNC, 1); } -- cgit v1.2.3 From b8272a61c16decd4c8627fc1181bdd174c922c3f Mon Sep 17 00:00:00 2001 From: Shiraz Hashim Date: Wed, 16 Feb 2011 07:40:29 +0100 Subject: ARM: 6722/1: SPEAr: sp810: switch to slow mode before reset In sysctl_soft_reset(), switch to slow mode before resetting the system via the system controller. This is required. Reviewed-by: Stanley Miao Signed-off-by: Shiraz Hashim Signed-off-by: Russell King --- arch/arm/include/asm/hardware/sp810.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index 721847dc68ab..e0d1c0cfa548 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -58,6 +58,9 @@ static inline void sysctl_soft_reset(void __iomem *base) { + /* switch to slow mode */ + writel(0x2, base + SCCTRL); + /* writing any value to SCSYSSTAT reg will reset system */ writel(0, base + SCSYSSTAT); } -- cgit v1.2.3 From 06824ba824b3e9f2fedb38bee79af0643198ed7f Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 20 Feb 2011 12:16:45 +0000 Subject: ARM: tlb: delay page freeing for SMP and ARMv7 CPUs We need to delay freeing any mapped page on SMP and ARMv7 systems to ensure that the data is not accessed by other CPUs, or is used for speculative prefetch with ARMv7. This includes not only mapped pages but also pages used for the page tables themselves. This avoids races with the MMU/other CPUs accessing pages after they've been freed but before we've invalidated the TLB. Signed-off-by: Russell King --- arch/arm/include/asm/tlb.h | 102 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 89 insertions(+), 13 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd12..e7690887b958 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -18,7 +18,6 @@ #define __ASMARM_TLB_H #include -#include #ifndef CONFIG_MMU @@ -27,7 +26,23 @@ #else /* !CONFIG_MMU */ +#include #include +#include + +/* + * We need to delay page freeing for SMP as other CPUs can access pages + * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, + * we need to apply this same delaying tactic to ensure correct operation. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) +#define tlb_fast_mode(tlb) 0 +#define FREE_PTE_NR 500 +#else +#define tlb_fast_mode(tlb) 1 +#define FREE_PTE_NR 0 +#endif /* * TLB handling. This allows us to remove pages from the page @@ -36,12 +51,58 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; + struct vm_area_struct *vma; unsigned long range_start; unsigned long range_end; + unsigned int nr; + struct page *pages[FREE_PTE_NR]; }; DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +/* + * This is unnecessarily complex. There's three ways the TLB shootdown + * code is used: + * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). + * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. + * 2. Unmapping all vmas. See exit_mmap(). + * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. Additionally, page tables will be freed. + * 3. Unmapping argument pages. See shift_arg_pages(). + * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. + * tlb->vma will be NULL. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->fullmm || !tlb->vma) + flush_tlb_mm(tlb->mm); + else if (tlb->range_end > 0) { + flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); + tlb->range_start = TASK_SIZE; + tlb->range_end = 0; + } +} + +static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) +{ + if (!tlb->fullmm) { + if (addr < tlb->range_start) + tlb->range_start = addr; + if (addr + PAGE_SIZE > tlb->range_end) + tlb->range_end = addr + PAGE_SIZE; + } +} + +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush(tlb); + if (!tlb_fast_mode(tlb)) { + free_pages_and_swap_cache(tlb->pages, tlb->nr); + tlb->nr = 0; + } +} + static inline struct mmu_gather * tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) { @@ -49,6 +110,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) tlb->mm = mm; tlb->fullmm = full_mm_flush; + tlb->vma = NULL; + tlb->nr = 0; return tlb; } @@ -56,8 +119,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { - if (tlb->fullmm) - flush_tlb_mm(tlb->mm); + tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ check_pgt_cache(); @@ -71,12 +133,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) static inline void tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) { - if (!tlb->fullmm) { - if (addr < tlb->range_start) - tlb->range_start = addr; - if (addr + PAGE_SIZE > tlb->range_end) - tlb->range_end = addr + PAGE_SIZE; - } + tlb_add_flush(tlb, addr); } /* @@ -89,6 +146,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (!tlb->fullmm) { flush_cache_range(vma, vma->vm_start, vma->vm_end); + tlb->vma = vma; tlb->range_start = TASK_SIZE; tlb->range_end = 0; } @@ -97,12 +155,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { - if (!tlb->fullmm && tlb->range_end > 0) - flush_tlb_range(vma, tlb->range_start, tlb->range_end); + if (!tlb->fullmm) + tlb_flush(tlb); +} + +static inline void tlb_remove_page(struct 
mmu_gather *tlb, struct page *page) +{ + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + } else { + tlb->pages[tlb->nr++] = page; + if (tlb->nr >= FREE_PTE_NR) + tlb_flush_mmu(tlb); + } +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long addr) +{ + pgtable_page_dtor(pte); + tlb_add_flush(tlb, addr); + tlb_remove_page(tlb, pte); +} -#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define tlb_migrate_finish(mm) do { } while (0) -- cgit v1.2.3 From 58e9c47fa0dd76693b2c85c010c7430a4de77c6d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 20 Feb 2011 12:27:49 +0000 Subject: ARM: tlb: move noMMU tlb_flush() to asm/tlb.h There's no need for noMMU to put tlb_flush() in asm/tlbflush.h - it's part of the tlb shootdown interface. Move it to asm/tlb.h instead, as per x86. Signed-off-by: Russell King --- arch/arm/include/asm/tlb.h | 3 +++ arch/arm/include/asm/tlbflush.h | 7 +------ 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index e7690887b958..82dfe5d0c41e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -22,6 +22,9 @@ #ifndef CONFIG_MMU #include + +#define tlb_flush(tlb) ((void) tlb) + #include #else /* !CONFIG_MMU */ diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a2..d2005de383b8 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -10,12 +10,7 @@ #ifndef _ASMARM_TLBFLUSH_H #define _ASMARM_TLBFLUSH_H - -#ifndef CONFIG_MMU - -#define tlb_flush(tlb) ((void) tlb) - -#else /* CONFIG_MMU */ +#ifdef CONFIG_MMU #include -- cgit v1.2.3 From f6b0fa02e8b0708d17d631afce456524eadf87ff Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 6 Feb 2011 15:48:39 +0000 Subject: ARM: pm: add generic CPU suspend/resume support This adds core support for saving and restoring CPU coprocessor registers across suspend/resume, with implementations for ARM920, ARM926, SA11x0, PXA25x, PXA27x, PXA3xx, V6 and V7 CPUs. Tested on Assabet and Tegra 2.
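A rough sketch of the calling pattern behind these hooks (standalone C with stand-in names, not kernel code; the real kernel dispatches through the processor vtable from assembly):

struct processor_sketch {
	unsigned int suspend_size;	/* bytes of co-processor state */
	void (*do_suspend)(void *buf);	/* save state (TTB, domain, control, ...) */
	void (*do_resume)(void *buf);	/* restore state on the wake-up path */
};

static void platform_sleep(const struct processor_sketch *proc)
{
	unsigned char state[64];	/* assumes suspend_size <= 64 in this sketch */

	if (proc->suspend_size > sizeof(state))
		return;			/* sketch-only bounds check */

	proc->do_suspend(state);	/* e.g. cpu_arm920_do_suspend fills the buffer */
	/* ... enter the low-power state; on wake-up the boot path jumps to
	 * cpu_resume, which finishes by invoking: */
	proc->do_resume(state);
}

In the real implementation, cpu_suspend carves this buffer out of the stack, stores its physical address in sleep_save_sp, and cpu_resume reloads it with the MMU still off before calling the do_resume hook.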
Tested-by: Colin Cross Tested-by: Kukjin Kim Signed-off-by: Russell King --- arch/arm/include/asm/glue-proc.h | 3 + arch/arm/include/asm/proc-fns.h | 7 +++ arch/arm/kernel/Makefile | 1 + arch/arm/kernel/asm-offsets.c | 9 +++ arch/arm/kernel/sleep.S | 109 ++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-arm1020.S | 3 + arch/arm/mm/proc-arm1020e.S | 3 + arch/arm/mm/proc-arm1022.S | 3 + arch/arm/mm/proc-arm1026.S | 3 + arch/arm/mm/proc-arm6_7.S | 6 ++ arch/arm/mm/proc-arm720.S | 3 + arch/arm/mm/proc-arm740.S | 3 + arch/arm/mm/proc-arm7tdmi.S | 3 + arch/arm/mm/proc-arm920.S | 37 +++++++++++++ arch/arm/mm/proc-arm922.S | 3 + arch/arm/mm/proc-arm925.S | 3 + arch/arm/mm/proc-arm926.S | 37 +++++++++++++ arch/arm/mm/proc-arm940.S | 3 + arch/arm/mm/proc-arm946.S | 3 + arch/arm/mm/proc-arm9tdmi.S | 3 + arch/arm/mm/proc-fa526.S | 3 + arch/arm/mm/proc-feroceon.S | 3 + arch/arm/mm/proc-mohawk.S | 3 + arch/arm/mm/proc-sa110.S | 3 + arch/arm/mm/proc-sa1100.S | 39 +++++++++++++ arch/arm/mm/proc-v6.S | 50 +++++++++++++++++ arch/arm/mm/proc-v7.S | 116 +++++++++++++++++++++++++++++---------- arch/arm/mm/proc-xsc3.S | 48 +++++++++++++++- arch/arm/mm/proc-xscale.S | 45 ++++++++++++++- 29 files changed, 522 insertions(+), 33 deletions(-) create mode 100644 arch/arm/kernel/sleep.S (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h index e3bf443f2d18..6469521d092f 100644 --- a/arch/arm/include/asm/glue-proc.h +++ b/arch/arm/include/asm/glue-proc.h @@ -256,6 +256,9 @@ #define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area) #define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm) #define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext) +#define cpu_suspend_size __glue(CPU_NAME,_suspend_size) +#define cpu_do_suspend __glue(CPU_NAME,_do_suspend) +#define cpu_do_resume __glue(CPU_NAME,_do_resume) #endif #endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 69802150be22..8ec535e11fd7 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -66,6 +66,11 @@ extern struct processor { * ignore 'ext'. 
*/ void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); + + /* Suspend/resume */ + unsigned int suspend_size; + void (*do_suspend)(void *); + void (*do_resume)(void *); } processor; #ifndef MULTI_CPU @@ -86,6 +91,8 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) #endif +extern void cpu_resume(void); + #include #ifdef CONFIG_MMU diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 185ee822c935..74554f1742d7 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o +obj-$(CONFIG_PM) += sleep.o obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 5302a917271b..927522cfc12e 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,14 @@ int main(void) #endif #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); +#endif +#ifdef MULTI_CPU + DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size)); + DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend)); + DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume)); +#endif +#ifdef MULTI_CACHE + DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); #endif BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S new file mode 100644 index 000000000000..2ba17946619e --- /dev/null +++ b/arch/arm/kernel/sleep.S @@ -0,0 +1,109 @@ +#include +#include +#include +#include +#include +#include + .text + +/* + * Save CPU state for a suspend + * r1 = v:p offset + * r3 = virtual return function + * Note: sp is decremented to allocate space for CPU state on stack + * r0-r3,r9,r10,lr corrupted + */ +ENTRY(cpu_suspend) + mov r9, lr +#ifdef MULTI_CPU + ldr r10, =processor + mov r2, sp @ current virtual SP + ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state + ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function + sub sp, sp, r0 @ allocate CPU state on stack + mov r0, sp @ save pointer + add ip, ip, r1 @ convert resume fn to phys + stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn + ldr r3, =sleep_save_sp + add r2, sp, r1 @ convert SP to phys + str r2, [r3] @ save phys SP + mov lr, pc + ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state +#else + mov r2, sp @ current virtual SP + ldr r0, =cpu_suspend_size + sub sp, sp, r0 @ allocate CPU state on stack + mov r0, sp @ save pointer + stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn + ldr r3, =sleep_save_sp + add r2, sp, r1 @ convert SP to phys + str r2, [r3] @ save phys SP + bl cpu_do_suspend +#endif + + @ flush data cache +#ifdef MULTI_CACHE + ldr r10, =cpu_cache + mov lr, r9 + ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] +#else + mov lr, r9 + b __cpuc_flush_kern_all +#endif +ENDPROC(cpu_suspend) + .ltorg + +/* + * r0 = control register value + * r1 = v:p offset (preserved by cpu_do_resume) + * r2 = phys page table base + * r3 = L1 section flags + */ +ENTRY(cpu_resume_mmu) + adr r4, cpu_resume_turn_mmu_on + mov r4, r4, lsr #20 + orr r3, r3, r4, lsl #20 + ldr r5, [r2, r4, lsl #2] @ save old mapping + 
str r3, [r2, r4, lsl #2] @ setup 1:1 mapping for mmu code + sub r2, r2, r1 + ldr r3, =cpu_resume_after_mmu + bic r1, r0, #CR_C @ ensure D-cache is disabled + b cpu_resume_turn_mmu_on +ENDPROC(cpu_resume_mmu) + .ltorg + .align 5 +cpu_resume_turn_mmu_on: + mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc + mrc p15, 0, r1, c0, c0, 0 @ read id reg + mov r1, r1 + mov r1, r1 + mov pc, r3 @ jump to virtual address +ENDPROC(cpu_resume_turn_mmu_on) +cpu_resume_after_mmu: + str r5, [r2, r4, lsl #2] @ restore old mapping + mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache + mov pc, lr +ENDPROC(cpu_resume_after_mmu) + +/* + * Note: Yes, part of the following code is located into the .data section. + * This is to allow sleep_save_sp to be accessed with a relative load + * while we can't rely on any MMU translation. We could have put + * sleep_save_sp in the .text section as well, but some setups might + * insist on it to be truly read-only. + */ + .data + .align +ENTRY(cpu_resume) + ldr r0, sleep_save_sp @ stack phys addr + msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off +#ifdef MULTI_CPU + ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn +#else + ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn + b cpu_do_resume +#endif +ENDPROC(cpu_resume) + +sleep_save_sp: + .word 0 @ preserve stack phys ptr here diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index bcf748d9f4e2..226e3d8351c2 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -493,6 +493,9 @@ arm1020_processor_functions: .word cpu_arm1020_dcache_clean_area .word cpu_arm1020_switch_mm .word cpu_arm1020_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1020_processor_functions, . - arm1020_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index ab7ec26657ea..86d9c2cf0bce 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -474,6 +474,9 @@ arm1020e_processor_functions: .word cpu_arm1020e_dcache_clean_area .word cpu_arm1020e_switch_mm .word cpu_arm1020e_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1020e_processor_functions, . - arm1020e_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 831c5e54e22f..83d3dd34f846 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -457,6 +457,9 @@ arm1022_processor_functions: .word cpu_arm1022_dcache_clean_area .word cpu_arm1022_switch_mm .word cpu_arm1022_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1022_processor_functions, . - arm1022_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index e3f7e9a166bf..686043ee7281 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -452,6 +452,9 @@ arm1026_processor_functions: .word cpu_arm1026_dcache_clean_area .word cpu_arm1026_switch_mm .word cpu_arm1026_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm1026_processor_functions, . - arm1026_processor_functions .section .rodata diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 6a7be1863edd..5f79dc4ce3fb 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -284,6 +284,9 @@ ENTRY(arm6_processor_functions) .word cpu_arm6_dcache_clean_area .word cpu_arm6_switch_mm .word cpu_arm6_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm6_processor_functions, . 
- arm6_processor_functions /* @@ -301,6 +304,9 @@ ENTRY(arm7_processor_functions) .word cpu_arm7_dcache_clean_area .word cpu_arm7_switch_mm .word cpu_arm7_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm7_processor_functions, . - arm7_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index c285395f44b2..665266da143c 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -185,6 +185,9 @@ ENTRY(arm720_processor_functions) .word cpu_arm720_dcache_clean_area .word cpu_arm720_switch_mm .word cpu_arm720_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm720_processor_functions, . - arm720_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 38b27dcba727..6f9d12effee1 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -130,6 +130,9 @@ ENTRY(arm740_processor_functions) .word cpu_arm740_dcache_clean_area .word cpu_arm740_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm740_processor_functions, . - arm740_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 0c9786de20af..e4c165ca6696 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -70,6 +70,9 @@ ENTRY(arm7tdmi_processor_functions) .word cpu_arm7tdmi_dcache_clean_area .word cpu_arm7tdmi_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 6109f278a904..219980ec8b6e 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,6 +387,40 @@ ENTRY(cpu_arm920_set_pte_ext) #endif mov pc, lr +/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ +.globl cpu_arm920_suspend_size +.equ cpu_arm920_suspend_size, 4 * 3 +#ifdef CONFIG_PM +ENTRY(cpu_arm920_do_suspend) + stmfd sp!, {r4 - r7, lr} + mrc p15, 0, r4, c13, c0, 0 @ PID + mrc p15, 0, r5, c3, c0, 0 @ Domain ID + mrc p15, 0, r6, c2, c0, 0 @ TTB address + mrc p15, 0, r7, c1, c0, 0 @ Control register + stmia r0, {r4 - r7} + ldmfd sp!, {r4 - r7, pc} +ENDPROC(cpu_arm920_do_suspend) + +ENTRY(cpu_arm920_do_resume) + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs + mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches + ldmia r0, {r4 - r7} + mcr p15, 0, r4, c13, c0, 0 @ PID + mcr p15, 0, r5, c3, c0, 0 @ Domain ID + mcr p15, 0, r6, c2, c0, 0 @ TTB address + mov r0, r7 @ control register + mov r2, r6, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_arm920_do_resume) +#else +#define cpu_arm920_do_suspend 0 +#define cpu_arm920_do_resume 0 +#endif + __CPUINIT .type __arm920_setup, #function @@ -432,6 +466,9 @@ arm920_processor_functions: .word cpu_arm920_dcache_clean_area .word cpu_arm920_switch_mm .word cpu_arm920_set_pte_ext + .word cpu_arm920_suspend_size + .word cpu_arm920_do_suspend + .word cpu_arm920_do_resume .size arm920_processor_functions, . 
- arm920_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index bb2f0f46a5e6..36154b1e792a 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -436,6 +436,9 @@ arm922_processor_functions: .word cpu_arm922_dcache_clean_area .word cpu_arm922_switch_mm .word cpu_arm922_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm922_processor_functions, . - arm922_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index c13e01accfe2..89c5e0009c4c 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -503,6 +503,9 @@ arm925_processor_functions: .word cpu_arm925_dcache_clean_area .word cpu_arm925_switch_mm .word cpu_arm925_set_pte_ext + .word 0 + .word 0 + .word 0 .size arm925_processor_functions, . - arm925_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 42eb4315740b..6a4bdb2c94a7 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -401,6 +401,40 @@ ENTRY(cpu_arm926_set_pte_ext) #endif mov pc, lr +/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ +.globl cpu_arm926_suspend_size +.equ cpu_arm926_suspend_size, 4 * 3 +#ifdef CONFIG_PM +ENTRY(cpu_arm926_do_suspend) + stmfd sp!, {r4 - r7, lr} + mrc p15, 0, r4, c13, c0, 0 @ PID + mrc p15, 0, r5, c3, c0, 0 @ Domain ID + mrc p15, 0, r6, c2, c0, 0 @ TTB address + mrc p15, 0, r7, c1, c0, 0 @ Control register + stmia r0, {r4 - r7} + ldmfd sp!, {r4 - r7, pc} +ENDPROC(cpu_arm926_do_suspend) + +ENTRY(cpu_arm926_do_resume) + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs + mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches + ldmia r0, {r4 - r7} + mcr p15, 0, r4, c13, c0, 0 @ PID + mcr p15, 0, r5, c3, c0, 0 @ Domain ID + mcr p15, 0, r6, c2, c0, 0 @ TTB address + mov r0, r7 @ control register + mov r2, r6, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_arm926_do_resume) +#else +#define cpu_arm926_do_suspend 0 +#define cpu_arm926_do_resume 0 +#endif + __CPUINIT .type __arm926_setup, #function @@ -456,6 +490,9 @@ arm926_processor_functions: .word cpu_arm926_dcache_clean_area .word cpu_arm926_switch_mm .word cpu_arm926_set_pte_ext + .word cpu_arm926_suspend_size + .word cpu_arm926_do_suspend + .word cpu_arm926_do_resume .size arm926_processor_functions, . - arm926_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 7b11cdb9935f..26aea3f71c26 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -363,6 +363,9 @@ ENTRY(arm940_processor_functions) .word cpu_arm940_dcache_clean_area .word cpu_arm940_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm940_processor_functions, . - arm940_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 1a5bbf080342..8063345406fe 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -419,6 +419,9 @@ ENTRY(arm946_processor_functions) .word cpu_arm946_dcache_clean_area .word cpu_arm946_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm946_processor_functions, . 
- arm946_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index db67e3134d7a..7b7ebd4d096d 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -70,6 +70,9 @@ ENTRY(arm9tdmi_processor_functions) .word cpu_arm9tdmi_dcache_clean_area .word cpu_arm9tdmi_switch_mm .word 0 @ cpu_*_set_pte + .word 0 + .word 0 + .word 0 .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 7c9ad621f0e6..fc2a4ae15cf4 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -195,6 +195,9 @@ fa526_processor_functions: .word cpu_fa526_dcache_clean_area .word cpu_fa526_switch_mm .word cpu_fa526_set_pte_ext + .word 0 + .word 0 + .word 0 .size fa526_processor_functions, . - fa526_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index b4597edbff97..d3883eed7a4a 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -554,6 +554,9 @@ feroceon_processor_functions: .word cpu_feroceon_dcache_clean_area .word cpu_feroceon_switch_mm .word cpu_feroceon_set_pte_ext + .word 0 + .word 0 + .word 0 .size feroceon_processor_functions, . - feroceon_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 4458ee6aa713..9d4f2ae63370 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -388,6 +388,9 @@ mohawk_processor_functions: .word cpu_mohawk_dcache_clean_area .word cpu_mohawk_switch_mm .word cpu_mohawk_set_pte_ext + .word 0 + .word 0 + .word 0 .size mohawk_processor_functions, . - mohawk_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 5aa8d59c2e85..46f09ed16b98 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -203,6 +203,9 @@ ENTRY(sa110_processor_functions) .word cpu_sa110_dcache_clean_area .word cpu_sa110_switch_mm .word cpu_sa110_set_pte_ext + .word 0 + .word 0 + .word 0 .size sa110_processor_functions, . 
- sa110_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 2ac4e6f10713..74483d1977fe 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -169,6 +169,42 @@ ENTRY(cpu_sa1100_set_pte_ext) #endif mov pc, lr +.globl cpu_sa1100_suspend_size +.equ cpu_sa1100_suspend_size, 4*4 +#ifdef CONFIG_PM +ENTRY(cpu_sa1100_do_suspend) + stmfd sp!, {r4 - r7, lr} + mrc p15, 0, r4, c3, c0, 0 @ domain ID + mrc p15, 0, r5, c2, c0, 0 @ translation table base addr + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c1, c0, 0 @ control reg + stmia r0, {r4 - r7} @ store cp regs + ldmfd sp!, {r4 - r7, pc} +ENDPROC(cpu_sa1100_do_suspend) + +ENTRY(cpu_sa1100_do_resume) + ldmia r0, {r4 - r7} @ load cp regs + mov r1, #0 + mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs + mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache + mcr p15, 0, r1, c9, c0, 0 @ invalidate RB + mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB + + mcr p15, 0, r4, c3, c0, 0 @ domain ID + mcr p15, 0, r5, c2, c0, 0 @ translation table base addr + mcr p15, 0, r6, c13, c0, 0 @ PID + mov r0, r7 @ control register + mov r2, r5, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_sa1100_do_resume) +#else +#define cpu_sa1100_do_suspend 0 +#define cpu_sa1100_do_resume 0 +#endif + __CPUINIT .type __sa1100_setup, #function @@ -218,6 +254,9 @@ ENTRY(sa1100_processor_functions) .word cpu_sa1100_dcache_clean_area .word cpu_sa1100_switch_mm .word cpu_sa1100_set_pte_ext + .word cpu_sa1100_suspend_size + .word cpu_sa1100_do_suspend + .word cpu_sa1100_do_resume .size sa1100_processor_functions, . - sa1100_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 59a7e1ffe7bc..832b6bdc192c 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -121,6 +121,53 @@ ENTRY(cpu_v6_set_pte_ext) #endif mov pc, lr +/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ +.globl cpu_v6_suspend_size +.equ cpu_v6_suspend_size, 4 * 8 +#ifdef CONFIG_PM +ENTRY(cpu_v6_do_suspend) + stmfd sp!, {r4 - r11, lr} + mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID + mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c3, c0, 0 @ Domain ID + mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0 + mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1 + mrc p15, 0, r9, c1, c0, 1 @ auxillary control register + mrc p15, 0, r10, c1, c0, 2 @ co-processor access control + mrc p15, 0, r11, c1, c0, 0 @ control register + stmia r0, {r4 - r11} + ldmfd sp!, {r4- r11, pc} +ENDPROC(cpu_v6_do_suspend) + +ENTRY(cpu_v6_do_resume) + mov ip, #0 + mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache + mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache + mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache + mcr p15, 0, ip, c7, c10, 4 @ drain write buffer + ldmia r0, {r4 - r11} + mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID + mcr p15, 0, r5, c13, c0, 1 @ Context ID + mcr p15, 0, r6, c3, c0, 0 @ Domain ID + mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0 + mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1 + mcr p15, 0, r9, c1, c0, 1 @ auxillary control register + mcr p15, 0, r10, c1, c0, 2 @ co-processor access control + mcr p15, 0, ip, c2, c0, 2 @ TTB control register + mcr p15, 0, ip, c7, c5, 4 @ ISB + mov r0, r11 @ control register + mov r2, r7, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, cpu_resume_l1_flags + b cpu_resume_mmu 
+ENDPROC(cpu_v6_do_resume) +cpu_resume_l1_flags: + ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) + ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) +#else +#define cpu_v6_do_suspend 0 +#define cpu_v6_do_resume 0 +#endif .type cpu_v6_name, #object @@ -206,6 +253,9 @@ ENTRY(v6_processor_functions) .word cpu_v6_dcache_clean_area .word cpu_v6_switch_mm .word cpu_v6_set_pte_ext + .word cpu_v6_suspend_size + .word cpu_v6_do_suspend + .word cpu_v6_do_resume .size v6_processor_functions, . - v6_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4e..a5187ddfb267 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -171,6 +171,87 @@ cpu_v7_name: .ascii "ARMv7 Processor" .align + /* + * Memory region attributes with SCTLR.TRE=1 + * + * n = TEX[0],C,B + * TR = PRRR[2n+1:2n] - memory type + * IR = NMRR[2n+1:2n] - inner cacheable property + * OR = NMRR[2n+17:2n+16] - outer cacheable property + * + * n TR IR OR + * UNCACHED 000 00 + * BUFFERABLE 001 10 00 00 + * WRITETHROUGH 010 10 10 10 + * WRITEBACK 011 10 11 11 + * reserved 110 + * WRITEALLOC 111 10 01 01 + * DEV_SHARED 100 01 + * DEV_NONSHARED 100 01 + * DEV_WC 001 10 + * DEV_CACHED 011 10 + * + * Other attributes: + * + * DS0 = PRRR[16] = 0 - device shareable property + * DS1 = PRRR[17] = 1 - device shareable property + * NS0 = PRRR[18] = 0 - normal shareable property + * NS1 = PRRR[19] = 1 - normal shareable property + * NOS = PRRR[24+n] = 1 - not outer shareable + */ +.equ PRRR, 0xff0a81a8 +.equ NMRR, 0x40e040e0 + +/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ +.globl cpu_v7_suspend_size +.equ cpu_v7_suspend_size, 4 * 8 +#ifdef CONFIG_PM +ENTRY(cpu_v7_do_suspend) + stmfd sp!, {r4 - r11, lr} + mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID + mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c3, c0, 0 @ Domain ID + mrc p15, 0, r7, c2, c0, 0 @ TTB 0 + mrc p15, 0, r8, c2, c0, 1 @ TTB 1 + mrc p15, 0, r9, c1, c0, 0 @ Control register + mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register + mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control + stmia r0, {r4 - r11} + ldmfd sp!, {r4 - r11, pc} +ENDPROC(cpu_v7_do_suspend) + +ENTRY(cpu_v7_do_resume) + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs + mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache + ldmia r0, {r4 - r11} + mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID + mcr p15, 0, r5, c13, c0, 1 @ Context ID + mcr p15, 0, r6, c3, c0, 0 @ Domain ID + mcr p15, 0, r7, c2, c0, 0 @ TTB 0 + mcr p15, 0, r8, c2, c0, 1 @ TTB 1 + mcr p15, 0, ip, c2, c0, 2 @ TTB control register + mcr p15, 0, r10, c1, c0, 1 @ Auxillary control register + mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control + ldr r4, =PRRR @ PRRR + ldr r5, =NMRR @ NMRR + mcr p15, 0, r4, c10, c2, 0 @ write PRRR + mcr p15, 0, r5, c10, c2, 1 @ write NMRR + isb + mov r0, r9 @ control register + mov r2, r7, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, cpu_resume_l1_flags + b cpu_resume_mmu +ENDPROC(cpu_v7_do_resume) +cpu_resume_l1_flags: + ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) + ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) +#else +#define cpu_v7_do_suspend 0 +#define cpu_v7_do_resume 0 +#endif + __CPUINIT /* @@ -276,36 +357,8 @@ __v7_setup: ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) ALT_UP(orr r4, r4, #TTB_FLAGS_UP) mcr p15, 0, r4, c2, c0, 1 @ load TTB1 - /* - * Memory region attributes with SCTLR.TRE=1 - * - * n = TEX[0],C,B - * TR = PRRR[2n+1:2n] - memory type - * 
IR = NMRR[2n+1:2n] - inner cacheable property - * OR = NMRR[2n+17:2n+16] - outer cacheable property - * - * n TR IR OR - * UNCACHED 000 00 - * BUFFERABLE 001 10 00 00 - * WRITETHROUGH 010 10 10 10 - * WRITEBACK 011 10 11 11 - * reserved 110 - * WRITEALLOC 111 10 01 01 - * DEV_SHARED 100 01 - * DEV_NONSHARED 100 01 - * DEV_WC 001 10 - * DEV_CACHED 011 10 - * - * Other attributes: - * - * DS0 = PRRR[16] = 0 - device shareable property - * DS1 = PRRR[17] = 1 - device shareable property - * NS0 = PRRR[18] = 0 - normal shareable property - * NS1 = PRRR[19] = 1 - normal shareable property - * NOS = PRRR[24+n] = 1 - not outer shareable - */ - ldr r5, =0xff0a81a8 @ PRRR - ldr r6, =0x40e040e0 @ NMRR + ldr r5, =PRRR @ PRRR + ldr r6, =NMRR @ NMRR mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif @@ -351,6 +404,9 @@ ENTRY(v7_processor_functions) .word cpu_v7_dcache_clean_area .word cpu_v7_switch_mm .word cpu_v7_set_pte_ext + .word 0 + .word 0 + .word 0 .size v7_processor_functions, . - v7_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index ec26355cb7c2..63d8b2044e84 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,9 +413,52 @@ ENTRY(cpu_xsc3_set_pte_ext) mov pc, lr .ltorg - .align +.globl cpu_xsc3_suspend_size +.equ cpu_xsc3_suspend_size, 4 * 8 +#ifdef CONFIG_PM +ENTRY(cpu_xsc3_do_suspend) + stmfd sp!, {r4 - r10, lr} + mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode + mrc p15, 0, r5, c15, c1, 0 @ CP access reg + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c3, c0, 0 @ domain ID + mrc p15, 0, r8, c2, c0, 0 @ translation table base addr + mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg + mrc p15, 0, r10, c1, c0, 0 @ control reg + bic r4, r4, #2 @ clear frequency change bit + stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs + ldmia sp!, {r4 - r10, pc} +ENDPROC(cpu_xsc3_do_suspend) + +ENTRY(cpu_xsc3_do_resume) + ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs + mov ip, #0 + mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB + mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer + mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. + mcr p15, 0, r5, c15, c1, 0 @ CP access reg + mcr p15, 0, r6, c13, c0, 0 @ PID + mcr p15, 0, r7, c3, c0, 0 @ domain ID + mcr p15, 0, r8, c2, c0, 0 @ translation table base addr + mcr p15, 0, r9, c1, c0, 1 @ auxiliary control reg + + @ temporarily map resume_turn_on_mmu into the page table, + @ otherwise prefetch abort occurs after MMU is turned on + mov r0, r10 @ control register + mov r2, r8, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =0x542e @ section flags + b cpu_resume_mmu +ENDPROC(cpu_xsc3_do_resume) +#else +#define cpu_xsc3_do_suspend 0 +#define cpu_xsc3_do_resume 0 +#endif + __CPUINIT .type __xsc3_setup, #function @@ -476,6 +519,9 @@ ENTRY(xsc3_processor_functions) .word cpu_xsc3_dcache_clean_area .word cpu_xsc3_switch_mm .word cpu_xsc3_set_pte_ext + .word cpu_xsc3_suspend_size + .word cpu_xsc3_do_suspend + .word cpu_xsc3_do_resume .size xsc3_processor_functions, . 
- xsc3_processor_functions .section ".rodata" diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 5a37c5e45c41..086038cd86ab 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -513,11 +513,49 @@ ENTRY(cpu_xscale_set_pte_ext) xscale_set_pte_ext_epilogue mov pc, lr - .ltorg - .align +.globl cpu_xscale_suspend_size +.equ cpu_xscale_suspend_size, 4 * 7 +#ifdef CONFIG_PM +ENTRY(cpu_xscale_do_suspend) + stmfd sp!, {r4 - r10, lr} + mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode + mrc p15, 0, r5, c15, c1, 0 @ CP access reg + mrc p15, 0, r6, c13, c0, 0 @ PID + mrc p15, 0, r7, c3, c0, 0 @ domain ID + mrc p15, 0, r8, c2, c0, 0 @ translation table base addr + mrc p15, 0, r9, c1, c1, 0 @ auxiliary control reg + mrc p15, 0, r10, c1, c0, 0 @ control reg + bic r4, r4, #2 @ clear frequency change bit + stmia r0, {r4 - r10} @ store cp regs + ldmfd sp!, {r4 - r10, pc} +ENDPROC(cpu_xscale_do_suspend) + +ENTRY(cpu_xscale_do_resume) + ldmia r0, {r4 - r10} @ load cp regs + mov ip, #0 + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB + mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. + mcr p15, 0, r5, c15, c1, 0 @ CP access reg + mcr p15, 0, r6, c13, c0, 0 @ PID + mcr p15, 0, r7, c3, c0, 0 @ domain ID + mcr p15, 0, r8, c2, c0, 0 @ translation table base addr + mcr p15, 0, r9, c1, c1, 0 @ auxiliary control reg + mov r0, r10 @ control register + mov r2, r8, lsr #14 @ get TTB0 base + mov r2, r2, lsl #14 + ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ + PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE + b cpu_resume_mmu +ENDPROC(cpu_xscale_do_resume) +#else +#define cpu_xscale_do_suspend 0 +#define cpu_xscale_do_resume 0 +#endif + __CPUINIT .type __xscale_setup, #function @@ -565,6 +603,9 @@ ENTRY(xscale_processor_functions) .word cpu_xscale_dcache_clean_area .word cpu_xscale_switch_mm .word cpu_xscale_set_pte_ext + .word cpu_xscale_suspend_size + .word cpu_xscale_do_suspend + .word cpu_xscale_do_resume .size xscale_processor_functions, . - xscale_processor_functions .section ".rodata" -- cgit v1.2.3 From 97594b0f35c0708cb9551c070b9693a52ec24ebf Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Tue, 22 Feb 2011 23:29:37 +0100 Subject: ARM: 6757/1: fix tlb.h induced linux/swap.h build failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 06824ba (ARM: tlb: delay page freeing for SMP and ARMv7 CPUs) introduced a build failure for builds with CONFIG_SWAP=n: In file included from arch/arm/mm/init.c:27: arch/arm/include/asm/tlb.h: In function 'tlb_flush_mmu': arch/arm/include/asm/tlb.h:101: error: implicit declaration of function 'release_pages' arch/arm/include/asm/tlb.h: In function 'tlb_remove_page': arch/arm/include/asm/tlb.h:165: error: implicit declaration of function 'page_cache_release' as linux/swap.h doesn't include linux/pagemap.h but actually needs it (see comments in linux/swap.h as to why this is.) Fix that by #including <linux/pagemap.h> in <asm/pgalloc.h> as it's done by x86.
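The underlying pattern is a header that uses declarations it never pulls in itself, so whether the build succeeds depends on include order. A reduced model of the bug and the fix (standalone C; file names and identifiers are hypothetical):

/* a.h: declares the helper */
#ifndef A_H
#define A_H
void helper(void);
#endif

/* b.h: before the fix it called helper() without including a.h, so it
 * compiled only when the .c file had already pulled in a.h; otherwise
 * the compiler reported an implicit declaration, exactly as in the
 * build log above. The fix states the dependency explicitly: */
#ifndef B_H
#define B_H
#include "a.h"

static inline void use_helper(void)
{
	helper();	/* declared via a.h */
}
#endif

Making each header include what it uses keeps every inclusion order building.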
Signed-off-by: Uwe Kleine-König Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/include/asm/pgalloc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 9763be04f77e..22de005f159c 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -10,6 +10,8 @@ #ifndef _ASMARM_PGALLOC_H #define _ASMARM_PGALLOC_H +#include <linux/pagemap.h> + #include #include #include -- cgit v1.2.3 From 2bbd7e9b74271b2d6a14b4840fc44afbea83774d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 8 Jan 2011 12:05:09 +0000 Subject: ARM: fix some sparse errors in generic ARM code arch/arm/kernel/return_address.c:37:6: warning: symbol 'return_address' was not declared. Should it be static? arch/arm/kernel/setup.c:76:14: warning: symbol 'processor_id' was not declared. Should it be static? arch/arm/kernel/traps.c:259:1: warning: symbol 'die_lock' was not declared. Should it be static? arch/arm/vfp/vfpmodule.c:156:6: warning: symbol 'vfp_raise_sigfpe' was not declared. Should it be static? Signed-off-by: Russell King --- arch/arm/include/asm/cputype.h | 3 ++- arch/arm/kernel/return_address.c | 1 + arch/arm/kernel/traps.c | 2 +- arch/arm/vfp/vfpmodule.c | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 20ae96cc0020..ed5bc9e05a4e 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -23,6 +23,8 @@ #define CPUID_EXT_ISAR4 "c2, 4" #define CPUID_EXT_ISAR5 "c2, 5" +extern unsigned int processor_id; + #ifdef CONFIG_CPU_CP15 #define read_cpuid(reg) \ ({ \ @@ -43,7 +45,6 @@ __val; \ }) #else -extern unsigned int processor_id; #define read_cpuid(reg) (processor_id) #define read_cpuid_ext(reg) 0 #endif diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index df246da4ceca..0b13a72f855d 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -9,6 +9,7 @@ * the Free Software Foundation. */ #include +#include #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) #include diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index ee57640ba2bb..7f53c3651c58 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt return ret; } -DEFINE_SPINLOCK(die_lock); +static DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. */ diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 0797cb528b46..25b89d817105 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -153,7 +153,7 @@ static struct notifier_block vfp_notifier_block = { * Raise a SIGFPE for the current process. * sicode describes the signal being raised.
*/ -void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) +static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) { siginfo_t info; -- cgit v1.2.3 From aaa50048f6ce44af66ce0389d4cc6a8348333271 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 25 Jan 2011 21:35:38 +0100 Subject: ARM: 6639/1: allow highmem on SMP platforms without h/w TLB ops broadcast In commit e616c591405c168f6dc3dfd1221e105adfe49b8d, highmem support was deactivated for SMP platforms without hardware TLB ops broadcast because usage of kmap_high_get() requires that IRQs be disabled when kmap_lock is locked which is incompatible with the IPI mechanism used by the software TLB ops broadcast invoked through flush_all_zero_pkmaps(). The reason for kmap_high_get() is to ensure that the currently kmap'd page usage count does not decrease to zero while we're using its existing virtual mapping in an atomic context. With a VIVT cache this is essential to do due to cache coherency issues, but with a VIPT cache this is only an optimization so not to pay the price of establishing a second mapping if an existing one can be used. However, on VIPT platforms without hardware TLB maintenance we can give up on that optimization in order to be able to use highmem. From ARMv7 onwards the TLB ops are broadcasted in hardware, so let's disable ARCH_NEEDS_KMAP_HIGH_GET only when CONFIG_SMP and CONFIG_CPU_TLB_V6 are defined. Signed-off-by: Nicolas Pitre Tested-by: Saeed Bishara Signed-off-by: Russell King --- arch/arm/include/asm/highmem.h | 29 +++++++++++++++++++++++++++-- arch/arm/mm/mmu.c | 10 ---------- 2 files changed, 27 insertions(+), 12 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 7080e2c8fa62..a4edd19dd3d6 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -19,11 +19,36 @@ extern pte_t *pkmap_page_table; +extern void *kmap_high(struct page *page); +extern void kunmap_high(struct page *page); + +/* + * The reason for kmap_high_get() is to ensure that the currently kmap'd + * page usage count does not decrease to zero while we're using its + * existing virtual mapping in an atomic context. With a VIVT cache this + * is essential to do, but with a VIPT cache this is only an optimization + * so not to pay the price of establishing a second mapping if an existing + * one can be used. However, on platforms without hardware TLB maintenance + * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since + * the locking involved must also disable IRQs which is incompatible with + * the IPI mechanism used by global TLB operations. + */ #define ARCH_NEEDS_KMAP_HIGH_GET +#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6) +#undef ARCH_NEEDS_KMAP_HIGH_GET +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT) +#error "The sum of features in your kernel config cannot be supported together" +#endif +#endif -extern void *kmap_high(struct page *page); +#ifdef ARCH_NEEDS_KMAP_HIGH_GET extern void *kmap_high_get(struct page *page); -extern void kunmap_high(struct page *page); +#else +static inline void *kmap_high_get(struct page *page) +{ + return NULL; +} +#endif /* * The following functions are already defined by diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 3c67e92f7d59..ff7b43b5885a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -827,16 +827,6 @@ static void __init sanity_check_meminfo(void) * rather difficult. 
*/ reason = "with VIPT aliasing cache"; - } else if (is_smp() && tlb_ops_need_broadcast()) { - /* - * kmap_high needs to occasionally flush TLB entries, - * however, if the TLB entries need to be broadcast - * we may deadlock: - * kmap_high(irqs off)->flush_all_zero_pkmaps-> - * flush_tlb_kernel_range->smp_call_function_many - * (must not be called with irqs off) - */ - reason = "without hardware TLB ops broadcasting"; } if (reason) { printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", -- cgit v1.2.3 From 425fc47adb5bb69f76285be77a09a3341a30799e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Feb 2011 14:31:09 +0100 Subject: ARM: 6668/1: ptrace: remove single-step emulation code PTRACE_SINGLESTEP is a ptrace request designed to offer single-stepping support to userspace when the underlying architecture has hardware support for this operation. On ARM, we set arch_has_single_step() to 1 and attempt to emulate hardware single-stepping by disassembling the current instruction to determine the next pc and placing a software breakpoint on that location. Unfortunately this has the following problems: 1.) Only a subset of ARMv7 instructions are supported 2.) Thumb-2 is unsupported 3.) The code is not SMP safe We could try to fix this code, but it turns out that because of the above issues it is rarely used in practice. GDB, for example, uses PTRACE_POKETEXT and PTRACE_PEEKTEXT to manage breakpoints itself and does not require any kernel assistance. This patch removes the single-step emulation code from ptrace meaning that the PTRACE_SINGLESTEP request will return -EIO on ARM. Portable code must check the return value from a ptrace call and handle the failure gracefully. Acked-by: Nicolas Pitre Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/processor.h | 12 -- arch/arm/include/asm/ptrace.h | 2 - arch/arm/include/asm/traps.h | 1 + arch/arm/kernel/ptrace.c | 383 +-------------------------------------- arch/arm/kernel/ptrace.h | 37 ---- arch/arm/kernel/signal.c | 9 - arch/arm/kernel/traps.c | 2 +- 7 files changed, 3 insertions(+), 443 deletions(-) delete mode 100644 arch/arm/kernel/ptrace.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeeeb..b439b41aeac1 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -29,19 +29,7 @@ #define STACK_TOP_MAX TASK_SIZE #endif -union debug_insn { - u32 arm; - u16 thumb; -}; - -struct debug_entry { - u32 address; - union debug_insn insn; -}; - struct debug_info { - int nsaved; - struct debug_entry bp[2]; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct perf_event *hbp[ARM_MAX_HBP_SLOTS]; #endif diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 783d50f32618..a8ff22b2a391 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -130,8 +130,6 @@ struct pt_regs { #ifdef __KERNEL__ -#define arch_has_single_step() (1) - #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 1b960d5ef6a5..f90756dc16dc 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr) extern void __init early_trap_init(void); extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); +extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); extern void 
*vectors_page; diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 19c6816db61e..eace844511f1 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -26,8 +26,6 @@ #include #include -#include "ptrace.h" - #define REG_PC 15 #define REG_PSR 16 /* @@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data) return ret; } -static inline int -read_u32(struct task_struct *task, unsigned long addr, u32 *res) -{ - int ret; - - ret = access_process_vm(task, addr, res, sizeof(*res), 0); - - return ret == sizeof(*res) ? 0 : -EIO; -} - -static inline int -read_instr(struct task_struct *task, unsigned long addr, u32 *res) -{ - int ret; - - if (addr & 1) { - u16 val; - ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0); - ret = ret == sizeof(val) ? 0 : -EIO; - *res = val; - } else { - u32 val; - ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0); - ret = ret == sizeof(val) ? 0 : -EIO; - *res = val; - } - return ret; -} - -/* - * Get value of register `rn' (in the instruction) - */ -static unsigned long -ptrace_getrn(struct task_struct *child, unsigned long insn) -{ - unsigned int reg = (insn >> 16) & 15; - unsigned long val; - - val = get_user_reg(child, reg); - if (reg == 15) - val += 8; - - return val; -} - -/* - * Get value of operand 2 (in an ALU instruction) - */ -static unsigned long -ptrace_getaluop2(struct task_struct *child, unsigned long insn) -{ - unsigned long val; - int shift; - int type; - - if (insn & 1 << 25) { - val = insn & 255; - shift = (insn >> 8) & 15; - type = 3; - } else { - val = get_user_reg (child, insn & 15); - - if (insn & (1 << 4)) - shift = (int)get_user_reg (child, (insn >> 8) & 15); - else - shift = (insn >> 7) & 31; - - type = (insn >> 5) & 3; - } - - switch (type) { - case 0: val <<= shift; break; - case 1: val >>= shift; break; - case 2: - val = (((signed long)val) >> shift); - break; - case 3: - val = (val >> shift) | (val << (32 - shift)); - break; - } - return val; -} - -/* - * Get value of operand 2 (in a LDR instruction) - */ -static unsigned long -ptrace_getldrop2(struct task_struct *child, unsigned long insn) -{ - unsigned long val; - int shift; - int type; - - val = get_user_reg(child, insn & 15); - shift = (insn >> 7) & 31; - type = (insn >> 5) & 3; - - switch (type) { - case 0: val <<= shift; break; - case 1: val >>= shift; break; - case 2: - val = (((signed long)val) >> shift); - break; - case 3: - val = (val >> shift) | (val << (32 - shift)); - break; - } - return val; -} - -#define OP_MASK 0x01e00000 -#define OP_AND 0x00000000 -#define OP_EOR 0x00200000 -#define OP_SUB 0x00400000 -#define OP_RSB 0x00600000 -#define OP_ADD 0x00800000 -#define OP_ADC 0x00a00000 -#define OP_SBC 0x00c00000 -#define OP_RSC 0x00e00000 -#define OP_ORR 0x01800000 -#define OP_MOV 0x01a00000 -#define OP_BIC 0x01c00000 -#define OP_MVN 0x01e00000 - -static unsigned long -get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn) -{ - u32 alt = 0; - - switch (insn & 0x0e000000) { - case 0x00000000: - case 0x02000000: { - /* - * data processing - */ - long aluop1, aluop2, ccbit; - - if ((insn & 0x0fffffd0) == 0x012fff10) { - /* - * bx or blx - */ - alt = get_user_reg(child, insn & 15); - break; - } - - - if ((insn & 0xf000) != 0xf000) - break; - - aluop1 = ptrace_getrn(child, insn); - aluop2 = ptrace_getaluop2(child, insn); - ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 
1 : 0; - - switch (insn & OP_MASK) { - case OP_AND: alt = aluop1 & aluop2; break; - case OP_EOR: alt = aluop1 ^ aluop2; break; - case OP_SUB: alt = aluop1 - aluop2; break; - case OP_RSB: alt = aluop2 - aluop1; break; - case OP_ADD: alt = aluop1 + aluop2; break; - case OP_ADC: alt = aluop1 + aluop2 + ccbit; break; - case OP_SBC: alt = aluop1 - aluop2 + ccbit; break; - case OP_RSC: alt = aluop2 - aluop1 + ccbit; break; - case OP_ORR: alt = aluop1 | aluop2; break; - case OP_MOV: alt = aluop2; break; - case OP_BIC: alt = aluop1 & ~aluop2; break; - case OP_MVN: alt = ~aluop2; break; - } - break; - } - - case 0x04000000: - case 0x06000000: - /* - * ldr - */ - if ((insn & 0x0010f000) == 0x0010f000) { - unsigned long base; - - base = ptrace_getrn(child, insn); - if (insn & 1 << 24) { - long aluop2; - - if (insn & 0x02000000) - aluop2 = ptrace_getldrop2(child, insn); - else - aluop2 = insn & 0xfff; - - if (insn & 1 << 23) - base += aluop2; - else - base -= aluop2; - } - read_u32(child, base, &alt); - } - break; - - case 0x08000000: - /* - * ldm - */ - if ((insn & 0x00108000) == 0x00108000) { - unsigned long base; - unsigned int nr_regs; - - if (insn & (1 << 23)) { - nr_regs = hweight16(insn & 65535) << 2; - - if (!(insn & (1 << 24))) - nr_regs -= 4; - } else { - if (insn & (1 << 24)) - nr_regs = -4; - else - nr_regs = 0; - } - - base = ptrace_getrn(child, insn); - - read_u32(child, base + nr_regs, &alt); - break; - } - break; - - case 0x0a000000: { - /* - * bl or b - */ - signed long displ; - /* It's a branch/branch link: instead of trying to - * figure out whether the branch will be taken or not, - * we'll put a breakpoint at both locations. This is - * simpler, more reliable, and probably not a whole lot - * slower than the alternative approach of emulating the - * branch. - */ - displ = (insn & 0x00ffffff) << 8; - displ = (displ >> 6) + 8; - if (displ != 0 && displ != 4) - alt = pc + displ; - } - break; - } - - return alt; -} - -static int -swap_insn(struct task_struct *task, unsigned long addr, - void *old_insn, void *new_insn, int size) -{ - int ret; - - ret = access_process_vm(task, addr, old_insn, size, 0); - if (ret == size) - ret = access_process_vm(task, addr, new_insn, size, 1); - return ret; -} - -static void -add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr) -{ - int nr = dbg->nsaved; - - if (nr < 2) { - u32 new_insn = BREAKINST_ARM; - int res; - - res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4); - - if (res == 4) { - dbg->bp[nr].address = addr; - dbg->nsaved += 1; - } - } else - printk(KERN_ERR "ptrace: too many breakpoints\n"); -} - -/* - * Clear one breakpoint in the user program. We copy what the hardware - * does and use bit 0 of the address to indicate whether this is a Thumb - * breakpoint or an ARM breakpoint. 
- */ -static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp) -{ - unsigned long addr = bp->address; - union debug_insn old_insn; - int ret; - - if (addr & 1) { - ret = swap_insn(task, addr & ~1, &old_insn.thumb, - &bp->insn.thumb, 2); - - if (ret != 2 || old_insn.thumb != BREAKINST_THUMB) - printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at " - "0x%08lx (0x%04x)\n", task->comm, - task_pid_nr(task), addr, old_insn.thumb); - } else { - ret = swap_insn(task, addr & ~3, &old_insn.arm, - &bp->insn.arm, 4); - - if (ret != 4 || old_insn.arm != BREAKINST_ARM) - printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at " - "0x%08lx (0x%08x)\n", task->comm, - task_pid_nr(task), addr, old_insn.arm); - } -} - -void ptrace_set_bpt(struct task_struct *child) -{ - struct pt_regs *regs; - unsigned long pc; - u32 insn; - int res; - - regs = task_pt_regs(child); - pc = instruction_pointer(regs); - - if (thumb_mode(regs)) { - printk(KERN_WARNING "ptrace: can't handle thumb mode\n"); - return; - } - - res = read_instr(child, pc, &insn); - if (!res) { - struct debug_info *dbg = &child->thread.debug; - unsigned long alt; - - dbg->nsaved = 0; - - alt = get_branch_address(child, pc, insn); - if (alt) - add_breakpoint(child, dbg, alt); - - /* - * Note that we ignore the result of setting the above - * breakpoint since it may fail. When it does, this is - * not so much an error, but a forewarning that we may - * be receiving a prefetch abort shortly. - * - * If we don't set this breakpoint here, then we can - * lose control of the thread during single stepping. - */ - if (!alt || predicate(insn) != PREDICATE_ALWAYS) - add_breakpoint(child, dbg, pc + 4); - } -} - -/* - * Ensure no single-step breakpoint is pending. Returns non-zero - * value if child was being single-stepped. - */ -void ptrace_cancel_bpt(struct task_struct *child) -{ - int i, nsaved = child->thread.debug.nsaved; - - child->thread.debug.nsaved = 0; - - if (nsaved > 2) { - printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); - nsaved = 2; - } - - for (i = 0; i < nsaved; i++) - clear_breakpoint(child, &child->thread.debug.bp[i]); -} - -void user_disable_single_step(struct task_struct *task) -{ - task->ptrace &= ~PT_SINGLESTEP; - ptrace_cancel_bpt(task); -} - -void user_enable_single_step(struct task_struct *task) -{ - task->ptrace |= PT_SINGLESTEP; -} - /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { - user_disable_single_step(child); + /* Nothing to do. */ } /* @@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) { siginfo_t info; - ptrace_cancel_bpt(tsk); - info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h deleted file mode 100644 index 3926605b82ea..000000000000 --- a/arch/arm/kernel/ptrace.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * linux/arch/arm/kernel/ptrace.h - * - * Copyright (C) 2000-2003 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include - -extern void ptrace_cancel_bpt(struct task_struct *); -extern void ptrace_set_bpt(struct task_struct *); -extern void ptrace_break(struct task_struct *, struct pt_regs *); - -/* - * Send SIGTRAP if we're single-stepping - */ -static inline void single_step_trap(struct task_struct *task) -{ - if (task->ptrace & PT_SINGLESTEP) { - ptrace_cancel_bpt(task); - send_sig(SIGTRAP, task, 1); - } -} - -static inline void single_step_clear(struct task_struct *task) -{ - if (task->ptrace & PT_SINGLESTEP) - ptrace_cancel_bpt(task); -} - -static inline void single_step_set(struct task_struct *task) -{ - if (task->ptrace & PT_SINGLESTEP) - ptrace_set_bpt(task); -} diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bca..7709668c4842 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -20,7 +20,6 @@ #include #include -#include "ptrace.h" #include "signal.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) if (restore_sigframe(regs, frame)) goto badframe; - single_step_trap(current); - return regs->ARM_r0; badframe: @@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) goto badframe; - single_step_trap(current); - return regs->ARM_r0; badframe: @@ -704,8 +699,6 @@ static void do_signal(struct pt_regs *regs, int syscall) if (try_to_freeze()) goto no_signal; - single_step_clear(current); - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; @@ -724,7 +717,6 @@ static void do_signal(struct pt_regs *regs, int syscall) if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } - single_step_set(current); return; } @@ -770,7 +762,6 @@ static void do_signal(struct pt_regs *regs, int syscall) sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); } } - single_step_set(current); } asmlinkage void diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 7f53c3651c58..21ac43f1c2d0 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -32,7 +33,6 @@ #include #include -#include "ptrace.h" #include "signal.h" static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; -- cgit v1.2.3 From 8f3112707fabc2f9f932a4ac1c5b92f3266e4662 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 24 Feb 2011 22:57:14 +0100 Subject: ARM: 6765/1: remove obsolete comment from asm/mach/arch.h Since commit 6fc31d54 this comment is no longer true. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/mach/arch.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 3a0893a76a3b..bf13b814c1b8 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -15,10 +15,6 @@ struct meminfo; struct sys_timer; struct machine_desc { - /* - * Note! 
The first two elements are used - * by assembler code in head.S, head-common.S - */ unsigned int nr; /* architecture number */ const char *name; /* architecture name */ unsigned long boot_params; /* tagged list */ -- cgit v1.2.3 From 80f0aad77f3e1e9d9e518b09ac46963d628ae2be Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 25 Feb 2011 17:54:52 +0100 Subject: ARM: 6766/1: Thumb-2: Reflect ARM/Thumb-2 configuration in module vermagic Loading Thumb-2 modules into an ARM kernel or vice-versa isn't guaranteed to work safely, since the kernel is not interworking-aware everywhere. This patch adds "thumb2" to the module vermagic when CONFIG_THUMB2_KERNEL is enabled, to help avoid accidental loading of modules into the wrong kernel. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/module.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index a2b775b81cfa..543b44916d2c 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -40,8 +40,16 @@ struct mod_arch_specific { #define MODULE_ARCH_VERMAGIC_P2V "" #endif +/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */ +#ifdef CONFIG_THUMB2_KERNEL +#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 " +#else +#define MODULE_ARCH_VERMAGIC_ARMTHUMB "" +#endif + #define MODULE_ARCH_VERMAGIC \ MODULE_ARCH_VERMAGIC_ARMVSN \ + MODULE_ARCH_VERMAGIC_ARMTHUMB \ MODULE_ARCH_VERMAGIC_P2V #endif /* _ASM_ARM_MODULE_H */ -- cgit v1.2.3 From 2839e06c95d12ada034cf9b63da60334c7c6358b Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 8 Mar 2011 06:59:54 +0100 Subject: ARM: 6795/1: l2x0: Errata fix for flush by Way operation can cause data corruption PL310 implements the Clean & Invalidate by Way L2 cache maintenance operation (offset 0x7FC). This operation runs in background so that PL310 can handle normal accesses while it is in progress. Under very rare circumstances, due to this erratum, write data can be lost when PL310 treats a cacheable write transaction during a Clean & Invalidate by Way operation. The workaround is to disable Write-Back and Cache Linefill via the Debug Control Register, perform the Clean & Invalidate by Way operation (0x7FC), and then re-enable Write-Back and Cache Linefill via the Debug Control Register. This patch also removes any OMAP dependency from the PL310 errata handling. Signed-off-by: Santosh Shilimkar Acked-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/Kconfig | 15 ++++++++++++--- arch/arm/include/asm/outercache.h | 1 + arch/arm/mm/cache-l2x0.c | 32 ++++++++++++++++++-------------- 3 files changed, 31 insertions(+), 17 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 65ea7bb57c4d..ef41f7e39f69 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1135,7 +1135,7 @@ config ARM_ERRATA_742231 config PL310_ERRATA_588369 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" - depends on CACHE_L2X0 && ARCH_OMAP4 + depends on CACHE_L2X0 help The PL310 L2 cache controller implements three types of Clean & Invalidate maintenance operations: by Physical Address @@ -1144,8 +1144,7 @@ config PL310_ERRATA_588369 clean operation followed immediately by an invalidate operation, both performing to the same memory location. This functionality is not correctly implemented in PL310 as clean lines are not - invalidated as a result of these operations. Note that this errata
Note that this errata - uses Texas Instrument's secure monitor api. + invalidated as a result of these operations. config ARM_ERRATA_720789 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" @@ -1172,6 +1171,16 @@ config ARM_ERRATA_743622 visible impact on the overall performance or power consumption of the processor. +config PL310_ERRATA_727915 + bool "Background Clean & Invalidate by Way operation can cause data corruption" + depends on CACHE_L2X0 + help + PL310 implements the Clean & Invalidate by Way L2 cache maintenance + operation (offset 0x7FC). This operation runs in background so that + PL310 can handle normal accesses while it is in progress. Under very + rare circumstances, due to this erratum, write data can be lost when + PL310 treats a cacheable write transaction during a Clean & + Invalidate by Way operation. endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index fc1900925275..348d513afa92 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -31,6 +31,7 @@ struct outer_cache_fns { #ifdef CONFIG_OUTER_CACHE_SYNC void (*sync)(void); #endif + void (*set_debug)(unsigned long); }; #ifdef CONFIG_OUTER_CACHE diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb95866..803bce8845a7 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -67,18 +67,24 @@ static inline void l2x0_inv_line(unsigned long addr) writel_relaxed(addr, base + L2X0_INV_LINE_PA); } -#ifdef CONFIG_PL310_ERRATA_588369 -static void debug_writel(unsigned long val) -{ - extern void omap_smc1(u32 fn, u32 arg); +#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) - /* - * Texas Instrument secure monitor api to modify the - * PL310 Debug Control Register. 
- */ - omap_smc1(0x100, val); +#define debug_writel(val) outer_cache.set_debug(val) + +static void l2x0_set_debug(unsigned long val) +{ + writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); } +#else +/* Optimised out for non-errata case */ +static inline void debug_writel(unsigned long val) +{ +} + +#define l2x0_set_debug NULL +#endif +#ifdef CONFIG_PL310_ERRATA_588369 static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; @@ -91,11 +97,6 @@ static inline void l2x0_flush_line(unsigned long addr) } #else -/* Optimised out for non-errata case */ -static inline void debug_writel(unsigned long val) -{ -} - static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; @@ -119,9 +120,11 @@ static void l2x0_flush_all(void) /* clean all ways */ spin_lock_irqsave(&l2x0_lock, flags); + debug_writel(0x03); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); cache_sync(); + debug_writel(0x00); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -329,6 +332,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) outer_cache.flush_all = l2x0_flush_all; outer_cache.inv_all = l2x0_inv_all; outer_cache.disable = l2x0_disable; + outer_cache.set_debug = l2x0_set_debug; printk(KERN_INFO "%s cache controller enabled\n", type); printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", -- cgit v1.2.3 From d7ed36a4ea84e3a850f9932e2058ceef987d1acd Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 2 Mar 2011 08:03:22 +0100 Subject: ARM: 6777/1: gic: Add hooks for architecture specific extensions A few architectures combine the GIC with an external interrupt controller. On such systems it may be necessary to update both the GIC registers and the external controller's registers to control IRQ behavior. This can be addressed in a couple of ways: 1. Export common GIC routines along with 'struct irq_chip gic_chip' and allow architectures to override individual functions. 2. Provide architecture-specific function pointer hooks within the GIC library and leave platforms to add the necessary code as part of these hooks. The first option might be non-intrusive, but it has a few shortcomings: each arch would need its own custom GIC library, the locks used should stay common since they protect the same IRQs, and from a maintenance point of view it spreads fixes across multiple files. The second is cleaner and more portable: it leaves the common GIC infrastructure untouched while still letting architectures address their specific requirements. Cc: Russell King Signed-off-by: Santosh Shilimkar Acked-by: Colin Cross Tested-by: Colin Cross Signed-off-by: Russell King --- arch/arm/common/gic.c | 47 +++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/hardware/gic.h | 1 + 2 files changed, 48 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index e21c1f4218d3..cb6b041c39d2 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -44,6 +44,19 @@ struct gic_chip_data { void __iomem *cpu_base; }; +/* + * Supported arch specific GIC irq extension. + * Default make them NULL.
+ */ +struct irq_chip gic_arch_extn = { + .irq_ack = NULL, + .irq_mask = NULL, + .irq_unmask = NULL, + .irq_retrigger = NULL, + .irq_set_type = NULL, + .irq_set_wake = NULL, +}; + #ifndef MAX_GIC_NR #define MAX_GIC_NR 1 #endif @@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d) static void gic_ack_irq(struct irq_data *d) { spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_ack) + gic_arch_extn.irq_ack(d); writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); spin_unlock(&irq_controller_lock); } @@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d) spin_lock(&irq_controller_lock); writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); + if (gic_arch_extn.irq_mask) + gic_arch_extn.irq_mask(d); spin_unlock(&irq_controller_lock); } @@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d) u32 mask = 1 << (d->irq % 32); spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_unmask) + gic_arch_extn.irq_unmask(d); writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); spin_unlock(&irq_controller_lock); } @@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) spin_lock(&irq_controller_lock); + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + val = readl(base + GIC_DIST_CONFIG + confoff); if (type == IRQ_TYPE_LEVEL_HIGH) val &= ~confmask; @@ -141,6 +163,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) return 0; } +static int gic_retrigger(struct irq_data *d) +{ + if (gic_arch_extn.irq_retrigger) + return gic_arch_extn.irq_retrigger(d); + + return -ENXIO; +} + #ifdef CONFIG_SMP static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) @@ -166,6 +196,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #endif +#ifdef CONFIG_PM +static int gic_set_wake(struct irq_data *d, unsigned int on) +{ + int ret = -ENXIO; + + if (gic_arch_extn.irq_set_wake) + ret = gic_arch_extn.irq_set_wake(d, on); + + return ret; +} + +#else +#define gic_set_wake NULL +#endif + static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) { struct gic_chip_data *chip_data = get_irq_data(irq); @@ -201,9 +246,11 @@ static struct irq_chip gic_chip = { .irq_mask = gic_mask_irq, .irq_unmask = gic_unmask_irq, .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, #ifdef CONFIG_SMP .irq_set_affinity = gic_set_affinity, #endif + .irq_set_wake = gic_set_wake, }; void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 84557d321001..0691f9dcc500 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -34,6 +34,7 @@ #ifndef __ASSEMBLY__ extern void __iomem *gic_cpu_base_addr; +extern struct irq_chip gic_arch_extn; void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *); void gic_secondary_init(unsigned int); -- cgit v1.2.3 From 5dab26af1bacad9a7189d904fbc8b4fe8e95dd81 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Mar 2011 12:38:54 +0100 Subject: ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9 On revisions of the Cortex-A9 prior to r2p0, the Store Buffer does not have any automatic draining mechanism and therefore a livelock may occur if an external agent continuously polls a memory location waiting to observe an update. 
This workaround defines cpu_relax() as smp_mb(), preventing correctly written polling loops from denying visibility of updates to memory. Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/Kconfig | 11 +++++++++++ arch/arm/include/asm/processor.h | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ec0f6589af05..d3f2de37a4b7 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1213,6 +1213,17 @@ config ARM_ERRATA_754322 the new ASID. This workaround places two dsb instructions in the mm switching code so that no page table walks can cross the ASID switch. +config ARM_ERRATA_754327 + bool "ARM errata: no automatic Store Buffer drain" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 754327 Cortex-A9 (prior to + r2p0) erratum. The Store Buffer does not have any automatic draining + mechanism and therefore a livelock may occur if an external agent + continuously polls a memory location waiting to observe an update. + This workaround defines cpu_relax() as smp_mb(), preventing correctly + written polling loops from denying visibility of updates to memory. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeeeb..7a1f03c10f1b 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -95,7 +95,7 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); -#if __LINUX_ARM_ARCH__ == 6 +#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) #define cpu_relax() smp_mb() #else #define cpu_relax() barrier() -- cgit v1.2.3 From 23bfdacf4eb525ff3404161429cedaa281c23e47 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Mar 2011 14:03:01 +0100 Subject: ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump The removal of the single-step emulation from ptrace on ARM means that thread_struct no longer has software breakpoint fields in its debug member. This patch fixes the a.out core dump code so that the debug registers are zeroed rather than trying to copy from non-existent fields. 
Cc: Nicolas Pitre Signed-off-by: Bryan Wu Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/a.out-core.h | 6 +----- arch/arm/include/asm/user.h | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h index 93d04acaa31f..92f10cb5c70c 100644 --- a/arch/arm/include/asm/a.out-core.h +++ b/arch/arm/include/asm/a.out-core.h @@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; dump->u_ssize = 0; - dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; - dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; - dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm; - dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm; - dump->u_debugreg[4] = tsk->thread.debug.nsaved; + memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg)); if (dump->start_stack < 0x04000000) dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index 05ac4b06876a..35917b3a97f9 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -71,7 +71,7 @@ struct user{ /* the registers. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ - int u_debugreg[8]; + int u_debugreg[8]; /* No longer used */ struct user_fp u_fp; /* FP state */ struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ /* the FP registers. */ -- cgit v1.2.3 From 10a8c3839810ac9af1aec836d61b92e7a879f5fa Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Mar 2011 14:00:30 +0100 Subject: ARM: 6806/1: irq: introduce entry and exit functions for chained handlers Some chained IRQ handlers are written to cope with primary chips of potentially different flow types. Whether this is a sensible thing to do is a point of contention. This patch introduces entry/exit functions for chained handlers which infer the flow type of the primary chip as fasteoi or level-type by checking whether or not the ->irq_eoi function pointer is present and calling back to the primary chip as necessary. Other methods of flow control are not considered. Acked-by: Thomas Gleixner Acked-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/mach/irq.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index 22ac140edd9e..febe495d0c6e 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -34,4 +34,35 @@ do { \ raw_spin_unlock(&desc->lock); \ } while(0) +#ifndef __ASSEMBLY__ +/* + * Entry/exit functions for chained handlers where the primary IRQ chip + * may implement either fasteoi or level-trigger flow control. + */ +static inline void chained_irq_enter(struct irq_chip *chip, + struct irq_desc *desc) +{ + /* FastEOI controllers require no action on entry.
*/ + if (chip->irq_eoi) + return; + + if (chip->irq_mask_ack) { + chip->irq_mask_ack(&desc->irq_data); + } else { + chip->irq_mask(&desc->irq_data); + if (chip->irq_ack) + chip->irq_ack(&desc->irq_data); + } +} + +static inline void chained_irq_exit(struct irq_chip *chip, + struct irq_desc *desc) +{ + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); + else + chip->irq_unmask(&desc->irq_data); +} +#endif + #endif -- cgit v1.2.3
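
To make the intended calling pattern of chained_irq_enter()/chained_irq_exit() concrete, here is a minimal sketch of a chained demultiplexing handler built on these helpers, modelled on the shape of gic_handle_cascade_irq() seen earlier in this series. It is an illustration only, not code from any patch above: the secondary controller (board_ic_base, BOARD_IC_STATUS, BOARD_IRQ_BASE) is hypothetical.

	#include <linux/io.h>
	#include <linux/irq.h>
	#include <linux/bitops.h>
	#include <asm/mach/irq.h>

	extern void __iomem *board_ic_base;	/* hypothetical secondary controller */
	#define BOARD_IC_STATUS	0x00		/* hypothetical: one status bit per input */
	#define BOARD_IRQ_BASE	64		/* hypothetical: Linux IRQ for status bit 0 */

	static void board_demux_irq(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = get_irq_chip(irq);	/* primary chip, e.g. the GIC */
		unsigned long status;
		int bit;

		/* mask/ack the parent IRQ, or do nothing if the chip is fasteoi */
		chained_irq_enter(chip, desc);

		/* demux: handle every pending input of the secondary controller */
		status = readl(board_ic_base + BOARD_IC_STATUS);
		for_each_set_bit(bit, &status, 32)
			generic_handle_irq(BOARD_IRQ_BASE + bit);

		/* eoi or unmask the parent IRQ */
		chained_irq_exit(chip, desc);
	}

Such a handler would then be installed on the parent interrupt with set_irq_chained_handler(parent_irq, board_demux_irq), after which the enter/exit helpers take care of the primary chip's flow control regardless of whether it is level-type or fasteoi.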