Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/atomic.h          | 132
-rw-r--r--  arch/arm/include/asm/elf.h             |   4
-rw-r--r--  arch/arm/include/asm/hwcap.h           |   1
-rw-r--r--  arch/arm/include/asm/io.h              |  50
-rw-r--r--  arch/arm/include/asm/irq.h             |   2
-rw-r--r--  arch/arm/include/asm/kexec.h           |  22
-rw-r--r--  arch/arm/include/asm/kgdb.h            |   6
-rw-r--r--  arch/arm/include/asm/local64.h         |   1
-rw-r--r--  arch/arm/include/asm/mach/arch.h       |   2
-rw-r--r--  arch/arm/include/asm/mach/irq.h        |   1
-rw-r--r--  arch/arm/include/asm/mach/map.h        |   2
-rw-r--r--  arch/arm/include/asm/mach/pci.h        |   1
-rw-r--r--  arch/arm/include/asm/mach/udc_pxa2xx.h |   4
-rw-r--r--  arch/arm/include/asm/memblock.h        |  16
-rw-r--r--  arch/arm/include/asm/memory.h          |  76
-rw-r--r--  arch/arm/include/asm/mmzone.h          |  30
-rw-r--r--  arch/arm/include/asm/processor.h       |   4
-rw-r--r--  arch/arm/include/asm/ptrace.h          |  36
-rw-r--r--  arch/arm/include/asm/setup.h           |   8
-rw-r--r--  arch/arm/include/asm/stackprotector.h  |  38
-rw-r--r--  arch/arm/include/asm/system.h          |   2
-rw-r--r--  arch/arm/include/asm/tls.h             |  46
-rw-r--r--  arch/arm/include/asm/vfpmacros.h       |  18
23 files changed, 308 insertions(+), 194 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index a0162fa94564..7e79503ab89b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		    : "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
 		    : "cc");
 	} while (res);
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)
 	: "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
 	: "=&r" (result)
-	: "r" (&v->counter)
+	: "r" (&v->counter), "Qo" (v->counter)
 	);
 
 	return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	u64 tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
-"1:	ldrexd	%0, %H0, [%1]\n"
-"	strexd	%0, %2, %H2, [%1]\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
+	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
-		"ldrexd		%1, %H1, [%2]\n"
+		"ldrexd		%1, %H1, [%3]\n"
 		"mov		%0, #0\n"
-		"teq		%1, %3\n"
-		"teqeq		%H1, %H3\n"
-		"strexdeq	%0, %4, %H4, [%2]"
-		: "=&r" (res), "=&r" (oldval)
+		"teq		%1, %4\n"
+		"teqeq		%H1, %H4\n"
+		"strexdeq	%0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "r" (old), "r" (new)
 		: "cc");
 	} while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	strexd	%1, %3, %H3, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	strexd	%1, %4, %H4, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%0, %0, #1\n"
 "	sbc	%H0, %H0, #0\n"
 "	teq	%H0, #0\n"
 "	bmi	2f\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter)
 	: "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	teq	%0, %4\n"
-"	teqeq	%H0, %H4\n"
+"1:	ldrexd	%0, %H0, [%4]\n"
+"	teq	%0, %5\n"
+"	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %5\n"
-"	adc	%H0, %H0, %H5\n"
-"	strexd	%2, %0, %H0, [%3]\n"
+"	adds	%0, %0, %6\n"
+"	adc	%H0, %H0, %H6\n"
+"	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
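The recurring change in atomic.h is the new "+Qo" operand for v->counter: the asm now declares the counter word itself as a read/write memory operand, so GCC knows the location is modified and will neither cache a stale copy nor reorder accesses around the asm, without needing a blanket "memory" clobber. The operand renumbering (%2 becomes %3, and so on) simply makes room for the new operand. A minimal stand-alone sketch of the same pattern, not the kernel macro itself, valid only on ARMv6+ where ldrex/strex exist:

	/* Stand-alone sketch of the "+Qo" pattern (assumes ARMv6+). */
	static inline void example_add(int i, int *counter)
	{
		int result, tmp;

		__asm__ __volatile__("@ example_add\n"
	"1:	ldrex	%0, [%3]\n"		/* load-exclusive the counter */
	"	add	%0, %0, %4\n"
	"	strex	%1, %0, [%3]\n"		/* fails if another CPU raced us */
	"	teq	%1, #0\n"
	"	bne	1b"
		: "=&r" (result), "=&r" (tmp), "+Qo" (*counter)
		: "r" (counter), "Ir" (i)
		: "cc");
	}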
[%2]\n" +"1: ldrexd %0, %H0, [%3]\n" +" adds %0, %0, %4\n" +" adc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (i) : "cc"); } @@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v) smp_mb(); __asm__ __volatile__("@ atomic64_add_return\n" -"1: ldrexd %0, %H0, [%2]\n" -" adds %0, %0, %3\n" -" adc %H0, %H0, %H3\n" -" strexd %1, %0, %H0, [%2]\n" +"1: ldrexd %0, %H0, [%3]\n" +" adds %0, %0, %4\n" +" adc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (i) : "cc"); @@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) unsigned long tmp; __asm__ __volatile__("@ atomic64_sub\n" -"1: ldrexd %0, %H0, [%2]\n" -" subs %0, %0, %3\n" -" sbc %H0, %H0, %H3\n" -" strexd %1, %0, %H0, [%2]\n" +"1: ldrexd %0, %H0, [%3]\n" +" subs %0, %0, %4\n" +" sbc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (i) : "cc"); } @@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) smp_mb(); __asm__ __volatile__("@ atomic64_sub_return\n" -"1: ldrexd %0, %H0, [%2]\n" -" subs %0, %0, %3\n" -" sbc %H0, %H0, %H3\n" -" strexd %1, %0, %H0, [%2]\n" +"1: ldrexd %0, %H0, [%3]\n" +" subs %0, %0, %4\n" +" sbc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (i) : "cc"); @@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) do { __asm__ __volatile__("@ atomic64_cmpxchg\n" - "ldrexd %1, %H1, [%2]\n" + "ldrexd %1, %H1, [%3]\n" "mov %0, #0\n" - "teq %1, %3\n" - "teqeq %H1, %H3\n" - "strexdeq %0, %4, %H4, [%2]" - : "=&r" (res), "=&r" (oldval) + "teq %1, %4\n" + "teqeq %H1, %H4\n" + "strexdeq %0, %5, %H5, [%3]" + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) : "r" (&ptr->counter), "r" (old), "r" (new) : "cc"); } while (res); @@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) smp_mb(); __asm__ __volatile__("@ atomic64_xchg\n" -"1: ldrexd %0, %H0, [%2]\n" -" strexd %1, %3, %H3, [%2]\n" +"1: ldrexd %0, %H0, [%3]\n" +" strexd %1, %4, %H4, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter) : "r" (&ptr->counter), "r" (new) : "cc"); @@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v) smp_mb(); __asm__ __volatile__("@ atomic64_dec_if_positive\n" -"1: ldrexd %0, %H0, [%2]\n" +"1: ldrexd %0, %H0, [%3]\n" " subs %0, %0, #1\n" " sbc %H0, %H0, #0\n" " teq %H0, #0\n" " bmi 2f\n" -" strexd %1, %0, %H0, [%2]\n" +" strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b\n" "2:" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter) : "cc"); @@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) smp_mb(); __asm__ __volatile__("@ atomic64_add_unless\n" -"1: ldrexd %0, %H0, [%3]\n" -" teq %0, %4\n" -" teqeq %H0, %H4\n" +"1: ldrexd %0, %H0, [%4]\n" +" teq %0, %5\n" +" teqeq %H0, %H5\n" " moveq %1, #0\n" " beq 2f\n" -" adds %0, %0, %5\n" -" adc %H0, %H0, %H5\n" -" strexd %2, %0, %H0, [%3]\n" +" adds %0, %0, %6\n" 
+" adc %H0, %H0, %H6\n" +" strexd %2, %0, %H0, [%4]\n" " teq %2, #0\n" " bne 1b\n" "2:" - : "=&r" (val), "=&r" (ret), "=&r" (tmp) + : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (u), "r" (a) : "cc"); diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 51662feb9f1d..6750b8e45a49 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -121,4 +121,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); extern void elf_set_personality(const struct elf32_hdr *); #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) +struct mm_struct; +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk + #endif diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index f7bd52b1c365..c1062c317103 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -19,6 +19,7 @@ #define HWCAP_NEON 4096 #define HWCAP_VFPv3 8192 #define HWCAP_VFPv3D16 16384 +#define HWCAP_TLS 32768 #if defined(__KERNEL__) && !defined(__ASSEMBLY__) /* diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index c980156f3263..1261b1f928d9 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -26,6 +26,7 @@ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/memory.h> +#include <asm/system.h> /* * ISA I/O bus memory addresses are 1:1 with the physical address. @@ -179,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * IO port primitives for more information. */ #ifdef __mem_pci -#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; }) -#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \ +#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; }) +#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \ __raw_readw(__mem_pci(c))); __v; }) -#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \ +#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \ __raw_readl(__mem_pci(c))); __v; }) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) + +#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c))) +#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ + cpu_to_le16(v),__mem_pci(c))) +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ + cpu_to_le32(v),__mem_pci(c))) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define __iormb() rmb() +#define __iowmb() wmb() +#else +#define __iormb() do { } while (0) +#define __iowmb() do { } while (0) +#endif + +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) #define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l) #define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l) #define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) -#define writeb(v,c) __raw_writeb(v,__mem_pci(c)) -#define writew(v,c) __raw_writew((__force __u16) \ - cpu_to_le16(v),__mem_pci(c)) -#define writel(v,c) __raw_writel((__force __u32) \ - cpu_to_le32(v),__mem_pci(c)) - #define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l) #define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l) #define writesl(p,d,l) 
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 237282f7c762..2721a5814cb9 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -7,6 +7,8 @@
 #define irq_canonicalize(i)	(i)
 #endif
 
+#define NR_IRQS_LEGACY	16
+
 /*
  * Use this value to indicate lack of interrupt
  * capability
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index df15a0dc228e..8ec9ef5c3c7b 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -19,10 +19,26 @@
 
 #ifndef __ASSEMBLY__
 
-struct kimage;
-/* Provide a dummy definition to avoid build failures. */
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ *
+ * Function copies machine registers from @oldregs to @newregs. If @oldregs is
+ * %NULL then current registers are stored there.
+ */
 static inline void crash_setup_regs(struct pt_regs *newregs,
-					struct pt_regs *oldregs) { }
+				    struct pt_regs *oldregs)
+{
+	if (oldregs) {
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	} else {
+		__asm__ __volatile__ ("stmia %0, {r0 - r15}"
+				      : : "r" (&newregs->ARM_r0));
+		__asm__ __volatile__ ("mrs %0, cpsr"
+				      : "=r" (newregs->ARM_cpsr));
+	}
+}
 
 #endif /* __ASSEMBLY__ */
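crash_setup_regs() is no longer a stub: given registers from the dying context, or a live inline snapshot of r0-r15 plus CPSR when @oldregs is NULL, the crash path can record a usable register file for the panic kernel. A hypothetical call site, sketched for illustration (crash_save_cpu() is the generic kexec helper):

	/* Hypothetical caller: snapshot registers before the crash kernel. */
	static void foo_die(struct pt_regs *oldregs)
	{
		struct pt_regs fixed_regs;

		crash_setup_regs(&fixed_regs, oldregs);
		crash_save_cpu(&fixed_regs, smp_processor_id());
	}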
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 67af4b841984..08265993227f 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -70,11 +70,11 @@ extern int kgdb_fault_expected;
 #define _GP_REGS		16
 #define _FP_REGS		8
 #define _EXTRA_REGS		2
-#define GDB_MAX_REGS		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define DBG_MAX_REG_NUM		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
 
 #define KGDB_MAX_NO_CPUS	1
 #define BUFMAX			400
-#define NUMREGBYTES		(GDB_MAX_REGS << 2)
+#define NUMREGBYTES		(DBG_MAX_REG_NUM << 2)
 #define NUMCRITREGBYTES		(32 << 2)
 
 #define _R0			0
@@ -93,7 +93,7 @@ extern int kgdb_fault_expected;
 #define _SPT			13
 #define _LR			14
 #define _PC			15
-#define _CPSR			(GDB_MAX_REGS - 1)
+#define _CPSR			(DBG_MAX_REG_NUM - 1)
 
 /*
  * So that we can denote the end of a frame for tracing,
diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h
new file mode 100644
index 000000000000..36c93b5cc239
--- /dev/null
+++ b/arch/arm/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index c59842dc7cb8..8a0dd18ba642 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -20,6 +20,7 @@ struct machine_desc {
 	 * by assembler code in head.S, head-common.S
 	 */
 	unsigned int		nr;		/* architecture number	*/
+	unsigned int		nr_irqs;	/* number of IRQs */
 	unsigned int		phys_io;	/* start of physical io	*/
 	unsigned int		io_pg_offst;	/* byte offset for io
 						 * page tabe entry	*/
@@ -37,6 +38,7 @@ struct machine_desc {
 	void			(*fixup)(struct machine_desc *,
 					 struct tag *, char **,
 					 struct meminfo *);
+	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
 	void			(*init_irq)(void);
 	struct sys_timer	*timer;		/* system tick timer	*/
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 8920b2d6e3b8..ce3eee9fe26c 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,6 +17,7 @@ struct seq_file;
 /*
  * This is internal.  Do not use it.
  */
+extern unsigned int arch_nr_irqs;
 extern void (*init_arch_irq)(void);
 extern void init_FIQ(void);
 extern int show_fiq_list(struct seq_file *, void *);
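Together with NR_IRQS_LEGACY and arch_nr_irqs above, the new machine_desc fields let each board state its own IRQ count (nr_irqs) and perform early memory reservations (reserve) instead of relying on a compile-time NR_IRQS and ad-hoc reservation hacks. A sketch of a board file using the new fields; every "foo_" name is invented for illustration:

	/* Hypothetical board support sketch using the new fields. */
	MACHINE_START(FOO, "Foo development board")
		.phys_io	= 0x80000000,
		.io_pg_offst	= ((0xf8000000) >> 18) & 0xfffc,
		.nr_irqs	= 96,		/* per-board IRQ count */
		.reserve	= foo_reserve,	/* early memblock reservations */
		.map_io		= foo_map_io,
		.init_irq	= foo_init_irq,
		.timer		= &foo_timer,
		.init_machine	= foo_init,
	MACHINE_END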
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 742c2aaeb020..d2fedb5aeb1f 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,6 +27,8 @@ struct map_desc {
 #define MT_MEMORY		9
 #define MT_ROM			10
 #define MT_MEMORY_NONCACHED	11
+#define MT_MEMORY_DTCM		12
+#define MT_MEMORY_ITCM		13
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 52f0da1e97df..16330bd0657c 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -46,6 +46,7 @@ struct pci_sys_data {
 					/* IRQ mapping				*/
 	int		(*map_irq)(struct pci_dev *, u8, u8);
 	struct hw_pci	*hw;
+	void		*private_data;	/* platform controller private data	*/
 };
 
 /*
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
index f3eabf1ecec3..833306ee9e7f 100644
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -21,8 +21,8 @@ struct pxa2xx_udc_mach_info {
 	 * here.  Note that sometimes the signals go through inverters...
	 */
 	bool	gpio_vbus_inverted;
-	u16	gpio_vbus;		/* high == vbus present */
+	int	gpio_vbus;		/* high == vbus present */
 	bool	gpio_pullup_inverted;
-	u16	gpio_pullup;		/* high == pullup activated */
+	int	gpio_pullup;		/* high == pullup activated */
 };
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
new file mode 100644
index 000000000000..fdbc43b2e6c0
--- /dev/null
+++ b/arch/arm/include/asm/memblock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_ARM_MEMBLOCK_H
+#define _ASM_ARM_MEMBLOCK_H
+
+#ifdef CONFIG_MMU
+extern phys_addr_t lowmem_end_addr;
+#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
+#else
+#define MEMBLOCK_REAL_LIMIT	0
+#endif
+
+struct meminfo;
+struct machine_desc;
+
+extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
+
+#endif
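The new asm/memblock.h ties ARM into the generic memblock allocator: arm_memblock_init() registers the meminfo banks and then invokes the board's reserve() hook, with MEMBLOCK_REAL_LIMIT capping boot-time allocations to lowmem. A minimal sketch of such a hook; the address and purpose are invented for illustration:

	/* Hypothetical .reserve hook: keep a bootloader framebuffer alive. */
	#include <linux/memblock.h>

	static void __init foo_reserve(void)
	{
		memblock_reserve(0x4fc00000, SZ_4M);	/* splash screen */
	}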
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4312ee5e3d0b..23c2e8e5c0fa 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -124,6 +124,15 @@
 #endif /* !CONFIG_MMU */
 
 /*
+ * We fix the TCM memories max 32 KiB ITCM resp DTCM at these
+ * locations
+ */
+#ifdef CONFIG_HAVE_TCM
+#define ITCM_OFFSET	UL(0xfffe0000)
+#define DTCM_OFFSET	UL(0xfffe8000)
+#endif
+
+/*
  * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -158,7 +167,7 @@
 #endif
 
 #ifndef arch_adjust_zones
-#define arch_adjust_zones(node,size,holes) do { } while (0)
+#define arch_adjust_zones(size,holes) do { } while (0)
 #elif !defined(CONFIG_ZONE_DMA)
 #error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
 #endif
@@ -234,76 +243,11 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 *   virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *   virt_addr_valid(k)	indicates whether a virtual address is valid
 */
-#ifndef CONFIG_DISCONTIGMEM
-
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
-#define PHYS_TO_NID(addr)	(0)
-
-#else /* CONFIG_DISCONTIGMEM */
-
-/*
- * This is more complex.  We have a set of mem_map arrays spread
- * around in memory.
- */
-#include <linux/numa.h>
-
-#define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
-#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)
-
-#define virt_to_page(kaddr) \
-	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-
-#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < MAX_NUMNODES)
-
-/*
- * Common discontigmem stuff.
- *  PHYS_TO_NID is used by the ARM kernel/setup.c
- */
-#define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)
-
-/*
- * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
- * and returns the mem_map of that node.
- */
-#define ADDR_TO_MAPBASE(kaddr)	NODE_MEM_MAP(KVADDR_TO_NID(kaddr))
-
-/*
- * Given a page frame number, find the owning node of the memory
- * and returns the mem_map of that node.
- */
-#define PFN_TO_MAPBASE(pfn)	NODE_MEM_MAP(PFN_TO_NID(pfn))
-
-#ifdef NODE_MEM_SIZE_BITS
-#define NODE_MEM_SIZE_MASK	((1 << NODE_MEM_SIZE_BITS) - 1)
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define KVADDR_TO_NID(addr) \
-	(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MEM_SIZE_BITS)
-
-/*
- * Given a page frame number, convert it to a node id.
- */
-#define PFN_TO_NID(pfn) \
-	(((pfn) - PHYS_PFN_OFFSET) >> (NODE_MEM_SIZE_BITS - PAGE_SHIFT))
-
-/*
- * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
- * and returns the index corresponding to the appropriate page in the
- * node's mem_map.
- */
-#define LOCAL_MAP_NR(addr) \
-	(((unsigned long)(addr) & NODE_MEM_SIZE_MASK) >> PAGE_SHIFT)
-
-#endif /* NODE_MEM_SIZE_BITS */
-
-#endif /* !CONFIG_DISCONTIGMEM */
-
 /*
  * Optional coherency support.  Currently used only by selected
 * Intel XSC3-based systems.
diff --git a/arch/arm/include/asm/mmzone.h b/arch/arm/include/asm/mmzone.h
deleted file mode 100644
index ae63a4fd28c8..000000000000
--- a/arch/arm/include/asm/mmzone.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * arch/arm/include/asm/mmzone.h
- *
- * 1999-12-29	Nicolas Pitre		Created
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_MMZONE_H
-#define __ASM_MMZONE_H
-
-/*
- * Currently defined in arch/arm/mm/discontig.c
- */
-extern pg_data_t discontig_node_data[];
-
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(nid)	(&discontig_node_data[nid])
-
-/*
- * NODE_MEM_MAP gives the kaddr for the mem_map of the node.
- */
-#define NODE_MEM_MAP(nid)	(NODE_DATA(nid)->node_mem_map)
-
-#include <mach/memory.h>
-
-#endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 6a89567ffc5b..7bed3daf83b8 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -91,7 +91,11 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+#if __LINUX_ARM_ARCH__ == 6
+#define cpu_relax()			smp_mb()
+#else
 #define cpu_relax()			barrier()
+#endif
 
 /*
  * Create a new kernel thread
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 9dcb11e59026..c974be8913a7 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -184,6 +184,42 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define predicate(x)		((x) & 0xf0000000)
 #define PREDICATE_ALWAYS	0xe0000000
 
+/*
+ * kprobe-based event tracer support
+ */
+#include <linux/stddef.h>
+#include <linux/types.h>
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+					       unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:	pt_regs from which register value is gotten
+ * @offset:	offset number of the register.
+ *
+ * regs_get_register returns the value of a register whose offset from @regs.
+ * The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+					      unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->ARM_sp;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
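These ptrace.h helpers exist so the kprobe-based event tracer can look registers up by name and fetch them by their pt_regs offset. A sketch of a probe handler using them; the handler and the pattern of use are hypothetical:

	/* Hypothetical kprobe pre-handler using the new accessors. */
	#include <linux/kprobes.h>
	#include <asm/ptrace.h>

	static int foo_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		int off = regs_query_register_offset("r0");

		if (off >= 0)	/* negative means the name was unknown */
			pr_info("r0=%lx sp=%lx\n",
				regs_get_register(regs, off),
				kernel_stack_pointer(regs));
		return 0;
	}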
This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. */ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 5f4f48002734..8ba1ccf82a02 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -83,7 +83,7 @@ void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), - int sig, const char *name); + int sig, int code, const char *name); #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h new file mode 100644 index 000000000000..e71d6ff8d104 --- /dev/null +++ b/arch/arm/include/asm/tls.h @@ -0,0 +1,46 @@ +#ifndef __ASMARM_TLS_H +#define __ASMARM_TLS_H + +#ifdef __ASSEMBLY__ + .macro set_tls_none, tp, tmp1, tmp2 + .endm + + .macro set_tls_v6k, tp, tmp1, tmp2 + mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + .endm + + .macro set_tls_v6, tp, tmp1, tmp2 + ldr \tmp1, =elf_hwcap + ldr \tmp1, [\tmp1, #0] + mov \tmp2, #0xffff0fff + tst \tmp1, #HWCAP_TLS @ hardware TLS available? + mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 + .endm + + .macro set_tls_software, tp, tmp1, tmp2 + mov \tmp1, #0xffff0fff + str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 + .endm +#endif + +#ifdef CONFIG_TLS_REG_EMUL +#define tls_emu 1 +#define has_tls_reg 1 +#define set_tls set_tls_none +#elif __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define tls_emu 0 +#define has_tls_reg 1 +#define set_tls set_tls_v6k +#elif __LINUX_ARM_ARCH__ == 6 +#define tls_emu 0 +#define has_tls_reg (elf_hwcap & HWCAP_TLS) +#define set_tls set_tls_v6 +#else +#define tls_emu 0 +#define has_tls_reg 0 +#define set_tls set_tls_software +#endif + +#endif /* __ASMARM_TLS_H */ diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 422f3cc204a2..3d5fc41ae8d3 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -3,6 +3,8 @@ * * Assembler-only file containing VFP macros and register definitions. */ +#include <asm/hwcap.h> + #include "vfp.h" @ Macros to allow building with old toolkits (with no VFP support) @@ -22,12 +24,20 @@ LDC p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d0-d15} #endif #ifdef CONFIG_VFPv3 +#if __LINUX_ARM_ARCH__ <= 6 + ldr \tmp, =elf_hwcap @ may not have MVFR regs + ldr \tmp, [\tmp, #0] + tst \tmp, #HWCAP_VFPv3D16 + ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addne \base, \base, #32*4 @ step over unused register space +#else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field cmp \tmp, #2 @ 32 x 64bit registers? 
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 422f3cc204a2..3d5fc41ae8d3 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -3,6 +3,8 @@
 *
 * Assembler-only file containing VFP macros and register definitions.
 */
+#include <asm/hwcap.h>
+
 #include "vfp.h"
 
 @ Macros to allow building with old toolkits (with no VFP support)
@@ -22,12 +24,20 @@
 	LDC	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d0-d15}
 #endif
 #ifdef CONFIG_VFPv3
+#if __LINUX_ARM_ARCH__ <= 6
+	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs
+	ldr	\tmp, [\tmp, #0]
+	tst	\tmp, #HWCAP_VFPv3D16
+	ldceq	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
+	addne	\base, \base, #32*4		    @ step over unused register space
+#else
 	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field
 	cmp	\tmp, #2			    @ 32 x 64bit registers?
 	ldceql	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		    @ step over unused register space
 #endif
+#endif
	.endm
 
 	@ write all the working registers out of the VFP
@@ -38,10 +48,18 @@
 	STC	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d0-d15}
 #endif
 #ifdef CONFIG_VFPv3
+#if __LINUX_ARM_ARCH__ <= 6
+	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs
+	ldr	\tmp, [\tmp, #0]
+	tst	\tmp, #HWCAP_VFPv3D16
+	stceq	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
+	addne	\base, \base, #32*4		    @ step over unused register space
+#else
 	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field
 	cmp	\tmp, #2			    @ 32 x 64bit registers?
 	stceql	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		    @ step over unused register space
 #endif
+#endif
 	.endm
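On ARMv6 the MVFR feature registers may be unimplemented, so the save/restore macros above now consult elf_hwcap for HWCAP_VFPv3D16 instead of reading MVFR0 to decide whether d16-d31 exist. User space can draw the same distinction from the ELF auxiliary vector; a sketch using the modern getauxval() interface (glibc 2.16+, so newer than this commit):

	/* User-space sketch: detect a D16-only VFPv3 via HWCAP bits
	 * (values match asm/hwcap.h above). */
	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_VFPv3	8192
	#define HWCAP_VFPv3D16	16384

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_VFPv3)
			printf("VFPv3: %s\n", (hwcap & HWCAP_VFPv3D16) ?
			       "d0-d15 only" : "d0-d31 available");
		return 0;
	}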