Diffstat (limited to 'arch/arm/include')
24 files changed, 151 insertions, 230 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 514e398f1a07..d3db39860b9c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 2ef95813fce0..eb87200aa4b5 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -250,6 +250,7 @@
  * Beware, it also clobers LR.
  */
 .macro safe_svcmode_maskall reg:req
+#if __LINUX_ARM_ARCH__ >= 6
         mrs     \reg , cpsr
         mov     lr , \reg
         and     lr , lr , #MODE_MASK
@@ -266,6 +267,13 @@ THUMB(  orr     \reg , \reg , #PSR_T_BIT )
         __ERET
 1:      msr     cpsr_c, \reg
 2:
+#else
+/*
+ * workaround for possibly broken pre-v6 hardware
+ * (akita, Sharp Zaurus C-1000, PXA270-based)
+ */
+        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
+#endif
 .endm

 /*
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index d797223b39d5..2744f0602550 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -15,6 +15,7 @@

 struct cpuinfo_arm {
         struct cpu      cpu;
+        u32             cpuid;
 #ifdef CONFIG_SMP
         unsigned int    loops_per_jiffy;
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb47d28cbe1f..a59dcb5ab5fc 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4 "c2, 4"
 #define CPUID_EXT_ISAR5 "c2, 5"

+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+        ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;

 #ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index a0ada3ea4358..f2e5cad3f306 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-        void __iomem *base = cti->base;
-        unsigned long val;
-
-        val = __raw_readl(base + LOCKSTATUS);
-
-        if (val & 1) {
-                val = LOCKCODE;
-                __raw_writel(val, base + LOCKACCESS);
-        }
+        __raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }

 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-        void __iomem *base = cti->base;
-        unsigned long val;
-
-        val = __raw_readl(base + LOCKSTATUS);
-
-        if (!(val & 1)) {
-                val = ~LOCKCODE;
-                __raw_writel(val, base + LOCKACCESS);
-        }
+        __raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index c4c87bc12231..3b2c40b5bfa2 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -102,6 +102,10 @@

 #define L2X0_ADDR_FILTER_EN 1

+#define L2X0_CTRL_EN 1
+
+#define L2X0_WAY_SIZE_SHIFT 3
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
@@ -126,6 +130,7 @@ struct l2x0_regs {
         unsigned long filter_end;
         unsigned long prefetch_ctrl;
         unsigned long pwr_ctrl;
+        unsigned long ctrl;
 };

 extern struct l2x0_regs l2x0_saved_regs;
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index e14af1a1a320..2bebad36fc83 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -47,7 +47,7 @@
 struct device_node;
 struct pt_regs;

-void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources,
+void __vic_init(void __iomem *base, int irq_start, u32 vic_sources,
                 u32 resume_sources, struct device_node *node);
 void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
 int vic_of_init(struct device_node *node, struct device_node *parent);
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index c190bc992f0e..01169dd723f1 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR 112

 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-        asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+        asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)

-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-        asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+        asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)

 struct notifier_block;
diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h
deleted file mode 100644
index ea6d063923b8..000000000000
--- a/arch/arm/include/asm/mach/serial_at91.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/include/asm/mach/serial_at91.h
- *
- * Based on serial_sa1100.h by Nicolas Pitre
- *
- * Copyright (C) 2002 ATMEL Rousset
- *
- * Low level machine dependent UART functions.
- */
-
-struct uart_port;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot.
- */
-struct atmel_port_fns {
-        void (*set_mctrl)(struct uart_port *, u_int);
-        u_int (*get_mctrl)(struct uart_port *);
-        void (*enable_ms)(struct uart_port *);
-        void (*pm)(struct uart_port *, u_int, u_int);
-        int (*set_wake)(struct uart_port *, u_int);
-        int (*open)(struct uart_port *);
-        void (*close)(struct uart_port *);
-};
-
-#if defined(CONFIG_SERIAL_ATMEL)
-void atmel_register_uart_fns(struct atmel_port_fns *fns);
-#else
-#define atmel_register_uart_fns(fns) do { } while (0)
-#endif
-
-
diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h
deleted file mode 100644
index d09064bf95a0..000000000000
--- a/arch/arm/include/asm/mach/serial_sa1100.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/arm/include/asm/mach/serial_sa1100.h
- *
- * Author: Nicolas Pitre
- *
- * Moved and changed lots, Russell King
- *
- * Low level machine dependent UART functions.
- */
-
-struct uart_port;
-struct uart_info;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot.
- */
-struct sa1100_port_fns {
-        void (*set_mctrl)(struct uart_port *, u_int);
-        u_int (*get_mctrl)(struct uart_port *);
-        void (*pm)(struct uart_port *, u_int, u_int);
-        int (*set_wake)(struct uart_port *, u_int);
-};
-
-#ifdef CONFIG_SERIAL_SA1100
-void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
-void sa1100_register_uart(int idx, int port);
-#else
-#define sa1100_register_uart_fns(fns) do { } while (0)
-#define sa1100_register_uart(idx,port) do { } while (0)
-#endif
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
deleted file mode 100644
index ea297ac70bc6..000000000000
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/include/asm/mach/udc_pxa2xx.h
- *
- * This supports machine-specific differences in how the PXA2xx
- * USB Device Controller (UDC) is wired.
- *
- * It is set in linux/arch/arm/mach-pxa/<machine>.c or in
- * linux/arch/mach-ixp4xx/<machine>.c and used in
- * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c
- */
-
-struct pxa2xx_udc_mach_info {
-        int  (*udc_is_connected)(void);         /* do we see host? */
-        void (*udc_command)(int cmd);
-#define PXA2XX_UDC_CMD_CONNECT          0       /* let host see us */
-#define PXA2XX_UDC_CMD_DISCONNECT       1       /* so host won't see us */
-
-        /* Boards following the design guidelines in the developer's manual,
-         * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
-         * VBUS IRQ and omit the methods above.  Store the GPIO number
-         * here.  Note that sometimes the signals go through inverters...
-         */
-        bool gpio_pullup_inverted;
-        int gpio_pullup;                        /* high == pullup activated */
-};
-
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..9f77e7804f3b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,18 +5,15 @@

 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-        unsigned int id;
-        raw_spinlock_t id_lock;
+        u64 id;
 #endif
-        unsigned int kvm_seq;
+        unsigned int vmalloc_seq;
 } mm_context_t;

 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm) ((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name) \
-        .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS 8
+#define ASID_MASK ((~0ULL) << ASID_BITS)
+#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm) (0)
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>

-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);

 #ifdef CONFIG_CPU_HAS_ASID

-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS 8
-#define ASID_MASK ((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION (1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-        unsigned long flags;
-
-        __new_context(mm);
-
-        local_irq_save(flags);
-        cpu_switch_mm(mm->pgd, mm);
-        local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-                                            struct task_struct *tsk)
-{
-        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-                __check_kvm_seq(mm);
-
-        /*
-         * Required during context switch to avoid speculative page table
-         * walking with the wrong TTBR.
-         */
-        cpu_set_reserved_ttbr0();
-
-        if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-                /*
-                 * The ASID is from the current generation, just switch to the
-                 * new pgd. This condition is only true for calls from
-                 * context_switch() and interrupts are already disabled.
-                 */
-                cpu_switch_mm(mm->pgd, mm);
-        else if (irqs_disabled())
-                /*
-                 * Defer the new ASID allocation until after the context
-                 * switch critical region since __new_context() cannot be
-                 * called with interrupts disabled (it sends IPIs).
-                 */
-                set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-        else
-                /*
-                 * That is a direct call to switch_mm() or activate_mm() with
-                 * interrupts enabled and a new context.
-                 */
-                switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-        finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-        if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-                switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm) ({ mm->context.id = 0; })

 #else /* !CONFIG_CPU_HAS_ASID */

@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
                                             struct task_struct *tsk)
 {
-        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-                __check_kvm_seq(mm);
+        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+                __check_vmalloc_seq(mm);

         if (irqs_disabled())
                 /*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif /* CONFIG_CPU_HAS_ASID */

 #define destroy_context(mm) do { } while(0)
+#define activate_mm(prev,next) switch_mm(prev, next, NULL)

 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }

 #define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)

 #endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 000000000000..968c0a14e0a3
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+        /* Set TPIDRPRW */
+        asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+        unsigned long off;
+        /* Read TPIDRPRW */
+        asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+        return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x) do {} while(0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 625cd621a436..755877527cf9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -21,4 +21,11 @@
 #define C(_x) PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED 0xFFFF

+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 2317a71c8f8e..f97ee02386ee 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -115,6 +115,7 @@
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID (_AT(pteval_t, 1) << 0)     /* Valid */
 #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE (_AT(pteval_t, 1) << 2)      /* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER (_AT(pteval_t, 1) << 8)
 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10)   /* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE (_AT(pteval_t, 1) << 11)

 /*
  * These are the memory types, defined to be compatible with
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index b24903549d1c..a3f37929940a 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -67,7 +67,8 @@
  * These bits overlap with the hardware bits but the naming is preserved for
  * consistency with the classic page table format.
  */
-#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0)   /* Valid */
+#define L_PTE_VALID (_AT(pteval_t, 1) << 0)     /* Valid */
+#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0)   /* Present */
 #define L_PTE_FILE (_AT(pteval_t, 1) << 2)      /* only when !PRESENT */
 #define L_PTE_USER (_AT(pteval_t, 1) << 6)      /* AP[1] */
 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)    /* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN (_AT(pteval_t, 1) << 54)       /* XN */
 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)    /* unused */
 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)  /* unused */
+#define L_PTE_NONE (_AT(pteval_t, 1) << 57)     /* PROT_NONE */

 /*
  * To be used in assembly code with the upper page attributes.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 08c12312a1f9..9c82f988c0e3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel;

 #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

-#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel;
 #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC pgprot_kernel

-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte) (0)

-#define pte_present_user(pte) \
-        ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-         (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))

 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
         pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
         return pte;
 }
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a26170dce02e..f24edad26c70 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -67,19 +67,19 @@ struct arm_pmu {
         cpumask_t active_irqs;
         char *name;
         irqreturn_t (*handle_irq)(int irq_num, void *dev);
-        void (*enable)(struct hw_perf_event *evt, int idx);
-        void (*disable)(struct hw_perf_event *evt, int idx);
+        void (*enable)(struct perf_event *event);
+        void (*disable)(struct perf_event *event);
         int (*get_event_idx)(struct pmu_hw_events *hw_events,
-                             struct hw_perf_event *hwc);
+                             struct perf_event *event);
         int (*set_event_filter)(struct hw_perf_event *evt,
                                 struct perf_event_attr *attr);
-        u32 (*read_counter)(int idx);
-        void (*write_counter)(int idx, u32 val);
-        void (*start)(void);
-        void (*stop)(void);
+        u32 (*read_counter)(struct perf_event *event);
+        void (*write_counter)(struct perf_event *event, u32 val);
+        void (*start)(struct arm_pmu *);
+        void (*stop)(struct arm_pmu *);
         void (*reset)(void *);
-        int (*request_irq)(irq_handler_t handler);
-        void (*free_irq)(void);
+        int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+        void (*free_irq)(struct arm_pmu *);
         int (*map_event)(struct perf_event *event);
         int num_events;
         atomic_t active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {

 extern const struct dev_pm_ops armpmu_dev_pm_ops;

-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);

-u64 armpmu_event_update(struct perf_event *event,
-                        struct hw_perf_event *hwc,
-                        int idx);
+u64 armpmu_event_update(struct perf_event *event);

-int armpmu_event_set_period(struct perf_event *event,
-                            struct hw_perf_event *hwc,
-                            int idx);
+int armpmu_event_set_period(struct perf_event *event);

 int armpmu_map_event(struct perf_event *event,
                      const unsigned (*event_map)[PERF_COUNT_HW_MAX],
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 6d65ba222db9..a219227c3e43 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -17,6 +17,7 @@

 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);

 #else /* CONFIG_OF */

@@ -26,6 +27,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }

 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }

 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2e3be16c6766..d3a22bebe6ce 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -79,6 +79,7 @@ extern void cpu_die(void);

 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);

 struct smp_operations {
 #ifdef CONFIG_SMP
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 558d6c80aca9..aaa61b6f50ff 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H

+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>

 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+        int cpu;
+        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+                if (cpu_logical_map(cpu) == mpidr)
+                        return cpu;
+        return -EINVAL;
+}

 #endif
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 9fdded6b1089..f1d96d4e8092 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -7,6 +7,8 @@
 #ifndef _ASM_ARM_SYSCALL_H
 #define _ASM_ARM_SYSCALL_H

+#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <linux/elf.h> /* for ELF_EM */
 #include <linux/err.h>
 #include <linux/sched.h>

@@ -95,4 +97,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
         memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }

+static inline int syscall_get_arch(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+        /* ARM tasks don't change audit architectures on the fly. */
+        return AUDIT_ARCH_ARM;
+}
+
 #endif /* _ASM_ARM_SYSCALL_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 8477b4c1d39f..cddda1f41f0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -151,10 +151,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_TRACE 8
 #define TIF_SYSCALL_AUDIT 9
 #define TIF_SYSCALL_TRACEPOINT 10
+#define TIF_SECCOMP 11          /* seccomp syscall filtering active */
 #define TIF_USING_IWMMXT 17
 #define TIF_MEMDIE 18           /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK 20
-#define TIF_SECCOMP 21
 #define TIF_SWITCH_MM 22        /* deferred switch_mm */

 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -163,11 +163,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
-#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)

 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                           _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

 /*
  * Change these and you break ASM code in entry-common.S
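
Usage sketch, not part of the patch above: the MPIDR accessors added to cputype.h and the get_logical_index() helper added to smp_plat.h could be combined as below. The report_cpu_affinity() function and its pr_info() format are hypothetical; the mpidr argument would typically come from read_cpuid_mpidr().

#include <linux/kernel.h>
#include <asm/cputype.h>
#include <asm/smp_plat.h>

/* Hypothetical helper: decode an MPIDR and map it back to a logical CPU. */
static void report_cpu_affinity(u32 mpidr)
{
        /* Keep only Aff2..Aff0; bits [31:24] are not part of the HW id. */
        u32 hwid = mpidr & MPIDR_HWID_BITMASK;

        pr_info("Aff0=%u Aff1=%u Aff2=%u logical cpu=%d\n",
                MPIDR_AFFINITY_LEVEL(hwid, 0),
                MPIDR_AFFINITY_LEVEL(hwid, 1),
                MPIDR_AFFINITY_LEVEL(hwid, 2),
                get_logical_index(hwid));
}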
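A minimal sketch of what the new asm/percpu.h buys (the variable and function names are illustrative, not from the patch): per-CPU accessors resolve through __my_cpu_offset, which on v6K/v7 SMP kernels is now a single TPIDRPRW read instead of an offset-table lookup keyed by smp_processor_id().

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, sample_count);    /* illustrative */

static void note_sample(void)
{
        /* Expands via __my_cpu_offset; with this patch that is one mrc of TPIDRPRW. */
        this_cpu_inc(sample_count);
}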
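A hedged example of the reworked hw_breakpoint accessors: the extra CRn argument lets callers name debug registers outside CRn == c0. DBGDIDR (p14, 0, c0, c0, 0) is used here purely as an illustration; the function name is hypothetical.

#include <linux/types.h>
#include <asm/hw_breakpoint.h>

static u32 read_debug_id(void)
{
        u32 didr;

        /* DBGDIDR lives at p14, 0, c0, c0, 0 -> N = c0, M = c0, OP2 = 0. */
        ARM_DBG_READ(c0, c0, 0, didr);
        return didr;
}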
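The pmu.h changes switch the arm_pmu callbacks from (hw_perf_event *, idx) pairs to the owning perf_event. A hypothetical driver callback written against the new void (*enable)(struct perf_event *) prototype might look like this; the actual counter programming is elided.

#include <linux/perf_event.h>
#include <asm/pmu.h>

/* Hypothetical enable hook: the counter index now comes from the event itself. */
static void example_pmu_enable(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;     /* formerly passed in as a separate argument */

        pr_debug("enabling counter %d for config 0x%llx\n",
                 idx, (unsigned long long)event->attr.config);
}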