Diffstat (limited to 'arch/tile')
27 files changed, 1322 insertions, 135 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index b3692ce78f90..4f3006b600e3 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -3,6 +3,8 @@ config TILE def_bool y + select HAVE_PERF_EVENTS + select USE_PMC if PERF_EVENTS select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select HAVE_KVM if !TILEGX @@ -66,6 +68,10 @@ config HUGETLB_SUPER_PAGES config GENERIC_TIME_VSYSCALL def_bool y +# Enable PMC if PERF_EVENTS, OPROFILE, or WATCHPOINTS are enabled. +config USE_PMC + bool + # FIXME: tilegx can implement a more efficient rwsem. config RWSEM_GENERIC_SPINLOCK def_bool y @@ -119,6 +125,8 @@ config HVC_TILE config TILEGX bool "Building for TILE-Gx (64-bit) processor" + select SPARSE_IRQ + select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FUNCTION_GRAPH_TRACER @@ -405,7 +413,7 @@ config PCI_DOMAINS config NO_IOMEM def_bool !PCI -config NO_IOPORT +config NO_IOPORT_MAP def_bool !PCI config TILE_PCI_IO diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 1ad4a1f7d42b..1b109fad9fff 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -169,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, long long n) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) -/* - * We need to barrier before modifying the word, since the _atomic_xxx() - * routines just tns the lock and then read/modify/write of the word. - * But after the word is updated, the routine issues an "mf" before returning, - * and since it's a function call, we don't even need a compiler barrier. - */ -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_dec() do { } while (0) -#define smp_mb__after_atomic_inc() do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index ad220eed05fc..7b11c5fadd42 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -/* Atomic dec and inc don't implement barrier, so provide them if needed. */ -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - /* Define this to indicate that cmpxchg is an efficient operation. */ #define __HAVE_ARCH_CMPXCHG diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index b5a05d050a8f..96a42ae79f4d 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -72,6 +72,20 @@ mb_incoherent(void) #define mb() fast_mb() #define iob() fast_iob() +#ifndef __tilegx__ /* 32 bit */ +/* + * We need to barrier before modifying the word, since the _atomic_xxx() + * routines just tns the lock and then read/modify/write of the word. + * But after the word is updated, the routine issues an "mf" before returning, + * and since it's a function call, we don't even need a compiler barrier. 
+ */ +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() do { } while (0) +#else /* 64 bit */ +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif + #include <asm-generic/barrier.h> #endif /* !__ASSEMBLY__ */ diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h index d5a206865036..20caa346ac06 100644 --- a/arch/tile/include/asm/bitops.h +++ b/arch/tile/include/asm/bitops.h @@ -17,6 +17,7 @@ #define _ASM_TILE_BITOPS_H #include <linux/types.h> +#include <asm/barrier.h> #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h index 386865ad2f55..bbf7b666f21d 100644 --- a/arch/tile/include/asm/bitops_32.h +++ b/arch/tile/include/asm/bitops_32.h @@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr) * restricted to acting on a single-word quantity. * * clear_bit() may not contain a memory barrier, so if it is used for - * locking purposes, you should call smp_mb__before_clear_bit() and/or - * smp_mb__after_clear_bit() to ensure changes are visible on other cpus. + * locking purposes, you should call smp_mb__before_atomic() and/or + * smp_mb__after_atomic() to ensure changes are visible on other cpus. */ static inline void clear_bit(unsigned nr, volatile unsigned long *addr) { @@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr, return (_atomic_xor(addr, mask) & mask) != 0; } -/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */ -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() do {} while (0) - #include <asm-generic/bitops/ext2-atomic.h> #endif /* _ASM_TILE_BITOPS_32_H */ diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h index ad34cd056085..bb1a29221fcd 100644 --- a/arch/tile/include/asm/bitops_64.h +++ b/arch/tile/include/asm/bitops_64.h @@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr) __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask); } -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() - - static inline void change_bit(unsigned nr, volatile unsigned long *addr) { unsigned long mask = (1UL << (nr % BITS_PER_LONG)); diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h index 33cff9a3058b..1fe86911838b 100644 --- a/arch/tile/include/asm/irq.h +++ b/arch/tile/include/asm/irq.h @@ -18,10 +18,12 @@ #include <linux/hardirq.h> /* The hypervisor interface provides 32 IRQs. */ -#define NR_IRQS 32 +#define NR_IRQS 32 /* IRQ numbers used for linux IPIs. */ -#define IRQ_RESCHEDULE 0 +#define IRQ_RESCHEDULE 0 +/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */ +#define NR_IRQS_LEGACY 1 #define irq_canonicalize(irq) (irq) diff --git a/arch/tile/include/asm/perf_event.h b/arch/tile/include/asm/perf_event.h new file mode 100644 index 000000000000..59c5b164e5b6 --- /dev/null +++ b/arch/tile/include/asm/perf_event.h @@ -0,0 +1,22 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#ifndef _ASM_TILE_PERF_EVENT_H +#define _ASM_TILE_PERF_EVENT_H + +#include <linux/percpu.h> +DECLARE_PER_CPU(u64, perf_irqs); + +unsigned long handle_syscall_link_address(void); +#endif /* _ASM_TILE_PERF_EVENT_H */ diff --git a/arch/tile/include/asm/pmc.h b/arch/tile/include/asm/pmc.h new file mode 100644 index 000000000000..7ae3956d9008 --- /dev/null +++ b/arch/tile/include/asm/pmc.h @@ -0,0 +1,64 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#ifndef _ASM_TILE_PMC_H +#define _ASM_TILE_PMC_H + +#include <linux/ptrace.h> + +#define TILE_BASE_COUNTERS 2 + +/* Bitfields below are derived from SPR PERF_COUNT_CTL*/ +#ifndef __tilegx__ +/* PERF_COUNT_CTL on TILEPro */ +#define TILE_CTL_EXCL_USER (1 << 7) /* exclude user level */ +#define TILE_CTL_EXCL_KERNEL (1 << 8) /* exclude kernel level */ +#define TILE_CTL_EXCL_HV (1 << 9) /* exclude hypervisor level */ + +#define TILE_SEL_MASK 0x7f /* 7 bits for event SEL, + COUNT_0_SEL */ +#define TILE_PLM_MASK 0x780 /* 4 bits priv level msks, + COUNT_0_MASK*/ +#define TILE_EVENT_MASK (TILE_SEL_MASK | TILE_PLM_MASK) + +#else /* __tilegx__*/ +/* PERF_COUNT_CTL on TILEGx*/ +#define TILE_CTL_EXCL_USER (1 << 10) /* exclude user level */ +#define TILE_CTL_EXCL_KERNEL (1 << 11) /* exclude kernel level */ +#define TILE_CTL_EXCL_HV (1 << 12) /* exclude hypervisor level */ + +#define TILE_SEL_MASK 0x3f /* 6 bits for event SEL, + COUNT_0_SEL*/ +#define TILE_BOX_MASK 0x1c0 /* 3 bits box msks, + COUNT_0_BOX */ +#define TILE_PLM_MASK 0x3c00 /* 4 bits priv level msks, + COUNT_0_MASK */ +#define TILE_EVENT_MASK (TILE_SEL_MASK | TILE_BOX_MASK | TILE_PLM_MASK) +#endif /* __tilegx__*/ + +/* Takes register and fault number. Returns error to disable the interrupt. 
*/ +typedef int (*perf_irq_t)(struct pt_regs *, int); + +int userspace_perf_handler(struct pt_regs *regs, int fault); + +perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq); +void release_pmc_hardware(void); + +unsigned long pmc_get_overflow(void); +void pmc_ack_overflow(unsigned long status); + +void unmask_pmc_interrupts(void); +void mask_pmc_interrupts(void); + +#endif /* _ASM_TILE_PMC_H */ diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 729aa107f64e..d767ff9f59b9 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -129,6 +129,7 @@ extern void _cpu_idle(void); #define TIF_MEMDIE 7 /* OOM killer at work */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */ +#define TIF_POLLING_NRFLAG 10 /* idle is polling for TIF_NEED_RESCHED */ #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) @@ -140,6 +141,7 @@ extern void _cpu_idle(void); #define _TIF_MEMDIE (1<<TIF_MEMDIE) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) +#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) /* Work to do on any return to user space. */ #define _TIF_ALLWORK_MASK \ @@ -162,7 +164,6 @@ extern void _cpu_idle(void); #ifdef __tilegx__ #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */ #endif -#define TS_POLLING 0x0004 /* in idle loop but not sleeping */ #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ #ifndef __ASSEMBLY__ diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h index d15c0d8d550f..938311844233 100644 --- a/arch/tile/include/asm/topology.h +++ b/arch/tile/include/asm/topology.h @@ -44,39 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node) /* For now, use numa node -1 for global allocation. */ #define pcibus_to_node(bus) ((void)(bus), -1) -/* - * TILE architecture has many cores integrated in one processor, so we need - * setup bigger balance_interval for both CPU/NODE scheduling domains to - * reduce process scheduling costs. - */ - -/* sched_domains SD_CPU_INIT for TILE architecture */ -#define SD_CPU_INIT (struct sched_domain) { \ - .min_interval = 4, \ - .max_interval = 128, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 2, \ - .idle_idx = 1, \ - .newidle_idx = 0, \ - .wake_idx = 0, \ - .forkexec_idx = 0, \ - \ - .flags = 1*SD_LOAD_BALANCE \ - | 1*SD_BALANCE_NEWIDLE \ - | 1*SD_BALANCE_EXEC \ - | 1*SD_BALANCE_FORK \ - | 0*SD_BALANCE_WAKE \ - | 0*SD_WAKE_AFFINE \ - | 0*SD_SHARE_CPUPOWER \ - | 0*SD_SHARE_PKG_RESOURCES \ - | 0*SD_SERIALIZE \ - , \ - .last_balance = jiffies, \ - .balance_interval = 32, \ -} - /* By definition, we create nodes based on online memory. 
*/ #define node_has_online_mem(nid) 1 diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 27a2bf39dae8..21f77bf68c69 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile @@ -25,6 +25,8 @@ obj-$(CONFIG_PCI) += pci_gx.o else obj-$(CONFIG_PCI) += pci.o endif +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_USE_PMC) += pmc.o obj-$(CONFIG_TILE_USB) += usb.o obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c index f1c452092eeb..8d52d83cc516 100644 --- a/arch/tile/kernel/ftrace.c +++ b/arch/tile/kernel/ftrace.c @@ -167,10 +167,8 @@ int ftrace_make_nop(struct module *mod, return ret; } -int __init ftrace_dyn_arch_init(void *data) +int __init ftrace_dyn_arch_init(void) { - *(unsigned long *)data = 0; - return 0; } #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 2cbe6d5dd6b0..cdbda45a4e4b 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -313,13 +313,13 @@ intvec_\vecname: movei r3, 0 } .else - .ifc \c_routine, op_handle_perf_interrupt + .ifc \c_routine, handle_perf_interrupt { mfspr r2, PERF_COUNT_STS movei r3, -1 /* not used, but set for consistency */ } .else - .ifc \c_routine, op_handle_aux_perf_interrupt + .ifc \c_routine, handle_perf_interrupt { mfspr r2, AUX_PERF_COUNT_STS movei r3, -1 /* not used, but set for consistency */ @@ -946,6 +946,13 @@ STD_ENTRY(interrupt_return) bzt r30, .Lrestore_regs 3: + /* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */ + { + moveli r0, lo16(1 << (INT_PERF_COUNT - 32)) + bz r31, .Lrestore_regs + } + auli r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32)) + mtspr SPR_INTERRUPT_MASK_RESET_K_1, r0 /* * We now commit to returning from this interrupt, since we will be @@ -1171,6 +1178,10 @@ handle_nmi: PTREGS_PTR(r0, PTREGS_OFFSET_BASE) } FEEDBACK_REENTER(handle_nmi) + { + movei r30, 1 + seq r31, r0, zero + } j interrupt_return STD_ENDPROC(handle_nmi) @@ -1835,8 +1846,9 @@ int_unalign: /* Include .intrpt array of interrupt vectors */ .section ".intrpt", "ax" -#define op_handle_perf_interrupt bad_intr -#define op_handle_aux_perf_interrupt bad_intr +#ifndef CONFIG_USE_PMC +#define handle_perf_interrupt bad_intr +#endif #ifndef CONFIG_HARDWALL #define do_hardwall_trap bad_intr @@ -1877,7 +1889,7 @@ int_unalign: int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr int_hand INT_PERF_COUNT, PERF_COUNT, \ - op_handle_perf_interrupt, handle_nmi + handle_perf_interrupt, handle_nmi int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr #if CONFIG_KERNEL_PL == 2 dc_dispatch INT_INTCTRL_2, INTCTRL_2 @@ -1902,7 +1914,7 @@ int_unalign: int_hand INT_SN_CPL, SN_CPL, bad_intr int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ - op_handle_aux_perf_interrupt, handle_nmi + handle_perf_interrupt, handle_nmi /* Synthetic interrupt delivered only by the simulator */ int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index b8fc497f2437..5b67efcecabd 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -509,10 +509,10 @@ intvec_\vecname: .ifc \c_routine, do_trap mfspr r2, GPV_REASON .else - .ifc \c_routine, op_handle_perf_interrupt + .ifc \c_routine, handle_perf_interrupt mfspr r2, PERF_COUNT_STS .else - .ifc \c_routine, op_handle_aux_perf_interrupt + 
.ifc \c_routine, handle_perf_interrupt mfspr r2, AUX_PERF_COUNT_STS .endif .endif @@ -971,6 +971,15 @@ STD_ENTRY(interrupt_return) beqzt r30, .Lrestore_regs 3: +#if INT_PERF_COUNT + 1 != INT_AUX_PERF_COUNT +# error Bad interrupt assumption +#endif + { + movei r0, 3 /* two adjacent bits for the PERF_COUNT mask */ + beqz r31, .Lrestore_regs + } + shli r0, r0, INT_PERF_COUNT + mtspr SPR_INTERRUPT_MASK_RESET_K, r0 /* * We now commit to returning from this interrupt, since we will be @@ -1187,7 +1196,7 @@ handle_nmi: FEEDBACK_REENTER(handle_nmi) { movei r30, 1 - move r31, r0 + cmpeq r31, r0, zero } j interrupt_return STD_ENDPROC(handle_nmi) @@ -1491,8 +1500,9 @@ STD_ENTRY(fill_ra_stack) .global intrpt_start intrpt_start: -#define op_handle_perf_interrupt bad_intr -#define op_handle_aux_perf_interrupt bad_intr +#ifndef CONFIG_USE_PMC +#define handle_perf_interrupt bad_intr +#endif #ifndef CONFIG_HARDWALL #define do_hardwall_trap bad_intr @@ -1540,9 +1550,9 @@ intrpt_start: #endif int_hand INT_IPI_0, IPI_0, bad_intr int_hand INT_PERF_COUNT, PERF_COUNT, \ - op_handle_perf_interrupt, handle_nmi + handle_perf_interrupt, handle_nmi int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ - op_handle_perf_interrupt, handle_nmi + handle_perf_interrupt, handle_nmi int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr #if CONFIG_KERNEL_PL == 2 dc_dispatch INT_INTCTRL_2, INTCTRL_2 diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 0586fdb9352d..637f2ffaa5f5 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -21,6 +21,7 @@ #include <hv/drv_pcie_rc_intf.h> #include <arch/spr_def.h> #include <asm/traps.h> +#include <linux/perf_event.h> /* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */ #define IS_HW_CLEARED 1 @@ -53,13 +54,6 @@ static DEFINE_PER_CPU(unsigned long, irq_disable_mask) */ static DEFINE_PER_CPU(int, irq_depth); -/* State for allocating IRQs on Gx. */ -#if CHIP_HAS_IPI() -static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) & - (~(1UL << IRQ_RESCHEDULE)); -static DEFINE_SPINLOCK(available_irqs_lock); -#endif - #if CHIP_HAS_IPI() /* Use SPRs to manipulate device interrupts. */ #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask) @@ -261,37 +255,27 @@ void ack_bad_irq(unsigned int irq) } /* - * Generic, controller-independent functions: + * /proc/interrupts printing: */ - -#if CHIP_HAS_IPI() -int create_irq(void) +int arch_show_interrupts(struct seq_file *p, int prec) { - unsigned long flags; - int result; - - spin_lock_irqsave(&available_irqs_lock, flags); - if (available_irqs == 0) - result = -ENOMEM; - else { - result = __ffs(available_irqs); - available_irqs &= ~(1UL << result); - dynamic_irq_init(result); - } - spin_unlock_irqrestore(&available_irqs_lock, flags); +#ifdef CONFIG_PERF_EVENTS + int i; - return result; + seq_printf(p, "%*s: ", prec, "PMI"); + + for_each_online_cpu(i) + seq_printf(p, "%10llu ", per_cpu(perf_irqs, i)); + seq_puts(p, " perf_events\n"); +#endif + return 0; } -EXPORT_SYMBOL(create_irq); -void destroy_irq(unsigned int irq) +#if CHIP_HAS_IPI() +int arch_setup_hwirq(unsigned int irq, int node) { - unsigned long flags; - - spin_lock_irqsave(&available_irqs_lock, flags); - available_irqs |= (1UL << irq); - dynamic_irq_cleanup(irq); - spin_unlock_irqrestore(&available_irqs_lock, flags); + return irq >= NR_IRQS ? 
-EINVAL : 0; } -EXPORT_SYMBOL(destroy_irq); + +void arch_teardown_hwirq(unsigned int irq) { } #endif diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index 00331af9525d..7867266f9716 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c @@ -68,8 +68,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum) #endif while (1) { - rmi = hv_receive_message(__get_cpu_var(msg_state), - (HV_VirtAddr) message, + HV_MsgState *state = this_cpu_ptr(&msg_state); + rmi = hv_receive_message(*state, (HV_VirtAddr) message, sizeof(message)); if (rmi.msglen == 0) break; diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index c45593db7718..1f80a88c75a6 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -250,8 +250,6 @@ static void fixup_read_and_payload_sizes(void) /* Scan for the smallest maximum payload size. */ for_each_pci_dev(dev) { - u32 devcap; - if (!pci_is_pcie(dev)) continue; diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index 077b7bc437e5..e39f9c542807 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -350,10 +350,9 @@ static int tile_init_irqs(struct pci_controller *controller) int cpu; /* Ask the kernel to allocate an IRQ. */ - irq = create_irq(); - if (irq < 0) { + irq = irq_alloc_hwirq(-1); + if (!irq) { pr_err("PCI: no free irq vectors, failed for %d\n", i); - goto free_irqs; } controller->irq_intx_table[i] = irq; @@ -382,7 +381,7 @@ static int tile_init_irqs(struct pci_controller *controller) free_irqs: for (j = 0; j < i; j++) - destroy_irq(controller->irq_intx_table[j]); + irq_free_hwirq(controller->irq_intx_table[j]); return -1; } @@ -1500,9 +1499,9 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) int irq; int ret; - irq = create_irq(); - if (irq < 0) - return irq; + irq = irq_alloc_hwirq(-1); + if (!irq) + return -ENOSPC; /* * Since we use a 64-bit Mem-Map to accept the MSI write, we fail @@ -1601,11 +1600,11 @@ hv_msi_config_failure: /* Free mem-map */ msi_mem_map_alloc_failure: is_64_failure: - destroy_irq(irq); + irq_free_hwirq(irq); return ret; } void arch_teardown_msi_irq(unsigned int irq) { - destroy_irq(irq); + irq_free_hwirq(irq); } diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c new file mode 100644 index 000000000000..2bf6c9c135c1 --- /dev/null +++ b/arch/tile/kernel/perf_event.c @@ -0,0 +1,1005 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * + * Perf_events support for Tile processor. 
+ * + * This code is based upon the x86 perf event + * code, which is: + * + * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> + * Copyright (C) 2009 Google, Inc., Stephane Eranian + */ + +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> +#include <linux/bitmap.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/perf_event.h> +#include <linux/atomic.h> +#include <asm/traps.h> +#include <asm/stack.h> +#include <asm/pmc.h> +#include <hv/hypervisor.h> + +#define TILE_MAX_COUNTERS 4 + +#define PERF_COUNT_0_IDX 0 +#define PERF_COUNT_1_IDX 1 +#define AUX_PERF_COUNT_0_IDX 2 +#define AUX_PERF_COUNT_1_IDX 3 + +struct cpu_hw_events { + int n_events; + struct perf_event *events[TILE_MAX_COUNTERS]; /* counter order */ + struct perf_event *event_list[TILE_MAX_COUNTERS]; /* enabled + order */ + int assign[TILE_MAX_COUNTERS]; + unsigned long active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)]; + unsigned long used_mask; +}; + +/* TILE arch specific performance monitor unit */ +struct tile_pmu { + const char *name; + int version; + const int *hw_events; /* generic hw events table */ + /* generic hw cache events table */ + const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + int (*map_hw_event)(u64); /*method used to map + hw events */ + int (*map_cache_event)(u64); /*method used to map + cache events */ + + u64 max_period; /* max sampling period */ + u64 cntval_mask; /* counter width mask */ + int cntval_bits; /* counter width */ + int max_events; /* max generic hw events + in map */ + int num_counters; /* number base + aux counters */ + int num_base_counters; /* number base counters */ +}; + +DEFINE_PER_CPU(u64, perf_irqs); +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +#define TILE_OP_UNSUPP (-1) + +#ifndef __tilegx__ +/* TILEPro hardware events map */ +static const int tile_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x01, /* ONE */ + [PERF_COUNT_HW_INSTRUCTIONS] = 0x06, /* MP_BUNDLE_RETIRED */ + [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP, + [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x16, /* + MP_CONDITIONAL_BRANCH_ISSUED */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0x14, /* + MP_CONDITIONAL_BRANCH_MISSPREDICT */ + [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP, +}; +#else +/* TILEGx hardware events map */ +static const int tile_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x181, /* ONE */ + [PERF_COUNT_HW_INSTRUCTIONS] = 0xdb, /* INSTRUCTION_BUNDLE */ + [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP, + [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xd9, /* + COND_BRANCH_PRED_CORRECT */ + [PERF_COUNT_HW_BRANCH_MISSES] = 0xda, /* + COND_BRANCH_PRED_INCORRECT */ + [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP, +}; +#endif + +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Generalized hw caching related hw_event table, filled + * in on a per model basis. A value of -1 means + * 'not supported', any other value means the + * raw hw_event ID. 
+ */ +#ifndef __tilegx__ +/* TILEPro hardware cache event map */ +static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = 0x21, /* RD_MISS */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = 0x22, /* WR_MISS */ + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x12, /* MP_ICACHE_HIT_ISSUED */ + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x1d, /* TLB_CNT */ + [C(RESULT_MISS)] = 0x20, /* TLB_EXCEPTION */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x13, /* MP_ITLB_HIT_ISSUED */ + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +}; +#else +/* TILEGx hardware events map */ +static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = 0x44, /* RD_MISS */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = 0x45, /* WR_MISS */ + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */ + [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */ + [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */ + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */ + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = TILE_OP_UNSUPP, + [C(RESULT_MISS)] = TILE_OP_UNSUPP, + }, +}, +}; +#endif + +static atomic_t tile_active_events; +static DEFINE_MUTEX(perf_intr_reserve_mutex); + +static int tile_map_hw_event(u64 config); +static int tile_map_cache_event(u64 config); + +static int tile_pmu_handle_irq(struct pt_regs *regs, int fault); + +/* + * To avoid new_raw_count getting larger than prev_raw_count + * in tile_perf_event_update(), we limit the value of max_period to 2^31 - 1. + */ +static const struct tile_pmu tilepmu = { +#ifndef __tilegx__ + .name = "tilepro", +#else + .name = "tilegx", +#endif + .max_events = ARRAY_SIZE(tile_hw_event_map), + .map_hw_event = tile_map_hw_event, + .hw_events = tile_hw_event_map, + .map_cache_event = tile_map_cache_event, + .cache_events = &tile_cache_event_map, + .cntval_bits = 32, + .cntval_mask = (1ULL << 32) - 1, + .max_period = (1ULL << 31) - 1, + .num_counters = TILE_MAX_COUNTERS, + .num_base_counters = TILE_BASE_COUNTERS, +}; + +static const struct tile_pmu *tile_pmu __read_mostly; + +/* + * Check whether perf events are enabled. + */ +int tile_perf_enabled(void) +{ + return atomic_read(&tile_active_events) != 0; +} + +/* + * Read Performance Counters. 
+ */ +static inline u64 read_counter(int idx) +{ + u64 val = 0; + + /* __insn_mfspr() only takes an immediate argument */ + switch (idx) { + case PERF_COUNT_0_IDX: + val = __insn_mfspr(SPR_PERF_COUNT_0); + break; + case PERF_COUNT_1_IDX: + val = __insn_mfspr(SPR_PERF_COUNT_1); + break; + case AUX_PERF_COUNT_0_IDX: + val = __insn_mfspr(SPR_AUX_PERF_COUNT_0); + break; + case AUX_PERF_COUNT_1_IDX: + val = __insn_mfspr(SPR_AUX_PERF_COUNT_1); + break; + default: + WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX || + idx < PERF_COUNT_0_IDX); + } + + return val; +} + +/* + * Write Performance Counters. + */ +static inline void write_counter(int idx, u64 value) +{ + /* __insn_mtspr() only takes an immediate argument */ + switch (idx) { + case PERF_COUNT_0_IDX: + __insn_mtspr(SPR_PERF_COUNT_0, value); + break; + case PERF_COUNT_1_IDX: + __insn_mtspr(SPR_PERF_COUNT_1, value); + break; + case AUX_PERF_COUNT_0_IDX: + __insn_mtspr(SPR_AUX_PERF_COUNT_0, value); + break; + case AUX_PERF_COUNT_1_IDX: + __insn_mtspr(SPR_AUX_PERF_COUNT_1, value); + break; + default: + WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX || + idx < PERF_COUNT_0_IDX); + } +} + +/* + * Enable performance event by setting + * Performance Counter Control registers. + */ +static inline void tile_pmu_enable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + unsigned long cfg, mask; + int shift, idx = hwc->idx; + + /* + * prevent early activation from tile_pmu_start() in hw_perf_enable + */ + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (idx < tile_pmu->num_base_counters) + cfg = __insn_mfspr(SPR_PERF_COUNT_CTL); + else + cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL); + + switch (idx) { + case PERF_COUNT_0_IDX: + case AUX_PERF_COUNT_0_IDX: + mask = TILE_EVENT_MASK; + shift = 0; + break; + case PERF_COUNT_1_IDX: + case AUX_PERF_COUNT_1_IDX: + mask = TILE_EVENT_MASK << 16; + shift = 16; + break; + default: + WARN_ON_ONCE(idx < PERF_COUNT_0_IDX || + idx > AUX_PERF_COUNT_1_IDX); + return; + } + + /* Clear mask bits to enable the event. */ + cfg &= ~mask; + cfg |= hwc->config << shift; + + if (idx < tile_pmu->num_base_counters) + __insn_mtspr(SPR_PERF_COUNT_CTL, cfg); + else + __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg); +} + +/* + * Disable performance event by clearing + * Performance Counter Control registers. + */ +static inline void tile_pmu_disable_event(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + unsigned long cfg, mask; + int idx = hwc->idx; + + if (idx == -1) + return; + + if (idx < tile_pmu->num_base_counters) + cfg = __insn_mfspr(SPR_PERF_COUNT_CTL); + else + cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL); + + switch (idx) { + case PERF_COUNT_0_IDX: + case AUX_PERF_COUNT_0_IDX: + mask = TILE_PLM_MASK; + break; + case PERF_COUNT_1_IDX: + case AUX_PERF_COUNT_1_IDX: + mask = TILE_PLM_MASK << 16; + break; + default: + WARN_ON_ONCE(idx < PERF_COUNT_0_IDX || + idx > AUX_PERF_COUNT_1_IDX); + return; + } + + /* Set mask bits to disable the event. */ + cfg |= mask; + + if (idx < tile_pmu->num_base_counters) + __insn_mtspr(SPR_PERF_COUNT_CTL, cfg); + else + __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg); +} + +/* + * Propagate event elapsed time into the generic event. + * Can only be executed on the CPU where the event is active. + * Returns the delta events processed. 
+ */ +static u64 tile_perf_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int shift = 64 - tile_pmu->cntval_bits; + u64 prev_raw_count, new_raw_count; + u64 oldval; + int idx = hwc->idx; + u64 delta; + + /* + * Careful: an NMI might modify the previous event value. + * + * Our tactic to handle this is to first atomically read and + * exchange a new raw count - then add that new-prev delta + * count to the generic event atomically: + */ +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = read_counter(idx); + + oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count); + if (oldval != prev_raw_count) + goto again; + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + * + * Careful, not all hw sign-extends above the physical width + * of the count. + */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +/* + * Set the next IRQ period, based on the hwc->period_left value. + * To be called with the event disabled in hw: + */ +static int tile_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + /* + * If we are way outside a reasonable range then just skip forward: + */ + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + if (left > tile_pmu->max_period) + left = tile_pmu->max_period; + + /* + * The hw event starts counting from this event offset, + * mark it to be able to compute future deltas: + */ + local64_set(&hwc->prev_count, (u64)-left); + + write_counter(idx, (u64)(-left) & tile_pmu->cntval_mask); + + perf_event_update_userpage(event); + + return ret; +} + +/* + * Stop the event but do not release the PMU counter + */ +static void tile_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (__test_and_clear_bit(idx, cpuc->active_mask)) { + tile_pmu_disable_event(event); + cpuc->events[hwc->idx] = NULL; + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of an event + * that we are disabling: + */ + tile_perf_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +/* + * Start an event (without re-assigning counter) + */ +static void tile_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = event->hw.idx; + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + tile_event_set_period(event); + } + + event->hw.state = 0; + + cpuc->events[idx] = event; + __set_bit(idx, cpuc->active_mask); + + unmask_pmc_interrupts(); + + tile_pmu_enable_event(event); + + 
perf_event_update_userpage(event); +} + +/* + * Add a single event to the PMU. + * + * The event is added to the group of enabled events + * but only if it can be scheduled with existing events. + */ +static int tile_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc; + unsigned long mask; + int b, max_cnt; + + hwc = &event->hw; + + /* + * We are full. + */ + if (cpuc->n_events == tile_pmu->num_counters) + return -ENOSPC; + + cpuc->event_list[cpuc->n_events] = event; + cpuc->n_events++; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + + /* + * Find first empty counter. + */ + max_cnt = tile_pmu->num_counters; + mask = ~cpuc->used_mask; + + /* Find next free counter. */ + b = find_next_bit(&mask, max_cnt, 0); + + /* Should not happen. */ + if (WARN_ON_ONCE(b == max_cnt)) + return -ENOSPC; + + /* + * Assign counter to event. + */ + event->hw.idx = b; + __set_bit(b, &cpuc->used_mask); + + /* + * Start if requested. + */ + if (flags & PERF_EF_START) + tile_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +/* + * Delete a single event from the PMU. + * + * The event is deleted from the group of enabled events. + * If it is the last event, disable PMU interrupt. + */ +static void tile_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int i; + + /* + * Remove event from list, compact list if necessary. + */ + for (i = 0; i < cpuc->n_events; i++) { + if (cpuc->event_list[i] == event) { + while (++i < cpuc->n_events) + cpuc->event_list[i-1] = cpuc->event_list[i]; + --cpuc->n_events; + cpuc->events[event->hw.idx] = NULL; + __clear_bit(event->hw.idx, &cpuc->used_mask); + tile_pmu_stop(event, PERF_EF_UPDATE); + break; + } + } + /* + * If there are no events left, then mask PMU interrupt. + */ + if (cpuc->n_events == 0) + mask_pmc_interrupts(); + perf_event_update_userpage(event); +} + +/* + * Propagate event elapsed time into the event. + */ +static inline void tile_pmu_read(struct perf_event *event) +{ + tile_perf_event_update(event); +} + +/* + * Map generic events to Tile PMU. + */ +static int tile_map_hw_event(u64 config) +{ + if (config >= tile_pmu->max_events) + return -EINVAL; + return tile_pmu->hw_events[config]; +} + +/* + * Map generic hardware cache events to Tile PMU. + */
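For reference, tile_map_cache_event() below decodes the generic perf ABI encoding for PERF_TYPE_HW_CACHE events: the cache id lives in bits 0-7 of attr->config, the operation in bits 8-15, and the result in bits 16-23. A minimal user-space sketch (illustrative only, not part of this patch) selecting the TILE-Gx L1D read-miss event (0x44 in the table above):

    #include <linux/perf_event.h>

    /* L1D | (READ << 8) | (MISS << 16), matching the decoding below. */
    struct perf_event_attr attr = {
            .type   = PERF_TYPE_HW_CACHE,
            .size   = sizeof(struct perf_event_attr),
            .config = PERF_COUNT_HW_CACHE_L1D |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
    };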
+static int tile_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + int code; + + if (!tile_pmu->cache_events) + return -ENOENT; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + code = (*tile_pmu->cache_events)[cache_type][cache_op][cache_result]; + if (code == TILE_OP_UNSUPP) + return -EINVAL; + + return code; +} + +static void tile_event_destroy(struct perf_event *event) +{ + if (atomic_dec_return(&tile_active_events) == 0) + release_pmc_hardware(); +} + +static int __tile_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + int code; + + switch (attr->type) { + case PERF_TYPE_HARDWARE: + code = tile_pmu->map_hw_event(attr->config); + break; + case PERF_TYPE_HW_CACHE: + code = tile_pmu->map_cache_event(attr->config); + break; + case PERF_TYPE_RAW: + code = attr->config & TILE_EVENT_MASK; + break; + default: + /* Should not happen. */ + return -EOPNOTSUPP; + } + + if (code < 0) + return code; + + hwc->config = code; + hwc->idx = -1; + + if (attr->exclude_user) + hwc->config |= TILE_CTL_EXCL_USER; + + if (attr->exclude_kernel) + hwc->config |= TILE_CTL_EXCL_KERNEL; + + if (attr->exclude_hv) + hwc->config |= TILE_CTL_EXCL_HV; + + if (!hwc->sample_period) { + hwc->sample_period = tile_pmu->max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + event->destroy = tile_event_destroy; + return 0; +} + +static int tile_event_init(struct perf_event *event) +{ + int err = 0; + perf_irq_t old_irq_handler = NULL; + + if (atomic_inc_return(&tile_active_events) == 1) + old_irq_handler = reserve_pmc_hardware(tile_pmu_handle_irq); + + if (old_irq_handler) { + pr_warn("PMC hardware busy (reserved by oprofile)\n"); + + atomic_dec(&tile_active_events); + return -EBUSY; + } + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + err = __tile_event_init(event); + if (err) { + if (event->destroy) + event->destroy(event); + } + return err; +} + +static struct pmu tilera_pmu = { + .event_init = tile_event_init, + .add = tile_pmu_add, + .del = tile_pmu_del, + + .start = tile_pmu_start, + .stop = tile_pmu_stop, + + .read = tile_pmu_read, +}; + +/* + * PMU IRQ handler; the PMU's two interrupts share the same handler. 
+ */ +int tile_pmu_handle_irq(struct pt_regs *regs, int fault) +{ + struct perf_sample_data data; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct perf_event *event; + struct hw_perf_event *hwc; + u64 val; + unsigned long status; + int bit; + + __get_cpu_var(perf_irqs)++; + + if (!atomic_read(&tile_active_events)) + return 0; + + status = pmc_get_overflow(); + pmc_ack_overflow(status); + + for_each_set_bit(bit, &status, tile_pmu->num_counters) { + + event = cpuc->events[bit]; + + if (!event) + continue; + + if (!test_bit(bit, cpuc->active_mask)) + continue; + + hwc = &event->hw; + + val = tile_perf_event_update(event); + if (val & (1ULL << (tile_pmu->cntval_bits - 1))) + continue; + + perf_sample_data_init(&data, 0, event->hw.last_period); + if (!tile_event_set_period(event)) + continue; + + if (perf_event_overflow(event, &data, regs)) + tile_pmu_stop(event, 0); + } + + return 0; +} + +static bool __init supported_pmu(void) +{ + tile_pmu = &tilepmu; + return true; +} + +int __init init_hw_perf_events(void) +{ + supported_pmu(); + perf_pmu_register(&tilera_pmu, "cpu", PERF_TYPE_RAW); + return 0; +} +arch_initcall(init_hw_perf_events); + +/* Callchain handling code. */ + +/* + * Tile specific backtracing code for perf_events. + */ +static inline void perf_callchain(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + struct KBacktraceIterator kbt; + unsigned int i; + + /* + * Get the address just after the "jalr" instruction that + * jumps to the handler for a syscall. When we find this + * address in a backtrace, we silently ignore it, which gives + * us a one-step backtrace connection from the sys_xxx() + * function in the kernel to the xxx() function in libc. + * Otherwise, we lose the ability to properly attribute time + * from the libc calls to the kernel implementations, since + * oprofile only considers PCs from backtraces a pair at a time. + */ + unsigned long handle_syscall_pc = handle_syscall_link_address(); + + KBacktraceIterator_init(&kbt, NULL, regs); + kbt.profile = 1; + + /* + * The sample for the pc is already recorded. Now we are adding the + * address of the callsites on the stack. Our iterator starts + * with the frame of the (already sampled) call site. If our + * iterator contained a "return address" field, we could have just + * used it and wouldn't have needed to skip the first + * frame. That's in effect what the arm and x86 versions do. + * Instead we peel off the first iteration to get the equivalent + * behavior. + */ + + if (KBacktraceIterator_end(&kbt)) + return; + KBacktraceIterator_next(&kbt); + + /* + * Set stack depth to 16 for user and kernel space respectively, that + * is, total 32 stack frames. + */ + for (i = 0; i < 16; ++i) { + unsigned long pc; + if (KBacktraceIterator_end(&kbt)) + break; + pc = kbt.it.pc; + if (pc != handle_syscall_pc) + perf_callchain_store(entry, pc); + KBacktraceIterator_next(&kbt); + } +} + +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + perf_callchain(entry, regs); +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + perf_callchain(entry, regs); +} diff --git a/arch/tile/kernel/pmc.c b/arch/tile/kernel/pmc.c new file mode 100644 index 000000000000..db62cc34b955 --- /dev/null +++ b/arch/tile/kernel/pmc.c @@ -0,0 +1,121 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/atomic.h> +#include <linux/interrupt.h> + +#include <asm/processor.h> +#include <asm/pmc.h> + +perf_irq_t perf_irq = NULL; +int handle_perf_interrupt(struct pt_regs *regs, int fault) +{ + int retval; + + if (!perf_irq) + panic("Unexpected PERF_COUNT interrupt %d\n", fault); + + nmi_enter(); + retval = perf_irq(regs, fault); + nmi_exit(); + return retval; +} + +/* Reserve PMC hardware if it is available. */ +perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq) +{ + return cmpxchg(&perf_irq, NULL, new_perf_irq); +} +EXPORT_SYMBOL(reserve_pmc_hardware); + +/* Release PMC hardware. */ +void release_pmc_hardware(void) +{ + perf_irq = NULL; +} +EXPORT_SYMBOL(release_pmc_hardware); + + +/* + * Get the current overflow status of the base and auxiliary + * performance counters. + */ +unsigned long +pmc_get_overflow(void) +{ + unsigned long status; + + /* + * merge base+aux into a single vector + */ + status = __insn_mfspr(SPR_PERF_COUNT_STS); + status |= __insn_mfspr(SPR_AUX_PERF_COUNT_STS) << TILE_BASE_COUNTERS; + return status; +} + +/* + * Clear the status bit for the corresponding counter, if written + * with a one. + */ +void +pmc_ack_overflow(unsigned long status) +{ + /* + * clear overflow status by writing ones + */ + __insn_mtspr(SPR_PERF_COUNT_STS, status); + __insn_mtspr(SPR_AUX_PERF_COUNT_STS, status >> TILE_BASE_COUNTERS); +} + +/* + * The perf count interrupts are masked and unmasked explicitly, + * and only here. The normal irq_enable() does not enable them, + * and irq_disable() does not disable them. That lets these + * routines drive the perf count interrupts orthogonally. + * + * We also mask the perf count interrupts on entry to the perf count + * interrupt handler in assembly code, and by default unmask them + * again (with interrupt critical section protection) just before + * returning from the interrupt. If the perf count handler returns + * a non-zero error code, then we don't re-enable them before returning. + * + * For Pro, we rely on both interrupts being in the same word to update + * them atomically so we never have one enabled and one disabled. + */
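The reserve_pmc_hardware() call above arbitrates the counters between clients with a single cmpxchg(): the return value is the previous handler, so NULL means the caller now owns the PMC. A minimal sketch of how a hypothetical second client would honor that protocol (my_pmc_handler and my_init are illustrative names, not part of this patch):

    #include <linux/errno.h>
    #include <asm/pmc.h>

    /* Hypothetical competing profiler. Per the comment above, returning
     * 0 lets the interrupt be unmasked again on return; a non-zero
     * error code leaves it masked.
     */
    static int my_pmc_handler(struct pt_regs *regs, int fault)
    {
            return 0;
    }

    static int my_init(void)
    {
            /* Non-NULL means another client (e.g. perf) holds the PMC. */
            if (reserve_pmc_hardware(my_pmc_handler) != NULL)
                    return -EBUSY;
            return 0;
    }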
+ +#if CHIP_HAS_SPLIT_INTR_MASK() +# if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 +# error Fix assumptions about which word PERF_COUNT interrupts are in +# endif +#endif + +static inline unsigned long long pmc_mask(void) +{ + unsigned long long mask = 1ULL << INT_PERF_COUNT; + mask |= 1ULL << INT_AUX_PERF_COUNT; + return mask; +} + +void unmask_pmc_interrupts(void) +{ + interrupt_mask_reset_mask(pmc_mask()); +} + +void mask_pmc_interrupts(void) +{ + interrupt_mask_set_mask(pmc_mask()); +} diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c index 681100c59fda..6829a9508649 100644 --- a/arch/tile/kernel/proc.c +++ b/arch/tile/kernel/proc.c @@ -113,7 +113,7 @@ arch_initcall(proc_tile_init); * Support /proc/sys/tile directory */ -static ctl_table unaligned_subtable[] = { +static struct ctl_table unaligned_subtable[] = { { .procname = "enabled", .data = &unaligned_fixup, @@ -138,7 +138,7 @@ static ctl_table unaligned_subtable[] = { {} }; -static ctl_table unaligned_table[] = { +static struct ctl_table unaligned_table[] = { { .procname = "unaligned_fixup", .mode = 0555, diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 5d10642db63e..462dcd0c1700 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -236,7 +236,15 @@ cycles_t ns2cycles(unsigned long nsecs) * clock frequency. */ struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); - return ((u64)nsecs * dev->mult) >> dev->shift; + + /* + * As in clocksource.h and x86's timer.h, we split the calculation + * into 2 parts to avoid unnecessary overflow of the intermediate + * value. This will not lead to any loss of precision. + */ + u64 quot = (u64)nsecs >> dev->shift; + u64 rem = (u64)nsecs & ((1ULL << dev->shift) - 1); + return quot * dev->mult + ((rem * dev->mult) >> dev->shift); } void update_vsyscall_tz(void) diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile index e2b7a2f4ee41..a025f63d54cd 100644 --- a/arch/tile/kernel/vdso/Makefile +++ b/arch/tile/kernel/vdso/Makefile @@ -104,7 +104,7 @@ $(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32) $(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) $(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c - $(call if_changed,cc_o_c) + $(call if_changed_rule,cc_o_c) $(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S $(call if_changed,as_o_S) diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 004ba568d93f..33294fdc402e 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order) if (put_page_testzero(page)) { homecache_change_page_home(page, order, PAGE_HOME_HASH); if (order == 0) { - free_hot_cold_page(page, 0); + free_hot_cold_page(page, false); } else { init_page_count(page); __free_pages(page, order); diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 0cb3bbaa580c..e514899e1100 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -166,11 +166,6 @@ int pud_huge(pud_t pud) return !!(pud_val(pud) & _PAGE_HUGE_PAGE); } -int pmd_huge_support(void) -{ - return 1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) {
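To make the ns2cycles() change in the time.c hunk above concrete, here is a small freestanding check of the identity it relies on. The split is exact because the replaced term (quot << shift) * mult has all-zero bits below shift, so the right shift distributes over the sum; the mult/shift values here are arbitrary stand-ins for dev->mult and dev->shift:

    #include <assert.h>
    #include <stdint.h>

    /* Demonstrates that quot * mult + ((rem * mult) >> shift) equals
     * (nsecs * mult) >> shift while keeping every intermediate product
     * well inside 64 bits.
     */
    int main(void)
    {
            uint64_t nsecs = 123456789, mult = 48828125;
            unsigned int shift = 24;

            uint64_t quot = nsecs >> shift;
            uint64_t rem  = nsecs & ((1ULL << shift) - 1);
            uint64_t split = quot * mult + ((rem * mult) >> shift);

            /* nsecs * mult still fits in 64 bits for these sample
             * values, so the naive form can be checked directly.
             */
            assert(split == (nsecs * mult) >> shift);
            return 0;
    }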