author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-15 01:20:12 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-15 01:20:12 +0300
commit		2990821d0e38d2bfc556ad39d709b5f8a83c2ebd (patch)
tree		666da0573384cf73e8a6e2e60e4ef79ff92a2b5b /arch
parent		869c34f5208adde010065c387354f2ebe4ec3bfc (diff)
parent		9ced975711d605d7deb27ef027e0f9816a51b479 (diff)
download	linux-2990821d0e38d2bfc556ad39d709b5f8a83c2ebd.tar.xz
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (26 commits)
MIPS: Alchemy: Fix reset for MTX-1 and XXS1500
MIPS: MTX-1: Make au1000_eth probe all PHY addresses
MIPS: Jz4740: Add HAVE_CLK
MIPS: Move idle task creation to work queue
MIPS, Perf-events: Use unsigned delta for right shift in event update
MIPS, Perf-events: Work with the new callchain interface
MIPS, Perf-events: Fix event check in validate_event()
MIPS, Perf-events: Work with the new PMU interface
MIPS, Perf-events: Work with irq_work
MIPS: Fix always CONFIG_LOONGSON_UART_BASE=y
MIPS: Loongson: Fix potentially wrong string handling
MIPS: Fix GCC-4.6 'set but not used' warning in arch/mips/mm/init.c
MIPS: Fix GCC-4.6 'set but not used' warning in ieee754int.h
MIPS: Remove unused code from arch/mips/kernel/syscall.c
MIPS: Fix GCC-4.6 'set but not used' warning in signal*.c
MIPS: MSP: Fix MSP71xx bpci interrupt handler return value
MIPS: Select R4K timer lib for all MSP platforms
MIPS: Loongson: Remove ad-hoc cmdline default
MIPS: Clear the correct flag in sysmips(MIPS_FIXADE, ...).
MIPS: Add an unreachable return statement to satisfy buggy GCCs.
...
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/Kconfig                        |   4
-rw-r--r--  arch/mips/alchemy/mtx-1/board_setup.c    |   4
-rw-r--r--  arch/mips/alchemy/mtx-1/platform.c       |   9
-rw-r--r--  arch/mips/alchemy/xxs1500/board_setup.c  |   4
-rw-r--r--  arch/mips/include/asm/perf_event.h       |  12
-rw-r--r--  arch/mips/kernel/ftrace.c                | 179
-rw-r--r--  arch/mips/kernel/perf_event.c            | 345
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c     |   4
-rw-r--r--  arch/mips/kernel/signal.c                |   2
-rw-r--r--  arch/mips/kernel/signal32.c              |   2
-rw-r--r--  arch/mips/kernel/smp.c                   |  31
-rw-r--r--  arch/mips/kernel/syscall.c               |   5
-rw-r--r--  arch/mips/kernel/vpe.c                   |   4
-rw-r--r--  arch/mips/loongson/Kconfig               |   5
-rw-r--r--  arch/mips/loongson/common/cmdline.c      |   5
-rw-r--r--  arch/mips/loongson/common/machtype.c     |   3
-rw-r--r--  arch/mips/math-emu/ieee754int.h          |   4
-rw-r--r--  arch/mips/mm/init.c                      |   2
-rw-r--r--  arch/mips/mm/tlbex.c                     |   2
-rw-r--r--  arch/mips/pci/ops-pmcmsp.c               |   4
-rw-r--r--  arch/mips/pmc-sierra/Kconfig             |   4
-rw-r--r--  arch/mips/pmc-sierra/msp71xx/msp_time.c  |   2
22 files changed, 331 insertions(+), 305 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5ecc0566bc2..d88983516e26 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,7 @@ config MIPS
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
 	select HAVE_PWM
+	select HAVE_CLK
 
 config LASAT
 	bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
 config PMC_MSP
 	bool "PMC-Sierra MSP chipsets"
 	depends on EXPERIMENTAL
+	select CEVT_R4K
+	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SWAP_IO_SPACE
 	select NO_EXCEPT_FILL
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 6398fa95905c..40b84b991191 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
 
 static void mtx1_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void mtx1_power_off(void)
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index e30e42add697..956f946218c5 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -28,6 +28,8 @@
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
 	{
 		.gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
 	&mtx1_mtd,
 };
 
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+	.phy_search_highest_addr	= 1,
+	.phy1_search_mac0		= 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
 	int rc;
 
+	au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
 	rc = gpio_request(mtx1_gpio_button[0].gpio,
 				mtx1_gpio_button[0].desc);
 	if (rc < 0) {
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index b43c918925d3..80c521e5290d 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
 
 static void xxs1500_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void xxs1500_power_off(void)
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
index e00007cf8162..d0c77496c728 100644
--- a/arch/mips/include/asm/perf_event.h
+++ b/arch/mips/include/asm/perf_event.h
@@ -11,15 +11,5 @@
 
 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Leave it empty here. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f11231..94ca2b018af7 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-	return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                                  1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                                  1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
 	unsigned long ip = rec->ip;
 
 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call, otherwise, long call is
+	 * needed.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 *  sub sp, sp, 8
-		 *                     1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
-#else
-		/*
-		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 *  nop | move $12, ra_address | sub sp, sp, 8
-		 *                     1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
-#endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount --> nop
-		 */
-		new = INSN_NOP;
-	}
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
 	return ftrace_modify_code(ip, new);
 }
 
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP	(0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;
 
 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For module, move the ip from the return address after the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;
 
-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);
 
 	sp = fp + (code & OFFSET_MASK);
 
-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;
 
-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
+	 * "parent_ra_addr" is the stack address saved the return address of
+	 * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */
 
-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-							 (unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If fails when getting the stack address of the non-leaf function's
 	 * ra, stop function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}
 
-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f703b83..a8244854d3dc 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* To look for a free counter for this event. */
-	idx = mipspmu->alloc_counter(cpuc, hwc);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then
-	 * make sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	mipspmu->disable_event(idx);
-	cpuc->events[idx] = event;
-
-	/* Set the period for the event. */
-	mipspmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	mipspmu->enable_event(hwc, idx);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
 				 struct hw_perf_event *hwc,
 				 int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
 	unsigned long flags;
 	int shift = 64 - TOTAL_BITS;
 	s64 prev_raw_count, new_raw_count;
-	s64 delta;
+	u64 delta;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
 	return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	mipspmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		mipspmu->disable_event(hwc->idx);
+		barrier();
+		mipspmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	int idx;
+	int err = 0;
 
+	perf_pmu_disable(event->pmu);
 
-	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+	/* To look for a free counter for this event. */
+	idx = mipspmu->alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
 
-	/* We are working on a local event. */
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
 	mipspmu->disable_event(idx);
+	cpuc->events[idx] = event;
 
-	barrier();
-
-	mipspmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		mipspmu_start(event, PERF_EF_RELOAD);
 
+	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
+static void mipspmu_del(struct perf_event *event, int flags)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 
-	mipspmu->enable_event(hwc, hwc->idx);
+	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+	mipspmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
 }
 
 static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
 	mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-	.enable		= mipspmu_enable,
-	.disable	= mipspmu_disable,
-	.unthrottle	= mipspmu_unthrottle,
-	.read		= mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
 	perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)mipspmu->num_counters, 1);
+		mipspmu_free_irq();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+		(event->cpu >= 0 && !cpu_online(event->cpu)))
+		return -ENODEV;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+			atomic_dec(&active_events);
+			return -ENOSPC;
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0)
+			err = mipspmu_get_irq();
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= mipspmu_enable,
+	.pmu_disable	= mipspmu_disable,
+	.event_init	= mipspmu_event_init,
+	.add		= mipspmu_add,
+	.del		= mipspmu_del,
+	.start		= mipspmu_start,
+	.stop		= mipspmu_stop,
+	.read		= mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_hwc = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	/* Allow mixed event group. So return 1 to pass validation. */
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
 	return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	if (atomic_dec_and_mutex_lock(&active_events,
-				&pmu_reserve_mutex)) {
-		/*
-		 * We must not call the destroy function with interrupts
-		 * disabled.
-		 */
-		on_each_cpu(reset_counters,
-			(void *)(long)mipspmu->num_counters, 1);
-		mipspmu_free_irq();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = 0;
-
-	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-		(event->cpu >= 0 && !cpu_online(event->cpu)))
-		return ERR_PTR(-ENODEV);
-
-	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
-		}
-
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0)
-			err = mipspmu_get_irq();
-
-		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-
-	if (err)
-		return ERR_PTR(err);
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-	if (mipspmu)
-		mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-	if (mipspmu)
-		mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
 * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+		    struct pt_regs *regs)
 {
 }
 
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 	while (!kstack_end(sp)) {
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
-			callchain_store(entry, addr);
+			perf_callchain_store(entry, addr);
 			if (entry->nr >= PERF_MAX_STACK_DEPTH)
 				break;
 		}
 	}
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+		      struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
 	unsigned long ra = regs->regs[31];
 	unsigned long pc = regs->cp0_epc;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
 			(unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
 		return;
 	}
 	do {
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 		if (entry->nr >= PERF_MAX_STACK_DEPTH)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
 #else
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d226669..d9a7db78ed62 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 	 * interrupt, not NMI.
 	 */
 	if (handled == IRQ_HANDLED)
-		perf_event_do_pending();
+		irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
 	read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
 			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
 			irq < 0 ? " (share with timer interrupt)" : "");
 
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
 	return 0;
 }
 early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca39..dbbe0ce48d89 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2e..aae986613795 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb49..32a256101082 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
  */
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 * Linux can schedule processes on this slave.
 	 */
 	if (!cpu_idle_thread[cpu]) {
-		idle = fork_idle(cpu);
-		cpu_idle_thread[cpu] = idle;
+		/*
+		 * Schedule work item to avoid forking user task
+		 * Ported from arch/x86/kernel/smpboot.c
+		 */
+		struct create_idle c_idle = {
+			.cpu    = cpu,
+			.done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+		};
+
+		INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		idle = cpu_idle_thread[cpu] = c_idle.idle;
 
 		if (IS_ERR(idle))
 			panic(KERN_ERR "Fork failed for CPU %d", cpu);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e0..58beabf50b3c 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-	long cmd, arg1, arg2, arg3;
+	long cmd, arg1, arg2;
 
 	cmd = regs.regs[4];
 	arg1 = regs.regs[5];
 	arg2 = regs.regs[6];
-	arg3 = regs.regs[7];
 
 	switch (cmd) {
 	case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 		if (arg1 & 2)
 			set_thread_flag(TIF_LOGADE);
 		else
-			clear_thread_flag(TIF_FIXADE);
+			clear_thread_flag(TIF_LOGADE);
 
 		return 0;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fde..ab52b7cf3b6b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
 	spinlock_t tc_list_lock;
 	struct list_head tc_list;	/* Thread contexts */
 } vpecontrol = {
-	.vpe_list_lock	= SPIN_LOCK_UNLOCKED,
+	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
-	.tc_list_lock	= SPIN_LOCK_UNLOCKED,
+	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 6e1b77fec7ea..aca93eed8779 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,6 +1,7 @@
+if MACH_LOONGSON
+
 choice
 	prompt "Machine Type"
-	depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
 config LOONGSON_MC146818
 	bool
 	default n
+
+endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 1a06defc4f7f..353e1d2e41a5 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
 		strcat(arcs_cmdline, " ");
 	}
 
-	if ((strstr(arcs_cmdline, "console=")) == NULL)
-		strcat(arcs_cmdline, " console=ttyS0,115200");
-	if ((strstr(arcs_cmdline, "root=")) == NULL)
-		strcat(arcs_cmdline, " root=/dev/hda1");
-
 	prom_init_machtype();
 }
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 81fbe6b73f91..2efd5d9dee27 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
 
 void __init prom_init_machtype(void)
 {
-	char *p, str[MACHTYPE_LEN];
+	char *p, str[MACHTYPE_LEN + 1];
 	int machtype = MACH_LEMOTE_FL2E;
 
 	mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
 	}
 	p += strlen("machtype=");
 	strncpy(str, p, MACHTYPE_LEN);
+	str[MACHTYPE_LEN] = '\0';
 	p = strstr(str, " ");
 	if (p)
 		*p = '\0';
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2701d9500959..2a7d43f4f161 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
 
 
 #define COMPXSP \
-	unsigned xm; int xe; int xs; int xc
+	unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
 	unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
 
 
 #define COMPXDP \
-u64 xm; int xe; int xs; int xc
+u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
 u64 ym; int ye; int ys; int yc
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2efcbd24c82f..279599e9a779 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long lastpfn;
+	unsigned long lastpfn __maybe_unused;
 
 	pagetable_init();
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 083d3412d0bc..04f9e17db9d0 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
 static int scratchpad_offset(int i)
 {
 	BUG();
+	/* Really unreachable, but evidently some GCC want this. */
+	return 0;
 }
 #endif
 /*
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index b7c03d80c88c..68798f869c0f 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
 *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
 *
 ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
 	struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
 	unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
 	/* write to clear all asserted interrupts */
 	preg->if_status = stat;
 
-	return PCIBIOS_SUCCESSFUL;
+	return IRQ_HANDLED;
 }
 
 /*****************************************************************************
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index c139988bb85d..8d798497c614 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,15 +4,11 @@ choice
 
 config PMC_MSP4200_EVAL
 	bool "PMC-Sierra MSP4200 Eval Board"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
 config PMC_MSP4200_GW
 	bool "PMC-Sierra MSP4200 VoIP Gateway"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index cca64e15f57f..01df84ce31e2 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
 		mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
 	return MSP_INT_VPE0_TIMER;
 }