From e0eca07badc023a675a61906020b397da20f07c3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 May 2008 23:49:43 -0400 Subject: ftrace, POWERPC: add irqs_disabled_flags to ppc PPC doesn't have the irqs_disabled_flags needed by ftrace. This patch adds it. Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/asm-powerpc/hw_irq.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h index ad8c9f7fd0e3..f75a5fc64d2e 100644 --- a/include/asm-powerpc/hw_irq.h +++ b/include/asm-powerpc/hw_irq.h @@ -59,6 +59,11 @@ extern void iseries_handle_interrupts(void); get_paca()->hard_enabled = 0; \ } while(0) +static inline int irqs_disabled_flags(unsigned long flags) +{ + return flags == 0; +} + #else #if defined(CONFIG_BOOKE) @@ -113,6 +118,11 @@ static inline void local_irq_save_ptr(unsigned long *flags) #define hard_irq_enable() local_irq_enable() #define hard_irq_disable() local_irq_disable() +static inline int irqs_disabled_flags(unsigned long flags) +{ + return (flags & MSR_EE) == 0; +} + #endif /* CONFIG_PPC64 */ /* -- cgit v1.2.3 From ccbfac2923c9febaeaf07a50054027a92b502718 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 22 May 2008 14:31:07 -0400 Subject: ftrace: powerpc clean ups This patch cleans up the ftrace code in PowerPC based on the comments from Michael Ellerman. Signed-off-by: Steven Rostedt Cc: Michael Ellerman Cc: proski@gnu.org Cc: a.p.zijlstra@chello.nl Cc: Pekka Paalanen Cc: Steven Rostedt Cc: linuxppc-dev@ozlabs.org Cc: Soeren Sandmann Pedersen Cc: paulus@samba.org Signed-off-by: Thomas Gleixner --- arch/powerpc/kernel/entry_32.S | 11 ++--------- arch/powerpc/kernel/ftrace.c | 8 +++++++- arch/powerpc/kernel/ppc_ksyms.c | 5 +++++ arch/powerpc/kernel/setup_32.c | 5 ----- arch/powerpc/kernel/setup_64.c | 5 ----- include/asm-powerpc/ftrace.h | 6 ++++++ 6 files changed, 20 insertions(+), 20 deletions(-) create mode 100644 include/asm-powerpc/ftrace.h (limited to 'include/asm-powerpc') diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 0e6221889ca9..3b1dd29d9f91 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1129,18 +1129,11 @@ _GLOBAL(_mcount) stw r5, 8(r1) LOAD_REG_ADDR(r5, ftrace_trace_function) -#if 0 - mtctr r3 - mr r1, r5 - bctrl -#endif lwz r5,0(r5) -#if 1 + mtctr r5 bctrl -#else - bl ftrace_stub -#endif + nop lwz r6, 8(r1) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 5a4993fefa45..69ed41223468 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -51,10 +51,16 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { static unsigned int op; + /* + * It would be nice to just use create_function_call, but that will + * update the code itself. Here we need to just return the + * instruction that is going to be modified, without modifying the + * code. 
+ */ addr = GET_ADDR(addr); /* Set to "bl addr" */ - op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffe); + op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc); /* * No locking needed, this must be called via kstop_machine diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index cf6b5a7d8b3f..4300db52662a 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -43,6 +43,7 @@ #include #include #include +#include #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); @@ -68,6 +69,10 @@ EXPORT_SYMBOL(single_step_exception); EXPORT_SYMBOL(sys_sigreturn); #endif +#ifdef CONFIG_FTRACE +EXPORT_SYMBOL(_mcount); +#endif + EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 22f8e2bacd32..19e8fcb9cea8 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -47,11 +47,6 @@ #include #endif -#ifdef CONFIG_FTRACE -extern void _mcount(void); -EXPORT_SYMBOL(_mcount); -#endif - extern void bootx_init(unsigned long r4, unsigned long phys); int boot_cpuid; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 277bf18cbbcc..098fd96a394a 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -85,11 +85,6 @@ struct ppc64_caches ppc64_caches = { }; EXPORT_SYMBOL_GPL(ppc64_caches); -#ifdef CONFIG_FTRACE -extern void _mcount(void); -EXPORT_SYMBOL(_mcount); -#endif - /* * These are used in binfmt_elf.c to put aux entries on the stack * for each elf executable being started. diff --git a/include/asm-powerpc/ftrace.h b/include/asm-powerpc/ftrace.h new file mode 100644 index 000000000000..b1bfa704b6e5 --- /dev/null +++ b/include/asm-powerpc/ftrace.h @@ -0,0 +1,6 @@ +#ifndef _ASM_POWERPC_FTRACE +#define _ASM_POWERPC_FTRACE + +extern void _mcount(void); + +#endif -- cgit v1.2.3 From 395a59d0f8e86bb39cd700c3d185d30c670bb958 Mon Sep 17 00:00:00 2001 From: Abhishek Sagar Date: Sat, 21 Jun 2008 23:47:27 +0530 Subject: ftrace: store mcount address in rec->ip Record the address of the mcount call-site. Currently all archs except sparc64 record the address of the instruction following the mcount call-site. Some general cleanups are entailed. Storing mcount addresses in rec->ip enables looking them up in the kprobe hash table later on to check if they're kprobe'd. 
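To make the address arithmetic concrete, here is a minimal user-space sketch (illustrative only: the addresses are made up and bl_insn() is a hypothetical stand-in for the powerpc ftrace_call_replace()). Because a powerpc branch offset is relative to the branch instruction itself, recording the call-site in rec->ip lets ftrace_calc_offset() become a plain subtraction, with each entry stub doing "subi r3, r3, MCOUNT_INSN_SIZE" to turn the link-register value back into the call-site address:

#include <stdio.h>

#define MCOUNT_INSN_SIZE 4	/* sizeof mcount call on powerpc */

/* powerpc "bl addr": opcode 0x48000001 plus a 26-bit, word-aligned offset */
static unsigned int bl_insn(unsigned long ip, unsigned long addr)
{
	return 0x48000001 | ((addr - ip) & 0x03fffffc);
}

int main(void)
{
	unsigned long lr = 0xc0001008;		/* link register as seen inside mcount */
	unsigned long ip = lr - MCOUNT_INSN_SIZE;	/* call-site stored in rec->ip */

	printf("rec->ip = 0x%lx, bl = 0x%08x\n", ip, bl_insn(ip, 0xc0200000UL));
	return 0;
}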
Signed-off-by: Abhishek Sagar Cc: davem@davemloft.net Cc: Steven Rostedt Signed-off-by: Ingo Molnar --- arch/arm/kernel/armksyms.c | 10 +++++----- arch/arm/kernel/entry-common.S | 4 ++++ arch/arm/kernel/ftrace.c | 16 +++++++--------- arch/powerpc/kernel/entry_32.S | 4 ++++ arch/powerpc/kernel/entry_64.S | 5 ++++- arch/powerpc/kernel/ftrace.c | 21 +++++++-------------- arch/sparc64/kernel/ftrace.c | 10 ++++++---- arch/sparc64/kernel/sparc64_ksyms.c | 2 +- arch/x86/kernel/entry_32.S | 4 ++++ arch/x86/kernel/entry_64.S | 4 ++++ arch/x86/kernel/ftrace.c | 26 +++++++++----------------- arch/x86/kernel/i386_ksyms_32.c | 2 +- arch/x86/kernel/x8664_ksyms_64.c | 2 +- include/asm-arm/ftrace.h | 14 ++++++++++++++ include/asm-powerpc/ftrace.h | 8 ++++++++ include/asm-sparc64/ftrace.h | 14 ++++++++++++++ include/asm-x86/ftrace.h | 14 ++++++++++++++ include/linux/ftrace.h | 3 +-- kernel/trace/ftrace.c | 3 ++- 19 files changed, 110 insertions(+), 56 deletions(-) create mode 100644 include/asm-arm/ftrace.h create mode 100644 include/asm-sparc64/ftrace.h create mode 100644 include/asm-x86/ftrace.h (limited to 'include/asm-powerpc') diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 3b132215cbf8..cc7b246e9652 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -18,6 +18,7 @@ #include #include #include +#include /* * libgcc functions - functions that are used internally by the @@ -48,11 +49,6 @@ extern void __aeabi_ulcmp(void); extern void fpundefinstr(void); extern void fp_enter(void); -#ifdef CONFIG_FTRACE -extern void mcount(void); -EXPORT_SYMBOL(mcount); -#endif - /* * This has a special calling convention; it doesn't * modify any of the usual registers, except for LR. @@ -186,3 +182,7 @@ EXPORT_SYMBOL(_find_next_bit_be); #endif EXPORT_SYMBOL(copy_page); + +#ifdef CONFIG_FTRACE +EXPORT_SYMBOL(mcount); +#endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 8f79a4789ed4..84694e88b428 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -9,6 +9,7 @@ */ #include +#include #include #include "entry-header.S" @@ -104,6 +105,7 @@ ENTRY(ret_from_fork) ENTRY(mcount) stmdb sp!, {r0-r3, lr} mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE .globl mcount_call mcount_call: @@ -114,6 +116,7 @@ ENTRY(ftrace_caller) stmdb sp!, {r0-r3, lr} ldr r1, [fp, #-4] mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE .globl ftrace_call ftrace_call: @@ -134,6 +137,7 @@ ENTRY(mcount) trace: ldr r1, [fp, #-4] mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE mov lr, pc mov pc, r2 ldmia sp!, {r0-r3, pc} diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 22f3d6e309f9..76d50e6091bc 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -12,9 +12,10 @@ */ #include + #include +#include -#define INSN_SIZE 4 #define PC_OFFSET 8 #define BL_OPCODE 0xeb000000 #define BL_OFFSET_MASK 0x00ffffff @@ -32,10 +33,10 @@ unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) { long offset; - offset = (long)addr - (long)(pc - INSN_SIZE + PC_OFFSET); + offset = (long)addr - (long)(pc + PC_OFFSET); if (unlikely(offset < -33554432 || offset > 33554428)) { /* Can't generate branches that far (from ARM ARM). Ftrace - * doesn't generate branches outside of core kernel text. + * doesn't generate branches outside of kernel text. 
*/ WARN_ON_ONCE(1); return NULL; @@ -52,7 +53,6 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code, old = *(unsigned long *)old_code; new = *(unsigned long *)new_code; - pc -= INSN_SIZE; __asm__ __volatile__ ( "1: ldr %1, [%2] \n" @@ -77,7 +77,7 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code, : "memory"); if (!err && (replaced == old)) - flush_icache_range(pc, pc + INSN_SIZE); + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); return err; } @@ -89,8 +89,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned char *new; pc = (unsigned long)&ftrace_call; - pc += INSN_SIZE; - memcpy(&old, &ftrace_call, INSN_SIZE); + memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(pc, (unsigned long)func); ret = ftrace_modify_code(pc, (unsigned char *)&old, new); return ret; @@ -103,8 +102,7 @@ int ftrace_mcount_set(unsigned long *data) unsigned char *new; pc = (unsigned long)&mcount_call; - pc += INSN_SIZE; - memcpy(&old, &mcount_call, INSN_SIZE); + memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(pc, *addr); *addr = ftrace_modify_code(pc, (unsigned char *)&old, new); return 0; diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3b1dd29d9f91..7231a708af0d 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -30,6 +30,7 @@ #include #include #include +#include #undef SHOW_SYSCALLS #undef SHOW_SYSCALLS_TASK @@ -1053,6 +1054,7 @@ _GLOBAL(_mcount) stw r10,40(r1) stw r3, 44(r1) stw r5, 8(r1) + subi r3, r3, MCOUNT_INSN_SIZE .globl mcount_call mcount_call: bl ftrace_stub @@ -1090,6 +1092,7 @@ _GLOBAL(ftrace_caller) stw r10,40(r1) stw r3, 44(r1) stw r5, 8(r1) + subi r3, r3, MCOUNT_INSN_SIZE .globl ftrace_call ftrace_call: bl ftrace_stub @@ -1128,6 +1131,7 @@ _GLOBAL(_mcount) stw r3, 44(r1) stw r5, 8(r1) + subi r3, r3, MCOUNT_INSN_SIZE LOAD_REG_ADDR(r5, ftrace_trace_function) lwz r5,0(r5) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 2c4d9e056ead..2f511a969d2c 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -31,6 +31,7 @@ #include #include #include +#include /* * System calls. 
@@ -879,6 +880,7 @@ _GLOBAL(_mcount) mflr r3 stdu r1, -112(r1) std r3, 128(r1) + subi r3, r3, MCOUNT_INSN_SIZE .globl mcount_call mcount_call: bl ftrace_stub @@ -895,6 +897,7 @@ _GLOBAL(ftrace_caller) stdu r1, -112(r1) std r3, 128(r1) ld r4, 16(r11) + subi r3, r3, MCOUNT_INSN_SIZE .globl ftrace_call ftrace_call: bl ftrace_stub @@ -916,7 +919,7 @@ _GLOBAL(_mcount) std r3, 128(r1) ld r4, 16(r11) - + subi r3, r3, MCOUNT_INSN_SIZE LOAD_REG_ADDR(r5,ftrace_trace_function) ld r5,0(r5) ld r5,0(r5) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index e12c593ab9ca..3855ceb937b0 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -15,8 +15,8 @@ #include #include +#include -#define CALL_BACK 4 static unsigned int ftrace_nop = 0x60000000; @@ -27,9 +27,10 @@ static unsigned int ftrace_nop = 0x60000000; # define GET_ADDR(addr) *(unsigned long *)addr #endif + static unsigned int notrace ftrace_calc_offset(long ip, long addr) { - return (int)((addr + CALL_BACK) - ip); + return (int)(addr - ip); } notrace unsigned char *ftrace_nop_replace(void) @@ -76,9 +77,6 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned new = *(unsigned *)new_code; int faulted = 0; - /* move the IP back to the start of the call */ - ip -= CALL_BACK; - /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting @@ -118,12 +116,10 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, notrace int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[4], *new; + unsigned char old[MCOUNT_INSN_SIZE], *new; int ret; - ip += CALL_BACK; - - memcpy(old, &ftrace_call, 4); + memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); ret = ftrace_modify_code(ip, old, new); @@ -134,16 +130,13 @@ notrace int ftrace_mcount_set(unsigned long *data) { unsigned long ip = (long)(&mcount_call); unsigned long *addr = data; - unsigned char old[4], *new; - - /* ip is at the location, but modify code will subtact this */ - ip += CALL_BACK; + unsigned char old[MCOUNT_INSN_SIZE], *new; /* * Replace the mcount stub with a pointer to the * ip recorder function. */ - memcpy(old, &mcount_call, 4); + memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, *addr); *addr = ftrace_modify_code(ip, old, new); diff --git a/arch/sparc64/kernel/ftrace.c b/arch/sparc64/kernel/ftrace.c index c17373195b1e..4298d0aee713 100644 --- a/arch/sparc64/kernel/ftrace.c +++ b/arch/sparc64/kernel/ftrace.c @@ -5,6 +5,8 @@ #include #include +#include + static const u32 ftrace_nop = 0x01000000; notrace unsigned char *ftrace_nop_replace(void) @@ -60,9 +62,9 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, notrace int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[4], *new; + unsigned char old[MCOUNT_INSN_SIZE], *new; - memcpy(old, &ftrace_call, 4); + memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); return ftrace_modify_code(ip, old, new); } @@ -71,13 +73,13 @@ notrace int ftrace_mcount_set(unsigned long *data) { unsigned long ip = (long)(&mcount_call); unsigned long *addr = data; - unsigned char old[4], *new; + unsigned char old[MCOUNT_INSN_SIZE], *new; /* * Replace the mcount stub with a pointer to the * ip recorder function. 
*/ - memcpy(old, &mcount_call, 4); + memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, *addr); *addr = ftrace_modify_code(ip, old, new); diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 8ac0b99f2c55..b80d982a29c6 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -53,6 +53,7 @@ #include #include #include +#include struct poll { int fd; @@ -112,7 +113,6 @@ EXPORT_SYMBOL(smp_call_function); #endif /* CONFIG_SMP */ #if defined(CONFIG_MCOUNT) -extern void _mcount(void); EXPORT_SYMBOL(_mcount); #endif diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 04ea83ccb979..95e6bbe3665e 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -51,6 +51,7 @@ #include #include #include +#include #include "irq_vectors.h" /* @@ -1118,6 +1119,7 @@ ENTRY(mcount) pushl %ecx pushl %edx movl 0xc(%esp), %eax + subl $MCOUNT_INSN_SIZE, %eax .globl mcount_call mcount_call: @@ -1136,6 +1138,7 @@ ENTRY(ftrace_caller) pushl %edx movl 0xc(%esp), %eax movl 0x4(%ebp), %edx + subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call ftrace_call: @@ -1166,6 +1169,7 @@ trace: pushl %edx movl 0xc(%esp), %eax movl 0x4(%ebp), %edx + subl $MCOUNT_INSN_SIZE, %eax call *ftrace_trace_function diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index fe25e5febca3..b0f7308f78a6 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -51,6 +51,7 @@ #include #include #include +#include .code64 @@ -68,6 +69,7 @@ ENTRY(mcount) movq %r9, 48(%rsp) movq 0x38(%rsp), %rdi + subq $MCOUNT_INSN_SIZE, %rdi .globl mcount_call mcount_call: @@ -99,6 +101,7 @@ ENTRY(ftrace_caller) movq 0x38(%rsp), %rdi movq 8(%rbp), %rsi + subq $MCOUNT_INSN_SIZE, %rdi .globl ftrace_call ftrace_call: @@ -139,6 +142,7 @@ trace: movq 0x38(%rsp), %rdi movq 8(%rbp), %rsi + subq $MCOUNT_INSN_SIZE, %rdi call *ftrace_trace_function diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 55828149e01e..ab115cd15fdf 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -17,20 +17,21 @@ #include #include +#include -#define CALL_BACK 5 /* Long is fine, even if it is only 4 bytes ;-) */ static long *ftrace_nop; union ftrace_code_union { - char code[5]; + char code[MCOUNT_INSN_SIZE]; struct { char e8; int offset; } __attribute__((packed)); }; + static int notrace ftrace_calc_offset(long ip, long addr) { return (int)(addr - ip); @@ -46,7 +47,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) static union ftrace_code_union calc; calc.e8 = 0xe8; - calc.offset = ftrace_calc_offset(ip, addr); + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); /* * No locking needed, this must be called via kstop_machine @@ -65,9 +66,6 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char newch = new_code[4]; int faulted = 0; - /* move the IP back to the start of the call */ - ip -= CALL_BACK; - /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting @@ -102,12 +100,10 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, notrace int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[5], *new; + unsigned char old[MCOUNT_INSN_SIZE], *new; int ret; - ip += CALL_BACK; - - memcpy(old, &ftrace_call, 5); + memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned 
long)func); ret = ftrace_modify_code(ip, old, new); @@ -118,16 +114,13 @@ notrace int ftrace_mcount_set(unsigned long *data) { unsigned long ip = (long)(&mcount_call); unsigned long *addr = data; - unsigned char old[5], *new; - - /* ip is at the location, but modify code will subtact this */ - ip += CALL_BACK; + unsigned char old[MCOUNT_INSN_SIZE], *new; /* * Replace the mcount stub with a pointer to the * ip recorder function. */ - memcpy(old, &mcount_call, 5); + memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, *addr); *addr = ftrace_modify_code(ip, old, new); @@ -142,8 +135,7 @@ int __init ftrace_dyn_arch_init(void *data) ftrace_mcount_set(data); - ftrace_nop = (unsigned long *)noptable[CALL_BACK]; + ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE]; return 0; } - diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 29999dbb754c..dd7ebee446af 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c @@ -1,9 +1,9 @@ -#include #include #include #include #include +#include #ifdef CONFIG_FTRACE /* mcount is defined in assembly */ diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 122885bc5f3b..16ff4bf418d9 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -1,7 +1,6 @@ /* Exports for assembly files. All C exports should go in the respective C files. */ -#include #include #include @@ -11,6 +10,7 @@ #include #include #include +#include #ifdef CONFIG_FTRACE /* mcount is defined in assembly */ diff --git a/include/asm-arm/ftrace.h b/include/asm-arm/ftrace.h new file mode 100644 index 000000000000..584ef9a8e5a5 --- /dev/null +++ b/include/asm-arm/ftrace.h @@ -0,0 +1,14 @@ +#ifndef _ASM_ARM_FTRACE +#define _ASM_ARM_FTRACE + +#ifdef CONFIG_FTRACE +#define MCOUNT_ADDR ((long)(mcount)) +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ +extern void mcount(void); +#endif + +#endif + +#endif /* _ASM_ARM_FTRACE */ diff --git a/include/asm-powerpc/ftrace.h b/include/asm-powerpc/ftrace.h index b1bfa704b6e5..de921326cca8 100644 --- a/include/asm-powerpc/ftrace.h +++ b/include/asm-powerpc/ftrace.h @@ -1,6 +1,14 @@ #ifndef _ASM_POWERPC_FTRACE #define _ASM_POWERPC_FTRACE +#ifdef CONFIG_FTRACE +#define MCOUNT_ADDR ((long)(_mcount)) +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ extern void _mcount(void); +#endif #endif + +#endif /* _ASM_POWERPC_FTRACE */ diff --git a/include/asm-sparc64/ftrace.h b/include/asm-sparc64/ftrace.h new file mode 100644 index 000000000000..f76a40a338bb --- /dev/null +++ b/include/asm-sparc64/ftrace.h @@ -0,0 +1,14 @@ +#ifndef _ASM_SPARC64_FTRACE +#define _ASM_SPARC64_FTRACE + +#ifdef CONFIG_FTRACE +#define MCOUNT_ADDR ((long)(_mcount)) +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ +extern void _mcount(void); +#endif + +#endif + +#endif /* _ASM_SPARC64_FTRACE */ diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h new file mode 100644 index 000000000000..c184441133f2 --- /dev/null +++ b/include/asm-x86/ftrace.h @@ -0,0 +1,14 @@ +#ifndef _ASM_X86_FTRACE +#define _ASM_SPARC64_FTRACE + +#ifdef CONFIG_FTRACE +#define MCOUNT_ADDR ((long)(mcount)) +#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ + +#ifndef __ASSEMBLY__ +extern void mcount(void); +#endif + +#endif /* CONFIG_FTRACE */ + +#endif /* _ASM_X86_FTRACE */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 20e14d0093c7..366098d591de 100644 --- 
a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -31,7 +31,6 @@ int unregister_ftrace_function(struct ftrace_ops *ops); void clear_ftrace_function(void); extern void ftrace_stub(unsigned long a0, unsigned long a1); -extern void mcount(void); #else /* !CONFIG_FTRACE */ # define register_ftrace_function(ops) do { } while (0) @@ -54,7 +53,7 @@ enum { struct dyn_ftrace { struct hlist_node node; - unsigned long ip; + unsigned long ip; /* address of mcount call-site */ unsigned long flags; }; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0d5bcf69952d..f1e9e5c74e64 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -27,6 +27,8 @@ #include #include +#include + #include "trace.h" /* ftrace_enabled is a method to turn ftrace on or off */ @@ -329,7 +331,6 @@ ftrace_record_ip(unsigned long ip) } #define FTRACE_ADDR ((long)(ftrace_caller)) -#define MCOUNT_ADDR ((long)(mcount)) static int __ftrace_replace_code(struct dyn_ftrace *rec, -- cgit v1.2.3 From b7d7a2404f80386307ccc0cde63d8d2a5e3bc85c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 26 Jun 2008 11:22:13 +0200 Subject: powerpc: convert to generic helpers for IPI function calls This converts ppc to use the new helpers for smp_call_function() and friends, and adds support for smp_call_function_single(). ppc loses the timeout functionality of smp_call_function_mask() with this change, as the generic code does not provide that. Acked-by: Paul Mackerras Signed-off-by: Jens Axboe --- arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/smp.c | 234 +++----------------------------- arch/powerpc/platforms/cell/interrupt.c | 1 + arch/powerpc/platforms/ps3/smp.c | 7 +- arch/powerpc/platforms/pseries/xics.c | 6 +- arch/powerpc/sysdev/mpic.c | 2 +- include/asm-powerpc/smp.h | 8 +- 7 files changed, 33 insertions(+), 226 deletions(-) (limited to 'include/asm-powerpc') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3934e2659407..852d40c29637 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -110,6 +110,7 @@ config PPC select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_LMB + select USE_GENERIC_SMP_HELPERS if SMP config EARLY_PRINTK bool diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 1457aa0a08f1..37a5ab410dcc 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops; static volatile unsigned int cpu_callin_map[NR_CPUS]; -void smp_call_function_interrupt(void); - int smt_enabled_at_boot = 1; -static int ipi_fail_ok; - static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; #ifdef CONFIG_PPC64 @@ -99,12 +95,15 @@ void smp_message_recv(int msg) { switch(msg) { case PPC_MSG_CALL_FUNCTION: - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); break; case PPC_MSG_RESCHEDULE: /* XXX Do we have to do this? 
*/ set_need_resched(); break; + case PPC_MSG_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; case PPC_MSG_DEBUGGER_BREAK: if (crash_ipi_function_ptr) { crash_ipi_function_ptr(get_irq_regs()); @@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu) smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); } +void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); +} + +void arch_send_call_function_ipi(cpumask_t mask) +{ + unsigned int cpu; + + for_each_cpu_mask(cpu, mask) + smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); +} + #ifdef CONFIG_DEBUGGER void smp_send_debugger_break(int cpu) { @@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy) ; } -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - * Stolen from the i386 version. - */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); - -static struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -} *call_data; - -/* delay of at least 8 seconds */ -#define SMP_CALL_TIMEOUT 8 - -/* - * These functions send a 'generic call function' IPI to other online - * CPUS in the system. - * - * [SUMMARY] Run a function on other CPUs. - * The function to run. This must be fast and non-blocking. - * An arbitrary pointer to pass to the function. - * currently unused. - * If true, wait (atomically) until function has completed on other CPUs. - * [RETURNS] 0 on success, else a negative status code. Does not return until - * remote CPUs are nearly ready to execute <> or are or have executed. - * is a cpu map of the cpus to send IPI to. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -static int __smp_call_function_map(void (*func) (void *info), void *info, - int nonatomic, int wait, cpumask_t map) -{ - struct call_data_struct data; - int ret = -1, num_cpus; - int cpu; - u64 timeout; - - if (unlikely(smp_ops == NULL)) - return ret; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - /* remove 'self' from the map */ - if (cpu_isset(smp_processor_id(), map)) - cpu_clear(smp_processor_id(), map); - - /* sanity check the map, remove any non-online processors. 
*/ - cpus_and(map, map, cpu_online_map); - - num_cpus = cpus_weight(map); - if (!num_cpus) - goto done; - - call_data = &data; - smp_wmb(); - /* Send a message to all CPUs in the map */ - for_each_cpu_mask(cpu, map) - smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); - - timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec; - - /* Wait for indication that they have received the message */ - while (atomic_read(&data.started) != num_cpus) { - HMT_low(); - if (get_tb() >= timeout) { - printk("smp_call_function on cpu %d: other cpus not " - "responding (%d)\n", smp_processor_id(), - atomic_read(&data.started)); - if (!ipi_fail_ok) - debugger(NULL); - goto out; - } - } - - /* optionally wait for the CPUs to complete */ - if (wait) { - while (atomic_read(&data.finished) != num_cpus) { - HMT_low(); - if (get_tb() >= timeout) { - printk("smp_call_function on cpu %d: other " - "cpus not finishing (%d/%d)\n", - smp_processor_id(), - atomic_read(&data.finished), - atomic_read(&data.started)); - debugger(NULL); - goto out; - } - } - } - - done: - ret = 0; - - out: - call_data = NULL; - HMT_medium(); - return ret; -} - -static int __smp_call_function(void (*func)(void *info), void *info, - int nonatomic, int wait) -{ - int ret; - spin_lock(&call_lock); - ret =__smp_call_function_map(func, info, nonatomic, wait, - cpu_online_map); - spin_unlock(&call_lock); - return ret; -} - -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - return __smp_call_function(func, info, nonatomic, wait); -} -EXPORT_SYMBOL(smp_call_function); - -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - cpumask_t map = CPU_MASK_NONE; - int ret = 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - if (!cpu_online(cpu)) - return -EINVAL; - - cpu_set(cpu, map); - if (cpu != get_cpu()) { - spin_lock(&call_lock); - ret = __smp_call_function_map(func, info, nonatomic, wait, map); - spin_unlock(&call_lock); - } else { - local_irq_disable(); - func(info); - local_irq_enable(); - } - put_cpu(); - return ret; -} -EXPORT_SYMBOL(smp_call_function_single); - void smp_send_stop(void) { - int nolock; - - /* It's OK to fail sending the IPI, since the alternative is to - * be stuck forever waiting on the other CPU to take the interrupt. - * - * It's better to at least continue and go through reboot, since this - * function is usually called at panic or reboot time in the first - * place. - */ - ipi_fail_ok = 1; - - /* Don't deadlock in case we got called through panic */ - nolock = !spin_trylock(&call_lock); - __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map); - if (!nolock) - spin_unlock(&call_lock); -} - -void smp_call_function_interrupt(void) -{ - void (*func) (void *info); - void *info; - int wait; - - /* call_data will be NULL if the sender timed out while - * waiting on us to receive the call. 
- */ - if (!call_data) - return; - - func = call_data->func; - info = call_data->info; - wait = call_data->wait; - - if (!wait) - smp_mb__before_atomic_inc(); - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ - (*func)(info); - if (wait) { - smp_mb__before_atomic_inc(); - atomic_inc(&call_data->finished); - } + smp_call_function(stop_this_cpu, NULL, 0, 0); } extern struct gettimeofday_struct do_gtod; @@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused) secondary_cpu_time_init(); - spin_lock(&call_lock); + ipi_call_lock(); cpu_set(cpu, cpu_online_map); - spin_unlock(&call_lock); + ipi_call_unlock(); local_irq_enable(); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 5bf7df146022..2d5bb22d6c09 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -218,6 +218,7 @@ void iic_request_IPIs(void) { iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); + iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); #ifdef CONFIG_DEBUGGER iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); #endif /* CONFIG_DEBUGGER */ diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index f0b12f212363..a0927a3bacb7 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c @@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu) * to index needs to be setup. */ - BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); - BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); - BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); + BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); + BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); + BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2); + BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); for (i = 0; i < MSG_COUNT; i++) { result = ps3_event_receive_port_setup(cpu, &virqs[i]); diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index ebebc28fe895..0fc830f576f5 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -383,13 +383,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu) mb(); smp_message_recv(PPC_MSG_RESCHEDULE); } -#if 0 - if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, + if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, &xics_ipi_message[cpu].value)) { mb(); - smp_message_recv(PPC_MSG_MIGRATE_TASK); + smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); } -#endif #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, &xics_ipi_message[cpu].value)) { diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 7680001676a6..6c90c95b454e 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -1494,7 +1494,7 @@ void mpic_request_ipis(void) static char *ipi_names[] = { "IPI0 (call function)", "IPI1 (reschedule)", - "IPI2 (unused)", + "IPI2 (call function single)", "IPI3 (debugger break)", }; BUG_ON(mpic == NULL); diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h index 505f35bacaa9..c663a1fa77c5 100644 --- a/include/asm-powerpc/smp.h +++ b/include/asm-powerpc/smp.h @@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); * in /proc/interrupts will be wrong!!! 
--Troy */ #define PPC_MSG_CALL_FUNCTION 0 #define PPC_MSG_RESCHEDULE 1 -/* This is unused now */ -#if 0 -#define PPC_MSG_MIGRATE_TASK 2 -#endif +#define PPC_MSG_CALL_FUNC_SINGLE 2 #define PPC_MSG_DEBUGGER_BREAK 3 void smp_init_iSeries(void); @@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void); extern struct smp_ops_t *smp_ops; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From 86df86424939d316b1f6cfac1b6204f0c7dee317 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 8 Jul 2008 15:58:16 +1000 Subject: Correct hash flushing from huge_ptep_set_wrprotect() As Andy Whitcroft recently pointed out, the current powerpc version of huge_ptep_set_wrprotect() has a bug. It just calls ptep_set_wrprotect() which in turn calls pte_update() then hpte_need_flush() with the 'huge' argument set to 0. This will cause hpte_need_flush() to flush the wrong hash entries (if any). Andy's fix for this is already in the powerpc tree as commit 016b33c4958681c24056abed8ec95844a0da80a3. I have confirmed this is a real bug, not masked by some other synchronization, with a new testcase for libhugetlbfs: a process writes to a (MAP_PRIVATE) hugepage mapping, calls fork(), then alters the mapping, and the child incorrectly sees the second write. Therefore, this should be fixed for 2.6.26, and for the stable tree. Here is a suitable patch for 2.6.26, which I think will also be suitable for the stable tree (neither of the headers in question has been changed much recently). It is cut down slightly from Andy's original version, in that it does not include a 32-bit version of huge_ptep_set_wrprotect(). Currently, hugepages are not supported on any 32-bit powerpc platform. When they are, a suitable 32-bit version can be added - the only 32-bit hardware which supports hugepages does not use the conventional hashtable MMU and so will have different needs anyway. 
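The failing sequence is easy to picture with a rough user-space sketch (an illustration of the scenario above, not the actual libhugetlbfs testcase; the hugetlbfs mount path, mapping size, and sleep()-based ordering are all assumptions):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>

int main(void)
{
	/* assumed hugetlbfs mount point; len must be a multiple of the hugepage size */
	int fd = open("/mnt/huge/wrprotect-test", O_CREAT | O_RDWR, 0600);
	size_t len = 16 * 1024 * 1024;
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;			/* first write, before fork() */
	if (fork() == 0) {
		sleep(1);		/* crude wait for the parent's second write */
		printf("child sees %d\n", p[0]);	/* must still be 1 */
		_exit(p[0] != 1);	/* the bug made the child see 2 */
	}
	p[0] = 2;			/* second write; should COW in the parent only */
	wait(NULL);
	munmap(p, len);
	return 0;
}

With the broken huge_ptep_set_wrprotect(), the stale writable hash entry lets the parent's second write land in the still-shared hugepage instead of triggering copy-on-write, so the child reads 2 rather than 1.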
Signed-off-by: Andy Whitcroft Signed-off-by: David Gibson Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Linus Torvalds --- include/asm-powerpc/hugetlb.h | 6 ------ include/asm-powerpc/pgtable-ppc64.h | 10 ++++++++++ 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h index 649c6c3b87b3..be32ff02f4a0 100644 --- a/include/asm-powerpc/hugetlb.h +++ b/include/asm-powerpc/hugetlb.h @@ -49,12 +49,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) return pte_wrprotect(pte); } -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} - static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h index cc6a43ba41d0..7686569a0bef 100644 --- a/include/asm-powerpc/pgtable-ppc64.h +++ b/include/asm-powerpc/pgtable-ppc64.h @@ -314,6 +314,16 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, old = pte_update(mm, addr, ptep, _PAGE_RW, 0); } +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + old = pte_update(mm, addr, ptep, _PAGE_RW, 1); +} + /* * We currently remove entries from the hashtable regardless of whether * the entry was young or dirty. The generic routines only flush if the -- cgit v1.2.3