author | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2021-10-08 12:13:30 +0300
---|---|---
committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2021-10-20 03:33:21 +0300
commit | 8646698aefad7547dc7acee8f8c2099d7653dc70 (patch) |
tree | 84f60ef92cd3770a5bc61d4b728a371fef8f17de /arch/x86/kernel/ftrace.c |
parent | 1e85010e17c1d72627eaf14d75d22e4d693abf70 (diff) |
download | linux-8646698aefad7547dc7acee8f8c2099d7653dc70.tar.xz |
x86/ftrace: Remove fault protection code in prepare_ftrace_return
Remove the fault protection code used when writing return_hooker
to the stack. As Steven noted:
> That protection was there from the beginning due to being "paranoid",
> considering ftrace was bricking network cards. But that protection
> would not have even protected against that.
Link: https://lkml.kernel.org/r/20211008091336.33616-3-jolsa@kernel.org
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r-- | arch/x86/kernel/ftrace.c | 38 |
1 file changed, 3 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 23d221a9a3cd..7f12eacdf1ae 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -620,12 +620,10 @@ int ftrace_disable_ftrace_graph_caller(void)
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 			   unsigned long frame_pointer)
 {
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
-	unsigned long old;
-	int faulted;
 
 	/*
 	 * When resuming from suspend-to-ram, this function can be indirectly
@@ -645,37 +643,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
-	/*
-	 * Protect against fault, even if it shouldn't
-	 * happen. This tool is too much intrusive to
-	 * ignore such a protection.
-	 */
-	asm volatile(
-		"1: " _ASM_MOV " (%[parent]), %[old]\n"
-		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
-		"   movl $0, %[faulted]\n"
-		"3:\n"
-
-		".section .fixup, \"ax\"\n"
-		"4: movl $1, %[faulted]\n"
-		"   jmp 3b\n"
-		".previous\n"
-
-		_ASM_EXTABLE(1b, 4b)
-		_ASM_EXTABLE(2b, 4b)
-
-		: [old] "=&r" (old), [faulted] "=r" (faulted)
-		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
-		: "memory"
-	);
-
-	if (unlikely(faulted)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		return;
-	}
-
-	if (function_graph_enter(old, self_addr, frame_pointer, parent))
-		*parent = old;
+	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
+		*parent = return_hooker;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
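
For readability, here is a sketch of how prepare_ftrace_return() reads after this patch, reconstructed only from the hunks above; the early-return checks that the diff context elides between the two hunks are marked rather than guessed.

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* ... early-return checks elided by the diff context ... */

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * The exception-table-protected asm is gone: the parent return
	 * address is read with a plain dereference, and return_hooker is
	 * written back only if function_graph_enter() accepts the entry.
	 */
	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;
}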