author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>    2014-11-19 05:14:11 +0300
committer Steven Rostedt <rostedt@goodmis.org>              2014-11-19 23:25:26 +0300
commit    aec0be2d6e9f02dbef41ee54854c2e003e55c23e (patch)
tree      d99c09ba4247724e467ab497f2184068a64ef63b /arch/x86/kernel/ftrace.c
parent    9960efeb80f73bd073483dab0855ee0ddc27085c (diff)
download  linux-aec0be2d6e9f02dbef41ee54854c2e003e55c23e.tar.xz
ftrace/x86/extable: Add is_ftrace_trampoline() function
Stack traces that happen from function tracing check if the address on the stack is a __kernel_text_address(). That is, whether the address is kernel code. This calls core_kernel_text(), which returns true if the address is part of the built-in kernel code. It also calls is_module_text_address(), which returns true if the address belongs to module code.

But what is missing is ftrace dynamically allocated trampolines. These trampolines are allocated for individual ftrace_ops that call the ftrace_ops callback functions directly. If a stack trace is taken, the code checking the stack won't detect them, as they are neither core kernel code nor in module address space.

By adding another field to ftrace_ops that also stores the size of the trampoline assigned to it, we can create a new function called is_ftrace_trampoline() that returns true if the address is a dynamically allocated ftrace trampoline. Note, it ignores trampolines that are not dynamically allocated, as they will already return true from core_kernel_text().

Link: http://lkml.kernel.org/r/20141119034829.497125839@goodmis.org
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
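A minimal userspace sketch of the idea, not the kernel implementation: the names demo_ops and demo_is_ftrace_trampoline are illustrative stand-ins for ftrace_ops and is_ftrace_trampoline(), the list is a plain singly linked list, and the locking the kernel's ftrace core needs is omitted.

/*
 * Sketch: record each trampoline's start address and size in its ops
 * structure, then treat an address as trampoline text if it falls
 * inside any such [trampoline, trampoline + trampoline_size) range.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_ops {
	unsigned long trampoline;	/* start of allocated trampoline */
	unsigned int trampoline_size;	/* size reported by create_trampoline() */
	struct demo_ops *next;
};

/* Head of a singly linked list standing in for the registered ops list. */
static struct demo_ops *ops_list;

/* Return true if addr lies inside any dynamically allocated trampoline. */
static bool demo_is_ftrace_trampoline(unsigned long addr)
{
	struct demo_ops *op;

	for (op = ops_list; op; op = op->next) {
		if (!op->trampoline)
			continue;
		if (addr >= op->trampoline &&
		    addr < op->trampoline + op->trampoline_size)
			return true;
	}
	return false;
}

int main(void)
{
	static struct demo_ops op = {
		.trampoline = 0x1000,
		.trampoline_size = 0x40,
	};

	ops_list = &op;

	printf("0x1010 in trampoline: %d\n", demo_is_ftrace_trampoline(0x1010));
	printf("0x2000 in trampoline: %d\n", demo_is_ftrace_trampoline(0x2000));
	return 0;
}

With the size recorded at allocation time, the check reduces to a start/end range test per ops, which is why the diff below has create_trampoline() report the allocated size back and arch_ftrace_update_trampoline() store it in ops->trampoline_size.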
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--  arch/x86/kernel/ftrace.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1aea94d336c7..60881d919432 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -712,7 +712,8 @@ union ftrace_op_code_union {
} __attribute__((packed));
};
-static unsigned long create_trampoline(struct ftrace_ops *ops)
+static unsigned long
+create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
unsigned const char *jmp;
unsigned long start_offset;
@@ -749,6 +750,8 @@ static unsigned long create_trampoline(struct ftrace_ops *ops)
if (!trampoline)
return 0;
+ *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
+
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
if (WARN_ON(ret < 0)) {
@@ -819,6 +822,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
unsigned char *new;
unsigned long offset;
unsigned long ip;
+ unsigned int size;
int ret;
if (ops->trampoline) {
@@ -829,9 +833,10 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
} else {
- ops->trampoline = create_trampoline(ops);
+ ops->trampoline = create_trampoline(ops, &size);
if (!ops->trampoline)
return;
+ ops->trampoline_size = size;
}
offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);