author		Michael Ellerman <mpe@ellerman.id.au>	2016-07-19 07:48:30 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-07-21 13:10:37 +0300
commit		9d636109511a000882f8dff4eaafa874eec5ece8 (patch)
tree		321f23e7a84c48b008f5996238d67e6db1dc07fd /arch/powerpc/kernel/ftrace.c
parent		b1923caa6e641f3d0a93b5d045aef67ded5aef67 (diff)
powerpc/ftrace: Separate the heuristics for checking call sites
In __ftrace_make_nop() (the 64-bit version), we have code to deal with two ftrace ABIs. There is the original ABI, which looks mostly like a function call, and then the mprofile-kernel ABI which is just a branch.

The code tries to handle both cases, by looking for the presence of a load to restore the TOC pointer (PPC_INST_LD_TOC). If we detect the TOC load, we assume the call site is for an mcount() call using the old ABI. That means we patch the mcount() call with a b +8, to branch over the TOC load.

However, if the kernel was built with mprofile-kernel, then there will never be a call site using the original ftrace ABI. If for some reason we do see a TOC load, then it's there for a good reason, and we should not jump over it.

So split the code, using the existing CC_USING_MPROFILE_KERNEL. Kernels built with mprofile-kernel will only look for, and expect, the new ABI, and similarly for the original ABI.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
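[Editor's illustration] The split is easier to see side by side. Below is a minimal user-space sketch, not kernel code: pick_pop() is a hypothetical stand-in for the call-site check in __ftrace_make_nop(), while the PPC_INST_* values are the real encodings from arch/powerpc/include/asm/ppc-opcode.h.

#include <stdio.h>

/* Encodings as in arch/powerpc/include/asm/ppc-opcode.h */
#define PPC_INST_MFLR		0x7c0802a6u	/* mflr r0        */
#define PPC_INST_STD_LR		0xf8010010u	/* std  r0,16(r1) */
#define PPC_INST_LD_TOC		0xe8410018u	/* ld   r2,24(r1) */
#define PPC_INST_NOP		0x60000000u	/* nop            */
#define PPC_INST_BRANCH		0x48000000u	/* b              */

/*
 * Hypothetical stand-in for the call-site check: prev is the
 * instruction before the "bl _mcount", next is the one after it.
 * Returns the instruction to patch over the bl, or 0 on error.
 */
static unsigned int pick_pop(unsigned int prev, unsigned int next)
{
	(void)prev; (void)next;
#ifdef CC_USING_MPROFILE_KERNEL
	/* New ABI: "mflr r0" (or "std r0,LRSAVE(r1)") precedes the bl.
	 * There is no TOC load to jump over, so a plain nop is safe. */
	return (prev == PPC_INST_MFLR || prev == PPC_INST_STD_LR) ?
		PPC_INST_NOP : 0;
#else
	/* Original ABI: the TOC reload follows the bl.  Patch in "b +8"
	 * to skip the load while leaving it intact for any task that was
	 * preempted mid-call and still needs to restore r2. */
	return (next == PPC_INST_LD_TOC) ? (PPC_INST_BRANCH | 8) : 0;
#endif
}

int main(void)
{
	printf("pop = %08x\n", pick_pop(PPC_INST_MFLR, PPC_INST_LD_TOC));
	return 0;
}

Built with -DCC_USING_MPROFILE_KERNEL the sketch accepts only the new-ABI call-site shape; built without it, only the TOC-load shape. That is exactly the build-time split the patch introduces.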
Diffstat (limited to 'arch/powerpc/kernel/ftrace.c')
-rw-r--r--	arch/powerpc/kernel/ftrace.c | 35 ++++++++++++++++++-----------------
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 7af6c4de044b..cc52d9795f88 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -144,6 +144,21 @@ __ftrace_make_nop(struct module *mod,
 		return -EINVAL;
 	}
 
+#ifdef CC_USING_MPROFILE_KERNEL
+	/* When using -mkernel_profile there is no load to jump over */
+	pop = PPC_INST_NOP;
+
+	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+		return -EFAULT;
+	}
+
+	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+		return -EINVAL;
+	}
+#else
 	/*
 	 * Our original call site looks like:
 	 *
@@ -170,24 +185,10 @@ __ftrace_make_nop(struct module *mod,
 	}
 
 	if (op != PPC_INST_LD_TOC) {
-		unsigned int inst;
-
-		if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
-			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
-			return -EFAULT;
-		}
-
-		/* We expect either a mlfr r0, or a std r0, LRSAVE(r1) */
-		if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
-			pr_err("Unexpected instructions around bl _mcount\n"
-			       "when enabling dynamic ftrace!\t"
-			       "(%08x,bl,%08x)\n", inst, op);
-			return -EINVAL;
-		}
-
-		/* When using -mkernel_profile there is no load to jump over */
-		pop = PPC_INST_NOP;
+		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+		return -EINVAL;
 	}
+#endif /* CC_USING_MPROFILE_KERNEL */
 
 	if (patch_instruction((unsigned int *)ip, pop)) {
 		pr_err("Patching NOP failed.\n");