author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-13 00:21:15 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-13 00:21:15 +0300 |
commit | 1c6890707eb1438b0fb4e0a10d4afe48a217628b (patch) | |
tree | f600dc002e5076353efb005727d2fa3c47560dd4 /arch/x86/kernel | |
parent | 3bff6112c80cecb76af5fe485506f96e8adb6122 (diff) | |
parent | bcb53209be5cb32d485507452edda19b78f31d84 (diff) | |
download | linux-1c6890707eb1438b0fb4e0a10d4afe48a217628b.tar.xz |
Merge tag 'perf-kprobes-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf/kprobes updates from Ingo Molnar:
"This prepares to unify the kretprobe trampoline handler and make
kretprobe lockless (those patches are still work in progress)"
* tag 'perf-kprobes-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
kprobes: Fix to check probe enabled before disarm_kprobe_ftrace()
kprobes: Make local functions static
kprobes: Free kretprobe_instance with RCU callback
kprobes: Remove NMI context check
sparc: kprobes: Use generic kretprobe trampoline handler
sh: kprobes: Use generic kretprobe trampoline handler
s390: kprobes: Use generic kretprobe trampoline handler
powerpc: kprobes: Use generic kretprobe trampoline handler
parisc: kprobes: Use generic kretprobe trampoline handler
mips: kprobes: Use generic kretprobe trampoline handler
ia64: kprobes: Use generic kretprobe trampoline handler
csky: kprobes: Use generic kretprobe trampoline handler
arc: kprobes: Use generic kretprobe trampoline handler
arm64: kprobes: Use generic kretprobe trampoline handler
arm: kprobes: Use generic kretprobe trampoline handler
x86/kprobes: Use generic kretprobe trampoline handler
kprobes: Add generic kretprobe trampoline handler
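Each of the per-architecture patches above follows the same shape: the arch trampoline handler keeps only its register fixup and hands all kretprobe-instance bookkeeping to the new generic helper. Below is a minimal sketch of that pattern, condensed from the x86 hunk in the diff further down; treat it as an illustration of the conversion, not as the exact upstream code.

```c
#include <linux/kprobes.h>	/* declares kretprobe_trampoline_handler() after this series */
#include <linux/ptrace.h>

/*
 * Sketch of the post-conversion x86 handler (see the diff below); other
 * architectures differ only in the pt_regs fixup they perform.
 */
__used __visible void *trampoline_handler(struct pt_regs *regs)
{
	/* arch-specific fixup so kretprobe handlers see a coherent pt_regs */
	regs->cs = __KERNEL_CS;
	regs->ip = (unsigned long)&kretprobe_trampoline;
	regs->orig_ax = ~0UL;

	/* instance lookup, handler dispatch and recycling are now generic */
	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline,
						    &regs->sp);
}
```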
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/kprobes/core.c | 108 |
1 file changed, 3 insertions, 105 deletions
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index fdadc37d72af..882b95313ad6 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -767,124 +767,22 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
+
 /*
  * Called from kretprobe_trampoline
  */
 __used __visible void *trampoline_handler(struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri = NULL;
-	struct hlist_head *head, empty_rp;
-	struct hlist_node *tmp;
-	unsigned long flags, orig_ret_address = 0;
-	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-	kprobe_opcode_t *correct_ret_addr = NULL;
-	void *frame_pointer;
-	bool skipped = false;
-
-	/*
-	 * Set a dummy kprobe for avoiding kretprobe recursion.
-	 * Since kretprobe never run in kprobe handler, kprobe must not
-	 * be running at this point.
-	 */
-	kprobe_busy_begin();
-
-	INIT_HLIST_HEAD(&empty_rp);
-	kretprobe_hash_lock(current, &head, &flags);
 	/* fixup registers */
 	regs->cs = __KERNEL_CS;
 #ifdef CONFIG_X86_32
 	regs->cs |= get_kernel_rpl();
 	regs->gs = 0;
 #endif
-	/* We use pt_regs->sp for return address holder. */
-	frame_pointer = &regs->sp;
-	regs->ip = trampoline_address;
+	regs->ip = (unsigned long)&kretprobe_trampoline;
 	regs->orig_ax = ~0UL;
 
-	/*
-	 * It is possible to have multiple instances associated with a given
-	 * task either because multiple functions in the call path have
-	 * return probes installed on them, and/or more than one
-	 * return probe was registered for a target function.
-	 *
-	 * We can handle this because:
-	 *	- instances are always pushed into the head of the list
-	 *	- when multiple return probes are registered for the same
-	 *	  function, the (chronologically) first instance's ret_addr
-	 *	  will be the real return address, and all the rest will
-	 *	  point to kretprobe_trampoline.
-	 */
-	hlist_for_each_entry(ri, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-		/*
-		 * Return probes must be pushed on this hash list correct
-		 * order (same as return order) so that it can be popped
-		 * correctly. However, if we find it is pushed it incorrect
-		 * order, this means we find a function which should not be
-		 * probed, because the wrong order entry is pushed on the
-		 * path of processing other kretprobe itself.
-		 */
-		if (ri->fp != frame_pointer) {
-			if (!skipped)
-				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
-			skipped = true;
-			continue;
-		}
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-		if (skipped)
-			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
-				ri->rp->kp.addr);
-
-		if (orig_ret_address != trampoline_address)
-			/*
-			 * This is the real return address. Any other
-			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
-	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
-		if (ri->task != current)
-			/* another task is sharing our hash bucket */
-			continue;
-		if (ri->fp != frame_pointer)
-			continue;
-
-		orig_ret_address = (unsigned long)ri->ret_addr;
-		if (ri->rp && ri->rp->handler) {
-			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			ri->ret_addr = correct_ret_addr;
-			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, &kprobe_busy);
-		}
-
-		recycle_rp_inst(ri, &empty_rp);
-
-		if (orig_ret_address != trampoline_address)
-			/*
-			 * This is the real return address. Any other
			 * instances associated with this task are for
-			 * other calls deeper on the call stack
-			 */
-			break;
-	}
-
-	kretprobe_hash_unlock(current, &flags);
-
-	kprobe_busy_end();
-
-	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-		hlist_del(&ri->hlist);
-		kfree(ri);
-	}
-	return (void *)orig_ret_address;
+	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, &regs->sp);
 }
 NOKPROBE_SYMBOL(trampoline_handler);
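The `ri->rp->handler(ri, regs)` call removed above is what invokes a user's kretprobe callback; the generic helper now does that dispatch for every architecture. For context, a kretprobe user registers such a callback with `register_kretprobe()`, roughly as in the kernel's samples/kprobes/kretprobe_example.c. The module name, handler name, and probed symbol below are chosen purely for illustration.

```c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs when the probed function returns; regs holds the return-time state. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	/* symbol chosen purely for illustration */
	.kp.symbol_name	= "do_sys_open",
	.maxactive	= 20,	/* kretprobe instances to pre-allocate */
};

static int __init kretprobe_demo_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_demo_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kretprobe_demo_init);
module_exit(kretprobe_demo_exit);
MODULE_LICENSE("GPL");
```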