author     Linus Torvalds <torvalds@linux-foundation.org>  2025-01-24 04:24:20 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-01-24 04:24:20 +0300
commit     544521d6217fb7846b746ada9d70f308f078aa7e
tree       1ee768a27eb3895c388a896f99dc00b4e1766bdd
parent     e0b1f59142746f74476a03040f745329c8355a7e
parent     927054606d08d95827f854246293f8379480ed15
download   linux-544521d6217fb7846b746ada9d70f308f078aa7e.tar.xz
Merge tag 'probes-v6.14' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull probes updates from Masami Hiramatsu:
- kprobes: Cleanups using guard() and __free(): use the cleanup.h macros to
  clean up the code and remove all gotos from the kprobes code (a minimal
  sketch of the pattern follows this list).
- tracing/probes: Also clean up the tracing/*probe events code with guard()
  and __free(). These patches just simplify the parser code.
- kprobes: Reduce preempt disable scope in check_kprobe_access_safe().
  This shrinks the preempt-disabled window to just the section that takes
  the module refcount in check_kprobe_access_safe(). Previously, preemption
  was disabled needlessly around the other checks as well, including
  jump_label_text_reserved(), which takes a long time because of its
  linear search (a sketch of the narrowed scope also follows this list).
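A minimal sketch of the guard()/scoped_guard() pattern from <linux/cleanup.h>
that the series adopts (illustrative only; demo_mutex and the demo_*
functions are hypothetical, not taken from these patches):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);	/* hypothetical lock, for illustration */

static int demo_set_value(int val)
{
	/* The mutex is dropped automatically when the guard leaves scope. */
	guard(mutex)(&demo_mutex);

	if (val < 0)
		return -EINVAL;		/* early return: no unlock, no goto */

	/* ... update state under demo_mutex ... */
	return 0;
}

static void demo_block_scope(void)
{
	/* Hold the lock only for the duration of this block. */
	scoped_guard(mutex, &demo_mutex) {
		/* ... critical section ... */
	}
	/* demo_mutex has been released here. */
}

Tying the unlock to the guard variable's scope is what lets the converted
functions in the diff below drop their unlock-and-goto error paths.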
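And a sketch of the narrowed preempt-disable scope from the last item;
demo_get_probed_module() is a hypothetical stand-in for the module-lookup
step of check_kprobe_access_safe(), not the actual kernel code:

#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/preempt.h>

static int demo_get_probed_module(unsigned long addr, struct module **mod)
{
	/*
	 * Preemption is disabled only around the lookup and refcount grab;
	 * the slow reserved-text checks run outside this scope.
	 */
	guard(preempt)();

	*mod = __module_text_address(addr);
	if (!*mod)
		return -EINVAL;
	/* Pin the module so its text cannot be unloaded while patching. */
	if (unlikely(!try_module_get(*mod)))
		return -ENOENT;
	return 0;
}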
* tag 'probes-v6.14' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing/kprobes: Simplify __trace_kprobe_create() by removing gotos
tracing: Use __free() for kprobe events to cleanup
tracing: Use __free() in trace_probe for cleanup
kprobes: Remove remaining gotos
kprobes: Remove unneeded goto
kprobes: Use guard for rcu_read_lock
kprobes: Use guard() for external locks
jump_label: Define guard() for jump_label_lock
tracing/eprobe: Adopt guard() and scoped_guard()
tracing/uprobe: Adopt guard() and scoped_guard()
tracing/kprobe: Adopt guard() and scoped_guard()
kprobes: Adopt guard() and scoped_guard()
kprobes: Reduce preempt disable scope in check_kprobe_access_safe()
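The "jump_label: Define guard() for jump_label_lock" change above builds on
DEFINE_LOCK_GUARD_0(), which wraps an argument-less lock/unlock pair in a
guard class. A minimal sketch with hypothetical demo_lock()/demo_unlock()
helpers:

#include <linux/cleanup.h>

void demo_lock(void);		/* hypothetical lock/unlock pair */
void demo_unlock(void);

/* Defines a zero-argument guard class named "demo_lock". */
DEFINE_LOCK_GUARD_0(demo_lock, demo_lock(), demo_unlock())

static void demo_guard_user(void)
{
	guard(demo_lock)();	/* demo_unlock() runs at end of scope */
	/* ... code that must run with demo_lock held ... */
}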
-rw-r--r--  include/linux/jump_label.h  |   3
-rw-r--r--  kernel/kprobes.c            | 592
-rw-r--r--  kernel/trace/trace_eprobe.c |  36
-rw-r--r--  kernel/trace/trace_kprobe.c | 155
-rw-r--r--  kernel/trace/trace_probe.c  |  51
-rw-r--r--  kernel/trace/trace_uprobe.c |  15
6 files changed, 384 insertions(+), 468 deletions(-)
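For reference while reading the trace_kprobe/trace_probe hunks below, a
sketch of the DEFINE_FREE()/__free() auto-free pattern they adopt; struct
demo_obj and demo_alloc() are hypothetical stand-ins mirroring the shape of
DEFINE_FREE(free_trace_kprobe, ...) in the diff:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj { int id; };

/*
 * Free a demo_obj automatically when a __free(demo_obj) variable goes
 * out of scope, unless ownership was transferred first.
 */
DEFINE_FREE(demo_obj, struct demo_obj *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

static struct demo_obj *demo_alloc(int id)
{
	struct demo_obj *obj __free(demo_obj) =
		kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	if (id < 0)
		return ERR_PTR(-EINVAL);	/* obj is kfree()d automatically */

	obj->id = id;
	return_ptr(obj);	/* no_free_ptr(): hand ownership to the caller */
}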
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f5a2727ca4a9..fdb79dd1ebd8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -75,6 +75,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/cleanup.h>
 
 extern bool static_key_initialized;
 
@@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)
 
 #endif /* CONFIG_JUMP_LABEL */
 
+DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
+
 #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
 #define jump_label_enabled static_key_enabled
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b027a4030976..030569210670 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -39,6 +39,7 @@
 #include <linux/static_call.h>
 #include <linux/perf_event.h>
 #include <linux/execmem.h>
+#include <linux/cleanup.h>
 
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -140,45 +141,39 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c);
 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
-	kprobe_opcode_t *slot = NULL;
 
 	/* Since the slot array is not protected by rcu, we need a mutex */
-	mutex_lock(&c->mutex);
- retry:
-	rcu_read_lock();
-	list_for_each_entry_rcu(kip, &c->pages, list) {
-		if (kip->nused < slots_per_page(c)) {
-			int i;
-
-			for (i = 0; i < slots_per_page(c); i++) {
-				if (kip->slot_used[i] == SLOT_CLEAN) {
-					kip->slot_used[i] = SLOT_USED;
-					kip->nused++;
-					slot = kip->insns + (i * c->insn_size);
-					rcu_read_unlock();
-					goto out;
+	guard(mutex)(&c->mutex);
+	do {
+		guard(rcu)();
+		list_for_each_entry_rcu(kip, &c->pages, list) {
+			if (kip->nused < slots_per_page(c)) {
+				int i;
+
+				for (i = 0; i < slots_per_page(c); i++) {
+					if (kip->slot_used[i] == SLOT_CLEAN) {
+						kip->slot_used[i] = SLOT_USED;
+						kip->nused++;
+						return kip->insns + (i * c->insn_size);
+					}
 				}
+				/* kip->nused is broken. Fix it. */
+				kip->nused = slots_per_page(c);
+				WARN_ON(1);
 			}
-			/* kip->nused is broken. Fix it. */
-			kip->nused = slots_per_page(c);
-			WARN_ON(1);
 		}
-	}
-	rcu_read_unlock();
 	/* If there are any garbage slots, collect it and try again. */
-	if (c->nr_garbage && collect_garbage_slots(c) == 0)
-		goto retry;
+	} while (c->nr_garbage && collect_garbage_slots(c) == 0);
 
 	/* All out of space.  Need to allocate a new page. */
 	kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
-		goto out;
+		return NULL;
 
 	kip->insns = c->alloc();
 	if (!kip->insns) {
 		kfree(kip);
-		goto out;
+		return NULL;
 	}
 	INIT_LIST_HEAD(&kip->list);
 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -187,14 +182,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 	kip->ngarbage = 0;
 	kip->cache = c;
 	list_add_rcu(&kip->list, &c->pages);
-	slot = kip->insns;
 	/* Record the perf ksymbol register event after adding the page */
 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
 			   PAGE_SIZE, false, c->sym);
-out:
-	mutex_unlock(&c->mutex);
-	return slot;
+
+	return kip->insns;
 }
 
 /* Return true if all garbages are collected, otherwise false. */
@@ -249,25 +242,35 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
 	return 0;
 }
 
-void __free_insn_slot(struct kprobe_insn_cache *c,
-		      kprobe_opcode_t *slot, int dirty)
+static long __find_insn_page(struct kprobe_insn_cache *c,
+	kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
 {
-	struct kprobe_insn_page *kip;
+	struct kprobe_insn_page *kip = NULL;
 	long idx;
 
-	mutex_lock(&c->mutex);
-	rcu_read_lock();
+	guard(rcu)();
 	list_for_each_entry_rcu(kip, &c->pages, list) {
 		idx = ((long)slot - (long)kip->insns) /
 			(c->insn_size * sizeof(kprobe_opcode_t));
-		if (idx >= 0 && idx < slots_per_page(c))
-			goto out;
+		if (idx >= 0 && idx < slots_per_page(c)) {
+			*pkip = kip;
+			return idx;
+		}
 	}
 	/* Could not find this slot. */
 	WARN_ON(1);
-	kip = NULL;
-out:
-	rcu_read_unlock();
+	*pkip = NULL;
+	return -1;
+}
+
+void __free_insn_slot(struct kprobe_insn_cache *c,
+		      kprobe_opcode_t *slot, int dirty)
+{
+	struct kprobe_insn_page *kip = NULL;
+	long idx;
+
+	guard(mutex)(&c->mutex);
+	idx = __find_insn_page(c, slot, &kip);
 	/* Mark and sweep: this may sleep */
 	if (kip) {
 		/* Check double free */
@@ -281,7 +284,6 @@ out:
 			collect_one_slot(kip, idx);
 		}
 	}
-	mutex_unlock(&c->mutex);
 }
 
 /*
@@ -600,47 +602,43 @@ static void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static void kprobe_optimizer(struct work_struct *work)
 {
-	mutex_lock(&kprobe_mutex);
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(mutex)(&kprobe_mutex);
 
-	/*
-	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
-	 * kprobes before waiting for quiesence period.
-	 */
-	do_unoptimize_kprobes();
+	scoped_guard(cpus_read_lock) {
+		guard(mutex)(&text_mutex);
 
-	/*
-	 * Step 2: Wait for quiesence period to ensure all potentially
-	 * preempted tasks to have normally scheduled. Because optprobe
-	 * may modify multiple instructions, there is a chance that Nth
-	 * instruction is preempted. In that case, such tasks can return
-	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
-	 * Note that on non-preemptive kernel, this is transparently converted
-	 * to synchronoze_sched() to wait for all interrupts to have completed.
-	 */
-	synchronize_rcu_tasks();
+		/*
+		 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
+		 * kprobes before waiting for quiesence period.
+		 */
+		do_unoptimize_kprobes();
 
-	/* Step 3: Optimize kprobes after quiesence period */
-	do_optimize_kprobes();
+		/*
+		 * Step 2: Wait for quiesence period to ensure all potentially
+		 * preempted tasks to have normally scheduled. Because optprobe
+		 * may modify multiple instructions, there is a chance that Nth
+		 * instruction is preempted. In that case, such tasks can return
+		 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
+		 * Note that on non-preemptive kernel, this is transparently converted
+		 * to synchronoze_sched() to wait for all interrupts to have completed.
+		 */
+		synchronize_rcu_tasks();
 
-	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes();
+		/* Step 3: Optimize kprobes after quiesence period */
+		do_optimize_kprobes();
 
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
+		/* Step 4: Free cleaned kprobes after quiesence period */
+		do_free_cleaned_kprobes();
+	}
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-
-	mutex_unlock(&kprobe_mutex);
 }
 
-/* Wait for completing optimization and unoptimization */
-void wait_for_kprobe_optimizer(void)
+static void wait_for_kprobe_optimizer_locked(void)
 {
-	mutex_lock(&kprobe_mutex);
+	lockdep_assert_held(&kprobe_mutex);
 
 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
 		mutex_unlock(&kprobe_mutex);
@@ -652,8 +650,14 @@ void wait_for_kprobe_optimizer(void)
 		mutex_lock(&kprobe_mutex);
 	}
+}
 
-	mutex_unlock(&kprobe_mutex);
+/* Wait for completing optimization and unoptimization */
+void wait_for_kprobe_optimizer(void)
+{
+	guard(mutex)(&kprobe_mutex);
+
+	wait_for_kprobe_optimizer_locked();
 }
 
 bool optprobe_queued_unopt(struct optimized_kprobe *op)
@@ -852,29 +856,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* For preparing optimization, jump_label_text_reserved() is called. */
-	cpus_read_lock();
-	jump_label_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(jump_label_lock)();
+	guard(mutex)(&text_mutex);
 
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		goto out;
+		return;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe. */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		goto out;
+		return;
 	}
 
 	init_aggr_kprobe(ap, p);
 	optimize_kprobe(ap);	/* This just kicks optimizer thread. */
-
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
 }
 
 static void optimize_all_kprobes(void)
@@ -883,10 +882,10 @@ static void optimize_all_kprobes(void)
 	struct kprobe *p;
 	unsigned int i;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 	/* If optimization is already allowed, just return. */
 	if (kprobes_allow_optimization)
-		goto out;
+		return;
 
 	cpus_read_lock();
 	kprobes_allow_optimization = true;
@@ -898,8 +897,6 @@ static void optimize_all_kprobes(void)
 	}
 	cpus_read_unlock();
 	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
-out:
-	mutex_unlock(&kprobe_mutex);
 }
 
 #ifdef CONFIG_SYSCTL
@@ -909,12 +906,10 @@ static void unoptimize_all_kprobes(void)
 	struct kprobe *p;
 	unsigned int i;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 	/* If optimization is already prohibited, just return. */
-	if (!kprobes_allow_optimization) {
-		mutex_unlock(&kprobe_mutex);
+	if (!kprobes_allow_optimization)
 		return;
-	}
 
 	cpus_read_lock();
 	kprobes_allow_optimization = false;
@@ -926,10 +921,8 @@ static void unoptimize_all_kprobes(void)
 		}
 	}
 	cpus_read_unlock();
-	mutex_unlock(&kprobe_mutex);
-
 	/* Wait for unoptimizing completion. */
-	wait_for_kprobe_optimizer();
+	wait_for_kprobe_optimizer_locked();
 	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
 }
 
@@ -941,7 +934,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
 {
 	int ret;
 
-	mutex_lock(&kprobe_sysctl_mutex);
+	guard(mutex)(&kprobe_sysctl_mutex);
 	sysctl_kprobes_optimization = kprobes_allow_optimization ?
 					1 : 0;
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
@@ -949,7 +942,6 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
 		optimize_all_kprobes();
 	else
 		unoptimize_all_kprobes();
-	mutex_unlock(&kprobe_sysctl_mutex);
 
 	return ret;
 }
@@ -1024,7 +1016,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define __arm_kprobe(p)			arch_arm_kprobe(p)
 #define __disarm_kprobe(p, o)		arch_disarm_kprobe(p)
 #define kprobe_disarmed(p)		kprobe_disabled(p)
-#define wait_for_kprobe_optimizer()	do {} while (0)
+#define wait_for_kprobe_optimizer_locked() \
+	lockdep_assert_held(&kprobe_mutex)
 
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
@@ -1078,20 +1071,18 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
 
 	if (*cnt == 0) {
 		ret = register_ftrace_function(ops);
-		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
-			goto err_ftrace;
+		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
+			/*
+			 * At this point, sinec ops is not registered, we should be sefe from
+			 * registering empty filter.
+			 */
+			ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
+			return ret;
+		}
 	}
 
 	(*cnt)++;
 	return ret;
-
-err_ftrace:
-	/*
-	 * At this point, sinec ops is not registered, we should be sefe from
-	 * registering empty filter.
-	 */
-	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
-	return ret;
 }
 
 static int arm_kprobe_ftrace(struct kprobe *p)
@@ -1163,12 +1154,9 @@ static int arm_kprobe(struct kprobe *kp)
 	if (unlikely(kprobe_ftrace(kp)))
 		return arm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__arm_kprobe(kp);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
 
@@ -1177,12 +1165,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
 	if (unlikely(kprobe_ftrace(kp)))
 		return disarm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__disarm_kprobe(kp, reopt);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
 
@@ -1299,62 +1284,55 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
-	cpus_read_lock();
-
-	/* For preparing optimization, jump_label_text_reserved() is called */
-	jump_label_lock();
-	mutex_lock(&text_mutex);
-
-	if (!kprobe_aggrprobe(orig_p)) {
-		/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
-		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap) {
-			ret = -ENOMEM;
-			goto out;
+	scoped_guard(cpus_read_lock) {
+		/* For preparing optimization, jump_label_text_reserved() is called */
+		guard(jump_label_lock)();
+		guard(mutex)(&text_mutex);
+
+		if (!kprobe_aggrprobe(orig_p)) {
+			/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
+			ap = alloc_aggr_kprobe(orig_p);
+			if (!ap)
+				return -ENOMEM;
+			init_aggr_kprobe(ap, orig_p);
+		} else if (kprobe_unused(ap)) {
+			/* This probe is going to die. Rescue it */
+			ret = reuse_unused_kprobe(ap);
+			if (ret)
+				return ret;
 		}
-		init_aggr_kprobe(ap, orig_p);
-	} else if (kprobe_unused(ap)) {
-		/* This probe is going to die. Rescue it */
-		ret = reuse_unused_kprobe(ap);
-		if (ret)
-			goto out;
-	}
 
-	if (kprobe_gone(ap)) {
-		/*
-		 * Attempting to insert new probe at the same location that
-		 * had a probe in the module vaddr area which already
-		 * freed. So, the instruction slot has already been
-		 * released. We need a new slot for the new probe.
-		 */
-		ret = arch_prepare_kprobe(ap);
-		if (ret)
+		if (kprobe_gone(ap)) {
 			/*
-			 * Even if fail to allocate new slot, don't need to
-			 * free the 'ap'. It will be used next time, or
-			 * freed by unregister_kprobe().
+			 * Attempting to insert new probe at the same location that
+			 * had a probe in the module vaddr area which already
+			 * freed. So, the instruction slot has already been
+			 * released. We need a new slot for the new probe.
 			 */
-			goto out;
-
-		/* Prepare optimized instructions if possible. */
-		prepare_optimized_kprobe(ap);
+			ret = arch_prepare_kprobe(ap);
+			if (ret)
+				/*
+				 * Even if fail to allocate new slot, don't need to
+				 * free the 'ap'. It will be used next time, or
+				 * freed by unregister_kprobe().
+				 */
+				return ret;
 
-		/*
-		 * Clear gone flag to prevent allocating new slot again, and
-		 * set disabled flag because it is not armed yet.
-		 */
-		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
-			    | KPROBE_FLAG_DISABLED;
-	}
+			/* Prepare optimized instructions if possible. */
+			prepare_optimized_kprobe(ap);
 
-	/* Copy the insn slot of 'p' to 'ap'. */
-	copy_kprobe(ap, p);
-	ret = add_new_kprobe(ap, p);
+			/*
+			 * Clear gone flag to prevent allocating new slot again, and
+			 * set disabled flag because it is not armed yet.
+			 */
+			ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+				| KPROBE_FLAG_DISABLED;
+		}
 
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
+		/* Copy the insn slot of 'p' to 'ap'. */
+		copy_kprobe(ap, p);
+		ret = add_new_kprobe(ap, p);
+	}
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -1448,7 +1426,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
 	     unsigned long offset, bool *on_func_entry)
 {
 	if ((symbol_name && addr) || (!symbol_name && !addr))
-		goto invalid;
+		return ERR_PTR(-EINVAL);
 
 	if (symbol_name) {
 		/*
@@ -1478,16 +1456,16 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
 	 * at the start of the function.
 	 */
 	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
-	if (addr)
-		return addr;
+	if (!addr)
+		return ERR_PTR(-EINVAL);
 
-invalid:
-	return ERR_PTR(-EINVAL);
+	return addr;
 }
 
 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
 {
 	bool on_func_entry;
+
 	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 }
 
@@ -1505,15 +1483,15 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 	if (unlikely(!ap))
 		return NULL;
 
-	if (p != ap) {
-		list_for_each_entry(list_p, &ap->list, list)
-			if (list_p == p)
-			/* kprobe p is a valid probe */
-				goto valid;
-		return NULL;
-	}
-valid:
-	return ap;
+	if (p == ap)
+		return ap;
+
+	list_for_each_entry(list_p, &ap->list, list)
+		if (list_p == p)
+			/* kprobe p is a valid probe */
+			return ap;
+
+	return NULL;
 }
 
 /*
@@ -1522,14 +1500,12 @@ valid:
 */
 static inline int warn_kprobe_rereg(struct kprobe *p)
 {
-	int ret = 0;
+	guard(mutex)(&kprobe_mutex);
 
-	mutex_lock(&kprobe_mutex);
 	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
-		ret = -EINVAL;
-	mutex_unlock(&kprobe_mutex);
+		return -EINVAL;
 
-	return ret;
+	return 0;
 }
 
 static int check_ftrace_location(struct kprobe *p)
@@ -1565,17 +1541,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	ret = check_ftrace_location(p);
 	if (ret)
 		return ret;
-	jump_label_lock();
-	preempt_disable();
+
+	guard(jump_label_lock)();
 
 	/* Ensure the address is in a text area, and find a module if exists. */
 	*probed_mod = NULL;
 	if (!core_kernel_text((unsigned long) p->addr)) {
+		guard(preempt)();
 		*probed_mod = __module_text_address((unsigned long) p->addr);
-		if (!(*probed_mod)) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!(*probed_mod))
+			return -EINVAL;
+
+		/*
+		 * We must hold a refcount of the probed module while updating
+		 * its code to prohibit unexpected unloading.
+		 */
+		if (unlikely(!try_module_get(*probed_mod)))
+			return -ENOENT;
 	}
 
 	/* Ensure it is not in reserved area. */
 	if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1584,49 +1566,71 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	    static_call_text_reserved(p->addr, p->addr) ||
 	    find_bug((unsigned long)p->addr) ||
 	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
-		ret = -EINVAL;
-		goto out;
+		module_put(*probed_mod);
+		return -EINVAL;
 	}
 
 	/* Get module refcount and reject __init functions for loaded modules. */
 	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
 		/*
-		 * We must hold a refcount of the probed module while updating
-		 * its code to prohibit unexpected unloading.
-		 */
-		if (unlikely(!try_module_get(*probed_mod))) {
-			ret = -ENOENT;
-			goto out;
-		}
-
-		/*
 		 * If the module freed '.init.text', we couldn't insert
 		 * kprobes in there.
 		 */
 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
 		    !module_is_coming(*probed_mod)) {
 			module_put(*probed_mod);
-			*probed_mod = NULL;
-			ret = -ENOENT;
+			return -ENOENT;
 		}
 	}
 
-out:
-	preempt_enable();
-	jump_label_unlock();
+	return 0;
+}
 
-	return ret;
+static int __register_kprobe(struct kprobe *p)
+{
+	int ret;
+	struct kprobe *old_p;
+
+	guard(mutex)(&kprobe_mutex);
+
+	old_p = get_kprobe(p->addr);
+	if (old_p)
+		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
+		return register_aggr_kprobe(old_p, p);
+
+	scoped_guard(cpus_read_lock) {
+		/* Prevent text modification */
+		guard(mutex)(&text_mutex);
+		ret = prepare_kprobe(p);
+		if (ret)
+			return ret;
+	}
+
+	INIT_HLIST_NODE(&p->hlist);
+	hlist_add_head_rcu(&p->hlist,
+			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_rcu();
+		}
+	}
+
+	/* Try to optimize kprobe */
+	try_to_optimize_kprobe(p);
+	return 0;
 }
 
 int register_kprobe(struct kprobe *p)
 {
 	int ret;
-	struct kprobe *old_p;
 	struct module *probed_mod;
 	kprobe_opcode_t *addr;
 	bool on_func_entry;
 
-	/* Adjust probe address from symbol */
+	/* Canonicalize probe address from symbol */
 	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
@@ -1638,6 +1642,8 @@ int register_kprobe(struct kprobe *p)
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
+	if (on_func_entry)
+		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
 
@@ -1645,44 +1651,7 @@ int register_kprobe(struct kprobe *p)
 	if (ret)
 		return ret;
 
-	mutex_lock(&kprobe_mutex);
-
-	if (on_func_entry)
-		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
-
-	old_p = get_kprobe(p->addr);
-	if (old_p) {
-		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
-		ret = register_aggr_kprobe(old_p, p);
-		goto out;
-	}
-
-	cpus_read_lock();
-	/* Prevent text modification */
-	mutex_lock(&text_mutex);
-	ret = prepare_kprobe(p);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-	if (ret)
-		goto out;
-
-	INIT_HLIST_NODE(&p->hlist);
-	hlist_add_head_rcu(&p->hlist,
-			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
-
-	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
-		ret = arm_kprobe(p);
-		if (ret) {
-			hlist_del_rcu(&p->hlist);
-			synchronize_rcu();
-			goto out;
-		}
-	}
-
-	/* Try to optimize kprobe */
-	try_to_optimize_kprobe(p);
-out:
-	mutex_unlock(&kprobe_mutex);
+	ret = __register_kprobe(p);
 
 	if (probed_mod)
 		module_put(probed_mod);
@@ -1761,29 +1730,31 @@ static int __unregister_kprobe_top(struct kprobe *p)
 	if (IS_ERR(ap))
 		return PTR_ERR(ap);
 
-	if (ap == p)
-		/*
-		 * This probe is an independent(and non-optimized) kprobe
-		 * (not an aggrprobe). Remove from the hash list.
-		 */
-		goto disarmed;
+	WARN_ON(ap != p && !kprobe_aggrprobe(ap));
 
-	/* Following process expects this probe is an aggrprobe */
-	WARN_ON(!kprobe_aggrprobe(ap));
-
-	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
+	/*
+	 * If the probe is an independent(and non-optimized) kprobe
+	 * (not an aggrprobe), the last kprobe on the aggrprobe, or
+	 * kprobe is already disarmed, just remove from the hash list.
+	 */
+	if (ap == p ||
+	    (list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
 		/*
 		 * !disarmed could be happen if the probe is under delayed
 		 * unoptimizing.
 		 */
-		goto disarmed;
-	else {
-		/* If disabling probe has special handlers, update aggrprobe */
-		if (p->post_handler && !kprobe_gone(p)) {
-			list_for_each_entry(list_p, &ap->list, list) {
-				if ((list_p != p) && (list_p->post_handler))
-					goto noclean;
-			}
+		hlist_del_rcu(&ap->hlist);
+		return 0;
+	}
+
+	/* If disabling probe has special handlers, update aggrprobe */
+	if (p->post_handler && !kprobe_gone(p)) {
+		list_for_each_entry(list_p, &ap->list, list) {
+			if ((list_p != p) && (list_p->post_handler))
+				break;
+		}
+		/* No other probe has post_handler */
+		if (list_entry_is_head(list_p, &ap->list, list)) {
 			/*
 			 * For the kprobe-on-ftrace case, we keep the
 			 * post_handler setting to identify this aggrprobe
@@ -1792,24 +1763,21 @@ static int __unregister_kprobe_top(struct kprobe *p)
 			if (!kprobe_ftrace(ap))
 				ap->post_handler = NULL;
 		}
-noclean:
+	}
+
+	/*
+	 * Remove from the aggrprobe: this path will do nothing in
+	 * __unregister_kprobe_bottom().
+	 */
+	list_del_rcu(&p->list);
+	if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
 		/*
-		 * Remove from the aggrprobe: this path will do nothing in
-		 * __unregister_kprobe_bottom().
+		 * Try to optimize this probe again, because post
+		 * handler may have been changed.
 		 */
-		list_del_rcu(&p->list);
-		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
-			/*
-			 * Try to optimize this probe again, because post
-			 * handler may have been changed.
-			 */
-			optimize_kprobe(ap);
-	}
+		optimize_kprobe(ap);
 	return 0;
-
-disarmed:
-	hlist_del_rcu(&ap->hlist);
-	return 0;
 }
 
 static void __unregister_kprobe_bottom(struct kprobe *p)
@@ -1858,12 +1826,11 @@ void unregister_kprobes(struct kprobe **kps, int num)
 
 	if (num <= 0)
 		return;
-	mutex_lock(&kprobe_mutex);
-	for (i = 0; i < num; i++)
-		if (__unregister_kprobe_top(kps[i]) < 0)
-			kps[i]->addr = NULL;
-	mutex_unlock(&kprobe_mutex);
-
+	scoped_guard(mutex, &kprobe_mutex) {
+		for (i = 0; i < num; i++)
+			if (__unregister_kprobe_top(kps[i]) < 0)
+				kps[i]->addr = NULL;
+	}
 	synchronize_rcu();
 	for (i = 0; i < num; i++)
 		if (kps[i]->addr)
@@ -2302,8 +2269,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
 
 	if (num <= 0)
 		return;
-	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < num; i++) {
+		guard(mutex)(&kprobe_mutex);
+
 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
 			rps[i]->kp.addr = NULL;
 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
@@ -2312,7 +2280,6 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
 		rcu_assign_pointer(rps[i]->rph->rp, NULL);
 #endif
 	}
-	mutex_unlock(&kprobe_mutex);
 
 	synchronize_rcu();
 	for (i = 0; i < num; i++) {
@@ -2393,18 +2360,14 @@ static void kill_kprobe(struct kprobe *p)
 /* Disable one kprobe */
 int disable_kprobe(struct kprobe *kp)
 {
-	int ret = 0;
 	struct kprobe *p;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* Disable this kprobe */
 	p = __disable_kprobe(kp);
-	if (IS_ERR(p))
-		ret = PTR_ERR(p);
 
-	mutex_unlock(&kprobe_mutex);
-	return ret;
+	return IS_ERR(p) ? PTR_ERR(p) : 0;
 }
 EXPORT_SYMBOL_GPL(disable_kprobe);
 
@@ -2414,20 +2377,16 @@ int enable_kprobe(struct kprobe *kp)
 	int ret = 0;
 	struct kprobe *p;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* Check whether specified probe is valid. */
 	p = __get_valid_kprobe(kp);
-	if (unlikely(p == NULL)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (unlikely(p == NULL))
		return -EINVAL;
 
-	if (kprobe_gone(kp)) {
+	if (kprobe_gone(kp))
 		/* This kprobe has gone, we couldn't enable it. */
-		ret = -EINVAL;
-		goto out;
-	}
+		return -EINVAL;
 
 	if (p != kp)
 		kp->flags &= ~KPROBE_FLAG_DISABLED;
@@ -2441,8 +2400,6 @@ int enable_kprobe(struct kprobe *kp)
 			kp->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
-out:
-	mutex_unlock(&kprobe_mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
 
@@ -2630,11 +2587,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
 
-	if (val == MODULE_STATE_COMING) {
-		mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
+
+	if (val == MODULE_STATE_COMING)
 		add_module_kprobe_blacklist(mod);
-		mutex_unlock(&kprobe_mutex);
-	}
+
 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
 		return NOTIFY_DONE;
 
@@ -2644,7 +2601,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	 * notified, only '.init.text' section would be freed. We need to
 	 * disable kprobes which have been inserted in the sections.
 	 */
-	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry(p, head, hlist)
@@ -2667,7 +2623,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	}
 	if (val == MODULE_STATE_GOING)
 		remove_module_kprobe_blacklist(mod);
-	mutex_unlock(&kprobe_mutex);
 
 	return NOTIFY_DONE;
 }
 
@@ -2695,7 +2650,7 @@ void kprobe_free_init_mem(void)
 	struct kprobe *p;
 	int i;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* Kill all kprobes on initmem because the target code has been freed. */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -2705,8 +2660,6 @@ void kprobe_free_init_mem(void)
 				kill_kprobe(p);
 		}
 	}
-
-	mutex_unlock(&kprobe_mutex);
 }
 
 static int __init init_kprobes(void)
@@ -2902,11 +2855,11 @@ static int arm_all_kprobes(void)
 	unsigned int i, total = 0, errors = 0;
 	int err, ret = 0;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* If kprobes are armed, just return */
 	if (!kprobes_all_disarmed)
-		goto already_enabled;
+		return 0;
 
 	/*
 	 * optimize_kprobe() called by arm_kprobe() checks
@@ -2936,8 +2889,6 @@ static int arm_all_kprobes(void)
 	else
 		pr_info("Kprobes globally enabled\n");
 
-already_enabled:
-	mutex_unlock(&kprobe_mutex);
 	return ret;
 }
 
@@ -2948,13 +2899,11 @@ static int disarm_all_kprobes(void)
 	unsigned int i, total = 0, errors = 0;
 	int err, ret = 0;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* If kprobes are already disarmed, just return */
-	if (kprobes_all_disarmed) {
-		mutex_unlock(&kprobe_mutex);
+	if (kprobes_all_disarmed)
 		return 0;
-	}
 
 	kprobes_all_disarmed = true;
 
@@ -2979,11 +2928,8 @@ static int disarm_all_kprobes(void)
 	else
 		pr_info("Kprobes globally disabled\n");
 
-	mutex_unlock(&kprobe_mutex);
-
 	/* Wait for disarming all kprobes by optimizer */
-	wait_for_kprobe_optimizer();
-
+	wait_for_kprobe_optimizer_locked();
 	return ret;
 }
 
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index be8be0c1aaf0..82fd637cfc19 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -917,10 +917,10 @@ static int __trace_eprobe_create(int argc, const char *argv[])
 		goto error;
 	}
 
-	mutex_lock(&event_mutex);
-	event_call = find_and_get_event(sys_name, sys_event);
-	ep = alloc_event_probe(group, event, event_call, argc - 2);
-	mutex_unlock(&event_mutex);
+	scoped_guard(mutex, &event_mutex) {
+		event_call = find_and_get_event(sys_name, sys_event);
+		ep = alloc_event_probe(group, event, event_call, argc - 2);
+	}
 
 	if (IS_ERR(ep)) {
 		ret = PTR_ERR(ep);
@@ -952,23 +952,21 @@ static int __trace_eprobe_create(int argc, const char *argv[])
 	if (ret < 0)
 		goto error;
 
 	init_trace_eprobe_call(ep);
-	mutex_lock(&event_mutex);
-	ret = trace_probe_register_event_call(&ep->tp);
-	if (ret) {
-		if (ret == -EEXIST) {
-			trace_probe_log_set_index(0);
-			trace_probe_log_err(0, EVENT_EXIST);
+	scoped_guard(mutex, &event_mutex) {
+		ret = trace_probe_register_event_call(&ep->tp);
+		if (ret) {
+			if (ret == -EEXIST) {
+				trace_probe_log_set_index(0);
+				trace_probe_log_err(0, EVENT_EXIST);
+			}
+			goto error;
+		}
+		ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
+		if (ret < 0) {
+			trace_probe_unregister_event_call(&ep->tp);
+			goto error;
 		}
-		mutex_unlock(&event_mutex);
-		goto error;
-	}
-	ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
-	if (ret < 0) {
-		trace_probe_unregister_event_call(&ep->tp);
-		mutex_unlock(&event_mutex);
-		goto error;
 	}
-	mutex_unlock(&event_mutex);
 	return ret;
 
 parse_error:
 	ret = -EINVAL;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 0642ea174849..d8d5f18a141a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -8,6 +8,7 @@
 #define pr_fmt(fmt)	"trace_kprobe: " fmt
 
 #include <linux/bpf-cgroup.h>
+#include <linux/cleanup.h>
 #include <linux/security.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -257,6 +258,9 @@ static void free_trace_kprobe(struct trace_kprobe *tk)
 	}
 }
 
+DEFINE_FREE(free_trace_kprobe, struct trace_kprobe *,
+	if (!IS_ERR_OR_NULL(_T)) free_trace_kprobe(_T))
+
 /*
  * Allocate new trace_probe and initialize it (including kprobes).
  */
@@ -268,7 +272,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 					     int maxactive,
 					     int nargs, bool is_return)
 {
-	struct trace_kprobe *tk;
+	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
 	int ret = -ENOMEM;
 
 	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
@@ -277,12 +281,12 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 
 	tk->nhit = alloc_percpu(unsigned long);
 	if (!tk->nhit)
-		goto error;
+		return ERR_PTR(ret);
 
 	if (symbol) {
 		tk->symbol = kstrdup(symbol, GFP_KERNEL);
 		if (!tk->symbol)
-			goto error;
+			return ERR_PTR(ret);
 		tk->rp.kp.symbol_name = tk->symbol;
 		tk->rp.kp.offset = offs;
 	} else
@@ -299,13 +303,10 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 
 	ret = trace_probe_init(&tk->tp, event, group, false, nargs);
 	if (ret < 0)
-		goto error;
+		return ERR_PTR(ret);
 
 	dyn_event_init(&tk->devent, &trace_kprobe_ops);
-	return tk;
-error:
-	free_trace_kprobe(tk);
-	return ERR_PTR(ret);
+	return_ptr(tk);
 }
 
 static struct trace_kprobe *find_trace_kprobe(const char *event,
@@ -634,7 +635,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
 	struct trace_kprobe *old_tk;
 	int ret;
 
-	mutex_lock(&event_mutex);
+	guard(mutex)(&event_mutex);
 
 	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
 				   trace_probe_group_name(&tk->tp));
@@ -642,11 +643,9 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
 		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
 			trace_probe_log_set_index(0);
 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
-			ret = -EEXIST;
-		} else {
-			ret = append_trace_kprobe(tk, old_tk);
+			return -EEXIST;
 		}
-		goto end;
+		return append_trace_kprobe(tk, old_tk);
 	}
 
 	/* Register new event */
@@ -657,7 +656,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
 			trace_probe_log_err(0, EVENT_EXIST);
 		} else
 			pr_warn("Failed to register probe event(%d)\n", ret);
-		goto end;
+		return ret;
 	}
 
 	/* Register k*probe */
@@ -672,8 +671,6 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
 	else
 		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
 
-end:
-	mutex_unlock(&event_mutex);
 	return ret;
 }
 
@@ -706,7 +703,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
 		return NOTIFY_DONE;
 
 	/* Update probes on coming module */
-	mutex_lock(&event_mutex);
+	guard(mutex)(&event_mutex);
 	for_each_trace_kprobe(tk, pos) {
 		if (trace_kprobe_within_module(tk, mod)) {
 			/* Don't need to check busy - this should have gone. */
@@ -718,7 +715,6 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
 				  module_name(mod), ret);
 		}
 	}
-	mutex_unlock(&event_mutex);
 
 	return NOTIFY_DONE;
 }
@@ -840,7 +836,8 @@ out:
 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
 				      struct pt_regs *regs);
 
-static int __trace_kprobe_create(int argc, const char *argv[])
+static int trace_kprobe_create_internal(int argc, const char *argv[],
+					struct traceprobe_parse_context *ctx)
 {
 	/*
 	 * Argument syntax:
@@ -866,11 +863,12 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	 * Type of args:
 	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 	 */
-	struct trace_kprobe *tk = NULL;
+	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
 	int i, len, new_argc = 0, ret = 0;
 	bool is_return = false;
-	char *symbol = NULL, *tmp = NULL;
-	const char **new_argv = NULL;
+	char *symbol __free(kfree) = NULL;
+	char *tmp = NULL;
+	const char **new_argv __free(kfree) = NULL;
 	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
 	enum probe_print_type ptype;
 	int maxactive = 0;
@@ -879,8 +877,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	char buf[MAX_EVENT_NAME_LEN];
 	char gbuf[MAX_EVENT_NAME_LEN];
 	char abuf[MAX_BTF_ARGS_LEN];
-	char *dbuf = NULL;
-	struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
+	char *dbuf __free(kfree) = NULL;
 
 	switch (argv[0][0]) {
 	case 'r':
@@ -894,8 +891,6 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	if (argc < 2)
 		return -ECANCELED;
 
-	trace_probe_log_init("trace_kprobe", argc, argv);
-
 	event = strchr(&argv[0][1], ':');
 	if (event)
 		event++;
@@ -903,7 +898,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	if (isdigit(argv[0][1])) {
 		if (!is_return) {
 			trace_probe_log_err(1, BAD_MAXACT_TYPE);
-			goto parse_error;
+			return -EINVAL;
 		}
 		if (event)
 			len = event - &argv[0][1] - 1;
@@ -911,21 +906,21 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 			len = strlen(&argv[0][1]);
 		if (len > MAX_EVENT_NAME_LEN - 1) {
 			trace_probe_log_err(1, BAD_MAXACT);
-			goto parse_error;
+			return -EINVAL;
 		}
 		memcpy(buf, &argv[0][1], len);
 		buf[len] = '\0';
 		ret = kstrtouint(buf, 0, &maxactive);
 		if (ret || !maxactive) {
 			trace_probe_log_err(1, BAD_MAXACT);
-			goto parse_error;
+			return -EINVAL;
 		}
 		/* kretprobes instances are iterated over via a list. The
 		 * maximum should stay reasonable.
 		 */
 		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
 			trace_probe_log_err(1, MAXACT_TOO_BIG);
-			goto parse_error;
+			return -EINVAL;
 		}
 	}
 
@@ -934,16 +929,13 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
 		trace_probe_log_set_index(1);
 		/* Check whether uprobe event specified */
-		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
-			ret = -ECANCELED;
-			goto error;
-		}
+		if (strchr(argv[1], '/') && strchr(argv[1], ':'))
+			return -ECANCELED;
+
 		/* a symbol specified */
 		symbol = kstrdup(argv[1], GFP_KERNEL);
-		if (!symbol) {
-			ret = -ENOMEM;
-			goto error;
-		}
+		if (!symbol)
+			return -ENOMEM;
 
 		tmp = strchr(symbol, '%');
 		if (tmp) {
@@ -952,7 +944,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 				is_return = true;
 			} else {
 				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
-				goto parse_error;
+				return -EINVAL;
 			}
 		}
 
@@ -960,7 +952,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 		ret = traceprobe_split_symbol_offset(symbol, &offset);
 		if (ret || offset < 0 || offset > UINT_MAX) {
 			trace_probe_log_err(0, BAD_PROBE_ADDR);
-			goto parse_error;
+			return -EINVAL;
 		}
 		ret = validate_probe_symbol(symbol);
 		if (ret) {
@@ -968,17 +960,17 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 				trace_probe_log_err(0, NON_UNIQ_SYMBOL);
 			else
 				trace_probe_log_err(0, BAD_PROBE_ADDR);
-			goto parse_error;
+			return -EINVAL;
 		}
 		if (is_return)
-			ctx.flags |= TPARG_FL_RETURN;
+			ctx->flags |= TPARG_FL_RETURN;
 		ret = kprobe_on_func_entry(NULL, symbol, offset);
 		if (ret == 0 && !is_return)
-			ctx.flags |= TPARG_FL_FENTRY;
+			ctx->flags |= TPARG_FL_FENTRY;
 		/* Defer the ENOENT case until register kprobe */
 		if (ret == -EINVAL && is_return) {
 			trace_probe_log_err(0, BAD_RETPROBE);
-			goto parse_error;
+			return -EINVAL;
 		}
 	}
 
@@ -987,7 +979,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 		ret = traceprobe_parse_event_name(&event, &group, gbuf,
 						  event - argv[0]);
 		if (ret)
-			goto parse_error;
+			return ret;
 	}
 
 	if (!event) {
@@ -1003,26 +995,24 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	}
 
 	argc -= 2; argv += 2;
-	ctx.funcname = symbol;
+	ctx->funcname = symbol;
 	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
-					       abuf, MAX_BTF_ARGS_LEN, &ctx);
+					       abuf, MAX_BTF_ARGS_LEN, ctx);
 	if (IS_ERR(new_argv)) {
 		ret = PTR_ERR(new_argv);
 		new_argv = NULL;
-		goto out;
+		return ret;
 	}
 	if (new_argv) {
 		argc = new_argc;
 		argv = new_argv;
 	}
-	if (argc > MAX_TRACE_ARGS) {
-		ret = -E2BIG;
-		goto out;
-	}
+	if (argc > MAX_TRACE_ARGS)
+		return -E2BIG;
 
 	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* setup a probe */
 	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
@@ -1031,16 +1021,16 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 		ret = PTR_ERR(tk);
 		/* This must return -ENOMEM, else there is a bug */
 		WARN_ON_ONCE(ret != -ENOMEM);
-		goto out;	/* We know tk is not allocated */
+		return ret;	/* We know tk is not allocated */
 	}
 
 	/* parse arguments */
 	for (i = 0; i < argc; i++) {
 		trace_probe_log_set_index(i + 2);
-		ctx.offset = 0;
-		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], &ctx);
+		ctx->offset = 0;
+		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], ctx);
 		if (ret)
-			goto error;	/* This can be -ENOMEM */
+			return ret;	/* This can be -ENOMEM */
 	}
 
 	/* entry handler for kretprobe */
 	if (is_return && tk->tp.entry_arg) {
@@ -1051,7 +1041,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
 	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	ret = register_trace_kprobe(tk);
 	if (ret) {
@@ -1062,27 +1052,34 @@ static int __trace_kprobe_create(int argc, const char *argv[])
 			trace_probe_log_err(0, BAD_PROBE_ADDR);
 		else if (ret != -ENOMEM && ret != -EEXIST)
 			trace_probe_log_err(0, FAIL_REG_PROBE);
-		goto error;
+		return ret;
 	}
 
+	/*
+	 * Here, 'tk' has been registered to the list successfully,
+	 * so we don't need to free it.
+	 */
+	tk = NULL;
+
+	return 0;
+}
+
+static int trace_kprobe_create_cb(int argc, const char *argv[])
+{
+	struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
+	int ret;
+
+	trace_probe_log_init("trace_kprobe", argc, argv);
+
+	ret = trace_kprobe_create_internal(argc, argv, &ctx);
 
-out:
 	traceprobe_finish_parse(&ctx);
 	trace_probe_log_clear();
-	kfree(new_argv);
-	kfree(symbol);
-	kfree(dbuf);
 	return ret;
-
-parse_error:
-	ret = -EINVAL;
-error:
-	free_trace_kprobe(tk);
-	goto out;
 }
 
 static int trace_kprobe_create(const char *raw_command)
 {
-	return trace_probe_create(raw_command, __trace_kprobe_create);
+	return trace_probe_create(raw_command, trace_kprobe_create_cb);
 }
 
 static int create_or_delete_trace_kprobe(const char *raw_command)
@@ -1898,7 +1895,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
 			  bool is_return)
 {
 	enum probe_print_type ptype;
-	struct trace_kprobe *tk;
+	struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
 	int ret;
 	char *event;
 
@@ -1929,19 +1926,14 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
 
 	ptype = trace_kprobe_is_return(tk) ?
 		PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
-	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
-		ret = -ENOMEM;
-		goto error;
-	}
+	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0)
+		return ERR_PTR(-ENOMEM);
 
 	ret = __register_trace_kprobe(tk);
 	if (ret < 0)
-		goto error;
+		return ERR_PTR(ret);
 
-	return trace_probe_event_call(&tk->tp);
-error:
-	free_trace_kprobe(tk);
-	return ERR_PTR(ret);
+	return trace_probe_event_call(&(no_free_ptr(tk)->tp));
 }
 
 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
@@ -1970,13 +1962,12 @@ static __init void enable_boot_kprobe_events(void)
 	struct trace_kprobe *tk;
 	struct dyn_event *pos;
 
-	mutex_lock(&event_mutex);
+	guard(mutex)(&event_mutex);
 	for_each_trace_kprobe(tk, pos) {
 		list_for_each_entry(file, &tr->events, list)
 			if (file->event_call == trace_probe_event_call(&tk->tp))
 				trace_event_enable_disable(file, 1, 0);
 	}
-	mutex_unlock(&event_mutex);
 }
 
 static __init void setup_boot_kprobe_events(void)
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 16a5e368e7b7..8f58ee1e8858 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1409,7 +1409,7 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
 			struct traceprobe_parse_context *ctx)
 {
 	struct fetch_insn *code, *tmp = NULL;
-	char *type, *arg;
+	char *type, *arg __free(kfree) = NULL;
 	int ret, len;
 
 	len = strlen(argv);
@@ -1426,22 +1426,16 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
 		return -ENOMEM;
 
 	parg->comm = kstrdup(arg, GFP_KERNEL);
-	if (!parg->comm) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!parg->comm)
+		return -ENOMEM;
 
 	type = parse_probe_arg_type(arg, parg, ctx);
-	if (IS_ERR(type)) {
-		ret = PTR_ERR(type);
-		goto out;
-	}
+	if (IS_ERR(type))
+		return PTR_ERR(type);
 
 	code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
-	if (!code) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!code)
+		return -ENOMEM;
 	code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
 
 	ctx->last_type = NULL;
@@ -1497,8 +1491,6 @@ fail:
 			kfree(code->data);
 	}
 	kfree(tmp);
-out:
-	kfree(arg);
 
 	return ret;
 }
@@ -1668,7 +1660,7 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
 {
 	const struct btf_param *params = NULL;
 	int i, j, n, used, ret, args_idx = -1;
-	const char **new_argv = NULL;
+	const char **new_argv __free(kfree) = NULL;
 
 	ret = argv_has_var_arg(argc, argv, &args_idx, ctx);
 	if (ret < 0)
@@ -1707,7 +1699,7 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
 			ret = sprint_nth_btf_arg(n, "", buf + used,
 						 bufsize - used, ctx);
 			if (ret < 0)
-				goto error;
+				return ERR_PTR(ret);
 
 			new_argv[j++] = buf + used;
 			used += ret + 1;
@@ -1721,25 +1713,20 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
 			n = simple_strtoul(argv[i] + 4, &type, 10);
 			if (type && !(*type == ':' || *type == '\0')) {
 				trace_probe_log_err(0, BAD_VAR);
-				ret = -ENOENT;
-				goto error;
+				return ERR_PTR(-ENOENT);
 			}
 			/* Note: $argN starts from $arg1 */
 			ret = sprint_nth_btf_arg(n - 1, type, buf + used,
 						 bufsize - used, ctx);
 			if (ret < 0)
-				goto error;
+				return ERR_PTR(ret);
 
 			new_argv[j++] = buf + used;
 			used += ret + 1;
 		} else
 			new_argv[j++] = argv[i];
 	}
 
-	return new_argv;
-
-error:
-	kfree(new_argv);
-	return ERR_PTR(ret);
+	return_ptr(new_argv);
 }
 
 /* @buf: *buf must be equal to NULL. Caller must to free *buf */
@@ -1747,14 +1734,14 @@ int traceprobe_expand_dentry_args(int argc, const char *argv[], char **buf)
 {
 	int i, used, ret;
 	const int bufsize = MAX_DENTRY_ARGS_LEN;
-	char *tmpbuf = NULL;
+	char *tmpbuf __free(kfree) = NULL;
 
 	if (*buf)
 		return -EINVAL;
 
 	used = 0;
 	for (i = 0; i < argc; i++) {
-		char *tmp;
+		char *tmp __free(kfree) = NULL;
 		char *equal;
 		size_t arg_len;
 
@@ -1769,7 +1756,7 @@ int traceprobe_expand_dentry_args(int argc, const char *argv[], char **buf)
 
 		tmp = kstrdup(argv[i], GFP_KERNEL);
 		if (!tmp)
-			goto nomem;
+			return -ENOMEM;
 
 		equal = strchr(tmp, '=');
 		if (equal)
@@ -1790,18 +1777,14 @@ int traceprobe_expand_dentry_args(int argc, const char *argv[], char **buf)
 				 offsetof(struct file, f_path.dentry),
 				 equal ? equal + 1 : tmp);
 
-		kfree(tmp);
 		if (ret >= bufsize - used)
-			goto nomem;
+			return -ENOMEM;
 		argv[i] = tmpbuf + used;
 		used += ret + 1;
 	}
 
-	*buf = tmpbuf;
+	*buf = no_free_ptr(tmpbuf);
 	return 0;
-
-nomem:
-	kfree(tmpbuf);
-	return -ENOMEM;
 }
 
 void traceprobe_finish_parse(struct traceprobe_parse_context *ctx)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 4875e7f5de3d..ccc762fbb69c 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -498,11 +498,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 	struct trace_uprobe *old_tu;
 	int ret;
 
-	mutex_lock(&event_mutex);
+	guard(mutex)(&event_mutex);
 
 	ret = validate_ref_ctr_offset(tu);
 	if (ret)
-		goto end;
+		return ret;
 
 	/* register as an event */
 	old_tu = find_probe_event(trace_probe_name(&tu->tp),
@@ -511,11 +511,9 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
 			trace_probe_log_set_index(0);
 			trace_probe_log_err(0, DIFF_PROBE_TYPE);
-			ret = -EEXIST;
-		} else {
-			ret = append_trace_uprobe(tu, old_tu);
+			return -EEXIST;
 		}
-		goto end;
+		return append_trace_uprobe(tu, old_tu);
 	}
 
 	ret = register_uprobe_event(tu);
@@ -525,14 +523,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 			trace_probe_log_err(0, EVENT_EXIST);
 		} else
 			pr_warn("Failed to register probe event(%d)\n", ret);
-		goto end;
+		return ret;
 	}
 
 	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
 
-end:
-	mutex_unlock(&event_mutex);
-
 	return ret;
 }