summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMasami Hiramatsu (Google) <mhiramat@kernel.org>2026-04-20 17:00:56 +0300
committerMasami Hiramatsu (Google) <mhiramat@kernel.org>2026-04-21 17:59:39 +0300
commit1aec9e5c3e31ce1e28f914427fb7f90b91d310df (patch)
treecc8431dd5a602e8441ce10bae61d716f26618529
parent6ad51ada17ed80c9a5f205b4c01c424cac8b0d46 (diff)
downloadlinux-1aec9e5c3e31ce1e28f914427fb7f90b91d310df.tar.xz
tracing/fprobe: Unregister fprobe even if memory allocation fails
unregister_fprobe() can fail under memory pressure because of memory allocation failure, but this may be called from module unloading, and usually there is no way to retry it. Moreover, trace_fprobe does not check the return value. To fix this problem, unregister fprobe and fprobe_hash_node even if working memory allocation fails. Anyway, if the last fprobe is removed, the filter will be freed. Link: https://lore.kernel.org/all/177669365629.132053.8433032896213721288.stgit@mhiramat.tok.corp.google.com/ Fixes: 4346ba160409 ("fprobe: Rewrite fprobe on function-graph tracer") Cc: stable@vger.kernel.org Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
-rw-r--r--kernel/trace/fprobe.c25
1 files changed, 15 insertions, 10 deletions
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index af9ba7250874..a2b659006e0e 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -324,9 +324,10 @@ static void fprobe_ftrace_remove_ips(unsigned long *addrs, int num)
lockdep_assert_held(&fprobe_mutex);
fprobe_ftrace_active--;
- if (!fprobe_ftrace_active)
+ if (!fprobe_ftrace_active) {
unregister_ftrace_function(&fprobe_ftrace_ops);
- if (num)
+ ftrace_free_filter(&fprobe_ftrace_ops);
+ } else if (num)
ftrace_set_filter_ips(&fprobe_ftrace_ops, addrs, num, 1, 0);
}
@@ -525,10 +526,10 @@ static void fprobe_graph_remove_ips(unsigned long *addrs, int num)
fprobe_graph_active--;
/* Q: should we unregister it ? */
- if (!fprobe_graph_active)
+ if (!fprobe_graph_active) {
unregister_ftrace_graph(&fprobe_graph_ops);
-
- if (num)
+ ftrace_free_filter(&fprobe_graph_ops.ops);
+ } else if (num)
ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0);
}
@@ -932,15 +933,19 @@ int unregister_fprobe(struct fprobe *fp)
hlist_array = fp->hlist_array;
addrs = kcalloc(hlist_array->size, sizeof(unsigned long), GFP_KERNEL);
- if (!addrs) {
- ret = -ENOMEM; /* TODO: Fallback to one-by-one loop */
- goto out;
- }
+ /*
+ * This will remove fprobe_hash_node from the hash table even if
+ * memory allocation fails. However, ftrace_ops will not be updated.
+ * Anyway, when the last fprobe is unregistered, ftrace_ops is also
+ * unregistered.
+ */
+ if (!addrs)
+ pr_warn("Failed to allocate working array. ftrace_ops may not sync.\n");
/* Remove non-synonim ips from table and hash */
count = 0;
for (i = 0; i < hlist_array->size; i++) {
- if (!delete_fprobe_node(&hlist_array->array[i]))
+ if (!delete_fprobe_node(&hlist_array->array[i]) && addrs)
addrs[count++] = hlist_array->array[i].addr;
}
del_fprobe_hash(fp);