author     Martin KaFai Lau <kafai@fb.com>        2020-11-13 00:13:13 +0300
committer  Alexei Starovoitov <ast@kernel.org>    2020-11-13 05:39:28 +0300
commit     8e4597c627fb48f361e2a5b012202cb1b6cbcd5e (patch)
tree       feab62b4fd4006ab7d2536ec40b8aa923506eb9e /net
parent     e794bfddb8b8521bd016e73b411d6b03149e9e66 (diff)
bpf: Allow using bpf_sk_storage in FENTRY/FEXIT/RAW_TP
This patch enables FENTRY/FEXIT/RAW_TP tracing programs to use the bpf_sk_storage_(get|delete) helpers, so those tracing programs can access the sk's bpf_local_storage; a later selftest will show some examples.

bpf_sk_storage is currently used in bpf-tcp-cc, tc, cg sockops, etc., which run in either softirq or task context.

This patch adds bpf_sk_storage_get_tracing_proto and bpf_sk_storage_delete_tracing_proto. They check at runtime that the helpers are only called while serving softirq or running in task context. That should enable the most common tracing use cases on a sk.

At load time, the new tracing_allowed() function ensures that a tracing program using the bpf_sk_storage_(get|delete) helpers is not itself tracing any bpf_sk_storage*() function. The sk is passed as "void *" when calling into bpf_local_storage. This patch only allows tracing a kernel function.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20201112211313.2587383-1-kafai@fb.com
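As an illustration of what such a tracing program could look like, here is a minimal BPF-side sketch (not part of this patch). The map name, value struct, and the traced function tcp_close() are chosen for illustration only; the helper, its BPF_SK_STORAGE_GET_F_CREATE flag, and the SEC("fentry/...") attach type are the existing API that this commit makes usable from tracing programs.

/* Hypothetical FENTRY program using bpf_sk_storage_get() on the traced sk.
 * Map name, value layout, and the traced function are illustrative.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct sk_stg {
	__u64 close_cnt;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct sk_stg);
} sk_stg_map SEC(".maps");

SEC("fentry/tcp_close")
int BPF_PROG(trace_tcp_close, struct sock *sk, long timeout)
{
	struct sk_stg *stg;

	/* The tracing variant of the helper checks at runtime that we are
	 * in task context or serving softirq before touching the sk's
	 * bpf_local_storage.
	 */
	stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (!stg)
		return 0;

	stg->close_cnt++;
	return 0;
}

char _license[] SEC("license") = "GPL";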
Diffstat (limited to 'net')
-rw-r--r--  net/core/bpf_sk_storage.c | 74
1 file changed, 74 insertions(+), 0 deletions(-)
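For context on the load-time check described above, below is a hypothetical user-space loader for the sketch program; it only uses standard libbpf calls, and the object file name sk_storage_tracing.bpf.o and program name trace_tcp_close are made up to match the sketch. bpf_object__load() is the step at which the new tracing_allowed() check would reject a program that traces a bpf_sk_storage*() function while using these helpers.

/* Hypothetical loader sketch: open, load, and attach the FENTRY program. */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("sk_storage_tracing.bpf.o", NULL);
	if (libbpf_get_error(obj))
		return 1;

	/* Load is where the verifier runs; a tracing prog that both traces
	 * a bpf_sk_storage*() function and uses the sk_storage helpers
	 * would be rejected here by tracing_allowed().
	 */
	if (bpf_object__load(obj)) {
		bpf_object__close(obj);
		return 1;
	}

	prog = bpf_object__find_program_by_name(obj, "trace_tcp_close");
	if (!prog) {
		bpf_object__close(obj);
		return 1;
	}

	link = bpf_program__attach(prog);
	if (libbpf_get_error(link)) {
		bpf_object__close(obj);
		return 1;
	}

	printf("attached; sk storage is updated on tcp_close()\n");
	/* ... read or pin sk_stg_map here, then clean up ... */
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}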
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index fd416678f236..359908a7d3c1 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
@@ -378,6 +379,79 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+ const struct btf *btf_vmlinux;
+ const struct btf_type *t;
+ const char *tname;
+ u32 btf_id;
+
+ if (prog->aux->dst_prog)
+ return false;
+
+ /* Ensure that a tracing program which uses the
+  * bpf_sk_storage_(get|delete) helper is not itself
+  * tracing any bpf_sk_storage*() function.
+  */
+ switch (prog->expected_attach_type) {
+ case BPF_TRACE_RAW_TP:
+ /* bpf_sk_storage has no trace point */
+ return true;
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ btf_vmlinux = bpf_get_btf_vmlinux();
+ btf_id = prog->aux->attach_btf_id;
+ t = btf_type_by_id(btf_vmlinux, btf_id);
+ tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ return !!strncmp(tname, "bpf_sk_storage",
+ strlen("bpf_sk_storage"));
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+ void *, value, u64, flags)
+{
+ if (!in_serving_softirq() && !in_task())
+ return (unsigned long)NULL;
+
+ return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+ struct sock *, sk)
+{
+ if (!in_serving_softirq() && !in_task())
+ return -EPERM;
+
+ return ____bpf_sk_storage_delete(map, sk);
+}
+
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+ .func = bpf_sk_storage_get_tracing,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+ .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg4_type = ARG_ANYTHING,
+ .allowed = bpf_sk_storage_tracing_allowed,
+};
+
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+ .func = bpf_sk_storage_delete_tracing,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+ .allowed = bpf_sk_storage_tracing_allowed,
+};
+
struct bpf_sk_storage_diag {
u32 nr_maps;
struct bpf_map *maps[];