author		Benjamin Tissoires <bentiss@kernel.org>	2024-06-08 12:01:15 +0300
committer	Benjamin Tissoires <bentiss@kernel.org>	2024-06-14 12:20:16 +0300
commit		ebc0d8093e8c97de459615438edefad1a4ac352c (patch)
tree		0777c8ebab5266327d80405b15f034ebc06d4d90 /drivers
parent		99b40bf8053fa261d368ef78848961c04aa93c74 (diff)
download	linux-ebc0d8093e8c97de459615438edefad1a4ac352c.tar.xz
HID: bpf: implement HID-BPF through bpf_struct_ops
We do this implementation in several steps to not have the CI failing:
- first (this patch), we add struct_ops while keeping the existing infra
  available
- then we change the selftests, the examples and the existing in-tree
  HID-BPF programs
- then we remove the existing trace points, making old HID-BPF obsolete

There are a few advantages of struct_ops over tracing:
- compatibility with sleepable programs (for hid_hw_raw_request() in a
  later patch)
- a lot simpler in the kernel: it's a simple RCU-protected list
- we can add more parameters to the function called without much trouble
- the "attach" is now generic through BPF-core: the caller just needs to
  set hid_id and flags before calling __load()
- all the BPF tough part is now handled in BPF-core through generic
  processing
- hid_bpf_ctx is now only writable where it needs to be

Acked-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20240608-hid_bpf_struct_ops-v3-3-6ac6ade58329@kernel.org
Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
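For illustration only (not part of this patch; the in-tree examples and
selftests are converted in the follow-up patches mentioned above), here is a
minimal sketch of what a HID-BPF program attached through the new struct_ops
interface could look like. All names below (filter_event, my_filter, the
skeleton helpers) are made up for this example; hid_bpf_get_data() is the
pre-existing HID-BPF kfunc:

	// SPDX-License-Identifier: GPL-2.0
	/* BPF side: implement one callback of struct hid_bpf_ops */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("struct_ops/hid_device_event")
	int BPF_PROG(filter_event, struct hid_bpf_ctx *hid_ctx, enum hid_report_type type)
	{
		/* access the first 4 bytes of the incoming report */
		__u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, 4 /* size */);

		if (!data)
			return 0; /* EPERM check */

		data[1] ^= 0x01; /* e.g. toggle a button bit */

		return 0;
	}

	SEC(".struct_ops.link")
	struct hid_bpf_ops my_filter = {
		.hid_device_event = (void *)filter_event,
	};

	char _license[] SEC("license") = "GPL";

Userspace then only has to fill in the target device before loading the
skeleton, roughly:

	skel->struct_ops.my_filter->hid_id = hid_id;	/* sysfs id of the HID device */
	err = my_filter__load(skel);			/* the "__load()" mentioned above */
	link = bpf_map__attach_struct_ops(skel->maps.my_filter);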
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/hid/bpf/Makefile		2
-rw-r--r--	drivers/hid/bpf/hid_bpf_dispatch.c	52
-rw-r--r--	drivers/hid/bpf/hid_bpf_dispatch.h	4
-rw-r--r--	drivers/hid/bpf/hid_bpf_jmp_table.c	3
-rw-r--r--	drivers/hid/bpf/hid_bpf_struct_ops.c	252
5 files changed, 305 insertions(+), 8 deletions(-)
diff --git a/drivers/hid/bpf/Makefile b/drivers/hid/bpf/Makefile
index cf55120cf7d6..1cb3f31e9335 100644
--- a/drivers/hid/bpf/Makefile
+++ b/drivers/hid/bpf/Makefile
@@ -8,4 +8,4 @@ LIBBPF_INCLUDE = $(srctree)/tools/lib
obj-$(CONFIG_HID_BPF) += hid_bpf.o
CFLAGS_hid_bpf_dispatch.o += -I$(LIBBPF_INCLUDE)
CFLAGS_hid_bpf_jmp_table.o += -I$(LIBBPF_INCLUDE)
-hid_bpf-objs += hid_bpf_dispatch.o hid_bpf_jmp_table.o
+hid_bpf-objs += hid_bpf_dispatch.o hid_bpf_jmp_table.o hid_bpf_struct_ops.o
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index c8bb79ce2354..7216c3c7713d 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -58,6 +58,7 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
},
.data = hdev->bpf.device_data,
};
+ struct hid_bpf_ops *e;
int ret;
if (type >= HID_REPORT_TYPES)
@@ -70,9 +71,25 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
memcpy(ctx_kern.data, data, *size);
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
+ if (e->hid_device_event) {
+ ret = e->hid_device_event(&ctx_kern.ctx, type);
+ if (ret < 0) {
+ rcu_read_unlock();
+ return ERR_PTR(ret);
+ }
+
+ if (ret)
+ ctx_kern.ctx.retval = ret;
+ }
+ }
+ rcu_read_unlock();
+
ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
if (ret < 0)
return ERR_PTR(ret);
+ ret = ctx_kern.ctx.retval;
if (ret) {
if (ret > ctx_kern.ctx.allocated_size)
@@ -122,7 +139,10 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
- ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
+ if (hdev->bpf.rdesc_ops)
+ ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
+ else
+ ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
if (ret < 0)
goto ignore_bpf;
@@ -150,7 +170,7 @@ static int device_match_id(struct device *dev, const void *id)
return hdev->id == *(int *)id;
}
-static struct hid_device *hid_get_device(unsigned int hid_id)
+struct hid_device *hid_get_device(unsigned int hid_id)
{
struct device *dev;
@@ -164,7 +184,7 @@ static struct hid_device *hid_get_device(unsigned int hid_id)
return to_hid_device(dev);
}
-static void hid_put_device(struct hid_device *hid)
+void hid_put_device(struct hid_device *hid)
{
put_device(&hid->dev);
}
@@ -205,7 +225,7 @@ static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size
return 0;
}
-static int hid_bpf_allocate_event_data(struct hid_device *hdev)
+int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
/* hdev->bpf.device_data is already allocated, abort */
if (hdev->bpf.device_data)
@@ -592,14 +612,22 @@ static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
int hid_bpf_connect_device(struct hid_device *hdev)
{
- struct hid_bpf_prog_list *prog_list;
+ bool need_to_allocate = false;
+ struct hid_bpf_ops *e;
rcu_read_lock();
- prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
+ list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
+ if (e->hid_device_event) {
+ need_to_allocate = true;
+ break;
+ }
+ }
+ if (rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]))
+ need_to_allocate = true;
rcu_read_unlock();
/* only allocate BPF data if there are programs attached */
- if (!prog_list)
+ if (!need_to_allocate)
return 0;
return hid_bpf_allocate_event_data(hdev);
@@ -623,12 +651,15 @@ void hid_bpf_destroy_device(struct hid_device *hdev)
hdev->bpf.destroyed = true;
__hid_bpf_destroy_device(hdev);
+ __hid_bpf_ops_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
void hid_bpf_device_init(struct hid_device *hdev)
{
spin_lock_init(&hdev->bpf.progs_lock);
+ INIT_LIST_HEAD(&hdev->bpf.prog_list);
+ mutex_init(&hdev->bpf.prog_list_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
@@ -662,6 +693,13 @@ static int __init hid_bpf_init(void)
return 0;
}
+ /* register struct_ops kfuncs after we are sure we can load our preloaded bpf program */
+ err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
+ if (err) {
+ pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
+ return 0;
+ }
+
/* register syscalls after we are sure we can load our preloaded bpf program */
err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
if (err) {
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h
index fbe0639d09f2..e52c43d81650 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.h
+++ b/drivers/hid/bpf/hid_bpf_dispatch.h
@@ -10,12 +10,16 @@ struct hid_bpf_ctx_kern {
u8 *data;
};
+struct hid_device *hid_get_device(unsigned int hid_id);
+void hid_put_device(struct hid_device *hid);
+int hid_bpf_allocate_event_data(struct hid_device *hdev);
int hid_bpf_preload_skel(void);
void hid_bpf_free_links_and_skel(void);
int hid_bpf_get_prog_attach_type(struct bpf_prog *prog);
int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
struct bpf_prog *prog, __u32 flags);
void __hid_bpf_destroy_device(struct hid_device *hdev);
+void __hid_bpf_ops_destroy_device(struct hid_device *hdev);
int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
struct hid_bpf_ctx_kern *ctx_kern);
int hid_bpf_reconnect(struct hid_device *hdev);
diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
index aa8e1c79cdf5..8a54ba447718 100644
--- a/drivers/hid/bpf/hid_bpf_jmp_table.c
+++ b/drivers/hid/bpf/hid_bpf_jmp_table.c
@@ -81,6 +81,9 @@ static int hid_bpf_program_count(struct hid_device *hdev,
if (type >= HID_BPF_PROG_TYPE_MAX)
return -EINVAL;
+ if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && hdev->bpf.rdesc_ops)
+ n += 1;
+
FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
diff --git a/drivers/hid/bpf/hid_bpf_struct_ops.c b/drivers/hid/bpf/hid_bpf_struct_ops.c
new file mode 100644
index 000000000000..dde547e734ed
--- /dev/null
+++ b/drivers/hid/bpf/hid_bpf_struct_ops.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * HID-BPF support for Linux
+ *
+ * Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#include <linux/bitops.h>
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/hid.h>
+#include <linux/hid_bpf.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include "hid_bpf_dispatch.h"
+
+static struct btf *hid_bpf_ops_btf;
+
+static int hid_bpf_ops_init(struct btf *btf)
+{
+ hid_bpf_ops_btf = btf;
+ return 0;
+}
+
+static bool hid_bpf_ops_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
+}
+
+static int hid_bpf_ops_check_member(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
+{
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
+ break;
+ default:
+ if (prog->sleepable)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size)
+{
+ const struct btf_type *state;
+ const struct btf_type *t;
+ s32 type_id;
+
+ type_id = btf_find_by_name_kind(reg->btf, "hid_bpf_ctx",
+ BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+
+ t = btf_type_by_id(reg->btf, reg->btf_id);
+ state = btf_type_by_id(reg->btf, type_id);
+ if (t != state) {
+ bpf_log(log, "only access to hid_bpf_ctx is supported\n");
+ return -EACCES;
+ }
+
+ /* out-of-bound access in hid_bpf_ctx */
+ if (off + size > sizeof(struct hid_bpf_ctx)) {
+ bpf_log(log, "write access at off %d with size %d\n", off, size);
+ return -EACCES;
+ }
+
+ if (off < offsetof(struct hid_bpf_ctx, retval)) {
+ bpf_log(log,
+ "write access at off %d with size %d on read-only part of hid_bpf_ctx\n",
+ off, size);
+ return -EACCES;
+ }
+
+ return NOT_INIT;
+}
+
+static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
+ .is_valid_access = hid_bpf_ops_is_valid_access,
+ .btf_struct_access = hid_bpf_ops_btf_struct_access,
+};
+
+static int hid_bpf_ops_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ const struct hid_bpf_ops *uhid_bpf_ops;
+ struct hid_bpf_ops *khid_bpf_ops;
+ u32 moff;
+
+ uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
+ khid_bpf_ops = (struct hid_bpf_ops *)kdata;
+
+ moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct hid_bpf_ops, hid_id):
+ /* For hid_id and flags fields, this function has to copy it
+ * and return 1 to indicate that the data has been handled by
+ * the struct_ops type, or the verifier will reject the map if
+ * the value of those fields is not zero.
+ */
+ khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
+ return 1;
+ case offsetof(struct hid_bpf_ops, flags):
+ if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
+ return -EINVAL;
+ khid_bpf_ops->flags = uhid_bpf_ops->flags;
+ return 1;
+ }
+ return 0;
+}
+
+static int hid_bpf_reg(void *kdata)
+{
+ struct hid_bpf_ops *ops = kdata;
+ struct hid_device *hdev;
+ int count, err = 0;
+
+ hdev = hid_get_device(ops->hid_id);
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
+
+ ops->hdev = hdev;
+
+ mutex_lock(&hdev->bpf.prog_list_lock);
+
+ count = list_count_nodes(&hdev->bpf.prog_list);
+ if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
+ err = -E2BIG;
+ goto out_unlock;
+ }
+
+ if (ops->hid_rdesc_fixup) {
+ if (hdev->bpf.rdesc_ops) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ hdev->bpf.rdesc_ops = ops;
+ }
+
+ if (ops->hid_device_event) {
+ err = hid_bpf_allocate_event_data(hdev);
+ if (err)
+ goto out_unlock;
+ }
+
+ if (ops->flags & BPF_F_BEFORE)
+ list_add_rcu(&ops->list, &hdev->bpf.prog_list);
+ else
+ list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
+
+out_unlock:
+ mutex_unlock(&hdev->bpf.prog_list_lock);
+
+ if (err) {
+ if (hdev->bpf.rdesc_ops == ops)
+ hdev->bpf.rdesc_ops = NULL;
+ hid_put_device(hdev);
+ } else if (ops->hid_rdesc_fixup) {
+ hid_bpf_reconnect(hdev);
+ }
+
+ return err;
+}
+
+static void hid_bpf_unreg(void *kdata)
+{
+ struct hid_bpf_ops *ops = kdata;
+ struct hid_device *hdev;
+ bool reconnect = false;
+
+ hdev = ops->hdev;
+
+ /* check if __hid_bpf_ops_destroy_device() has been called */
+ if (!hdev)
+ return;
+
+ mutex_lock(&hdev->bpf.prog_list_lock);
+
+ list_del_rcu(&ops->list);
+
+ reconnect = hdev->bpf.rdesc_ops == ops;
+ if (reconnect)
+ hdev->bpf.rdesc_ops = NULL;
+
+ mutex_unlock(&hdev->bpf.prog_list_lock);
+
+ if (reconnect)
+ hid_bpf_reconnect(hdev);
+
+ hid_put_device(hdev);
+}
+
+static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type)
+{
+ return 0;
+}
+
+static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
+{
+ return 0;
+}
+
+static struct hid_bpf_ops __bpf_hid_bpf_ops = {
+ .hid_device_event = __hid_bpf_device_event,
+ .hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
+};
+
+static struct bpf_struct_ops bpf_hid_bpf_ops = {
+ .verifier_ops = &hid_bpf_verifier_ops,
+ .init = hid_bpf_ops_init,
+ .check_member = hid_bpf_ops_check_member,
+ .init_member = hid_bpf_ops_init_member,
+ .reg = hid_bpf_reg,
+ .unreg = hid_bpf_unreg,
+ .name = "hid_bpf_ops",
+ .cfi_stubs = &__bpf_hid_bpf_ops,
+ .owner = THIS_MODULE,
+};
+
+void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
+{
+ struct hid_bpf_ops *e;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
+ hid_put_device(hdev);
+ e->hdev = NULL;
+ }
+ rcu_read_unlock();
+}
+
+static int __init hid_bpf_struct_ops_init(void)
+{
+ return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
+}
+late_initcall(hid_bpf_struct_ops_init);