Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/Makefile            |    4
-rw-r--r--  kernel/bpf/arraymap.c          |   28
-rw-r--r--  kernel/bpf/btf.c               |   14
-rw-r--r--  kernel/bpf/cgroup.c            |  162
-rw-r--r--  kernel/bpf/core.c              |   79
-rw-r--r--  kernel/bpf/cpumap.c            |   16
-rw-r--r--  kernel/bpf/devmap.c            |   15
-rw-r--r--  kernel/bpf/hashtab.c           |   26
-rw-r--r--  kernel/bpf/helpers.c           |   20
-rw-r--r--  kernel/bpf/inode.c             |   11
-rw-r--r--  kernel/bpf/local_storage.c     |  379
-rw-r--r--  kernel/bpf/lpm_trie.c          |   12
-rw-r--r--  kernel/bpf/map_in_map.c        |    3
-rw-r--r--  kernel/bpf/offload.c           |  223
-rw-r--r--  kernel/bpf/reuseport_array.c   |  363
-rw-r--r--  kernel/bpf/sockmap.c           |   14
-rw-r--r--  kernel/bpf/stackmap.c          |    1
-rw-r--r--  kernel/bpf/syscall.c           |  107
-rw-r--r--  kernel/bpf/verifier.c          |   55
-rw-r--r--  kernel/bpf/xskmap.c            |    3
20 files changed, 1312 insertions(+), 223 deletions(-)
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f27f5496d6fe..0488b8258321 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -3,6 +3,7 @@ obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
+obj-$(CONFIG_BPF_SYSCALL) += local_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
ifeq ($(CONFIG_NET),y)
@@ -22,3 +23,6 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
obj-$(CONFIG_CGROUP_BPF) += cgroup.o
+ifeq ($(CONFIG_INET),y)
+obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o
+endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 544e58f5f642..0c17aab3ce5f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -54,7 +54,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
}
/* Called from syscall */
-static int array_map_alloc_check(union bpf_attr *attr)
+int array_map_alloc_check(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
@@ -358,27 +358,20 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
-static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
- u32 btf_key_id, u32 btf_value_id)
+static int array_map_check_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
{
- const struct btf_type *key_type, *value_type;
- u32 key_size, value_size;
u32 int_data;
- key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
- if (!key_type || BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
+ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
return -EINVAL;
int_data = *(u32 *)(key_type + 1);
- /* bpf array can only take a u32 key. This check makes
- * sure that the btf matches the attr used during map_create.
+ /* bpf array can only take a u32 key. This check makes sure
+ * that the btf matches the attr used during map_create.
*/
- if (BTF_INT_BITS(int_data) != 32 || key_size != 4 ||
- BTF_INT_OFFSET(int_data))
- return -EINVAL;
-
- value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
- if (!value_type || value_size > map->value_size)
+ if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
return -EINVAL;
return 0;
@@ -405,6 +398,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_lookup_elem = percpu_array_map_lookup_elem,
.map_update_elem = array_map_update_elem,
.map_delete_elem = array_map_delete_elem,
+ .map_check_btf = array_map_check_btf,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
@@ -546,6 +540,7 @@ const struct bpf_map_ops prog_array_map_ops = {
.map_fd_put_ptr = prog_fd_array_put_ptr,
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
.map_release_uref = bpf_fd_array_map_clear,
+ .map_check_btf = map_check_no_btf,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@@ -634,6 +629,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
.map_fd_get_ptr = perf_event_fd_array_get_ptr,
.map_fd_put_ptr = perf_event_fd_array_put_ptr,
.map_release = perf_event_fd_array_release,
+ .map_check_btf = map_check_no_btf,
};
#ifdef CONFIG_CGROUPS
@@ -665,6 +661,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
.map_delete_elem = fd_array_map_delete_elem,
.map_fd_get_ptr = cgroup_fd_array_get_ptr,
.map_fd_put_ptr = cgroup_fd_array_put_ptr,
+ .map_check_btf = map_check_no_btf,
};
#endif
@@ -749,4 +746,5 @@ const struct bpf_map_ops array_of_maps_map_ops = {
.map_fd_put_ptr = bpf_map_fd_put_ptr,
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = array_of_map_gen_lookup,
+ .map_check_btf = map_check_no_btf,
};
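
The per-map .map_check_btf callback used above receives key and value types that syscall.c has already resolved from BTF, so each map type only validates its own constraints: array maps insist on a plain 32-bit integer key, while map types that cannot carry BTF point the callback at map_check_no_btf. A minimal sketch of what such a callback could look like for a hypothetical map type that wants a struct-typed key; the function name and policy are illustrative, not part of this patch:

static int example_map_check_btf(const struct bpf_map *map,
				 const struct btf_type *key_type,
				 const struct btf_type *value_type)
{
	/* hypothetical policy: the key must be described as a struct;
	 * any value type that fits map->value_size is accepted
	 */
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT)
		return -EINVAL;

	return 0;
}

/* hooked up like the other callbacks in this patch:
 *	.map_check_btf = example_map_check_btf,
 */
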
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 9704934252b3..2590700237c1 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1519,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
{
bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
const struct btf_member *member;
+ u32 meta_needed, last_offset;
struct btf *btf = env->btf;
u32 struct_size = t->size;
- u32 meta_needed;
u16 i;
meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1534,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
btf_verifier_log_type(env, t, NULL);
+ last_offset = 0;
for_each_member(i, t, member) {
if (!btf_name_offset_valid(btf, member->name_off)) {
btf_verifier_log_member(env, t, member,
@@ -1555,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /*
+ * ">" instead of ">=" because the last member could be
+ * "char a[0];"
+ */
+ if (last_offset > member->offset) {
+ btf_verifier_log_member(env, t, member,
+ "Invalid member bits_offset");
+ return -EINVAL;
+ }
+
if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
btf_verifier_log_member(env, t, member,
"Memmber bits_offset exceeds its struct size");
@@ -1562,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
}
btf_verifier_log_member(env, t, member, NULL);
+ last_offset = member->offset;
}
return meta_needed;
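
The new last_offset check requires struct members to appear in non-decreasing bits_offset order; ">" rather than ">=" is used so a zero-sized trailing member may legally share the offset of the member before it. An illustrative C layout (not from the patch) showing what the check still accepts, and the case it now rejects:

#include <linux/types.h>

/* accepted: member offsets 0, 32, 64 are non-decreasing; the zero-sized
 * flexible member "tail" sits at offset 64, equal to the end of "b"
 */
struct accepted_layout {
	__u32 a;
	__u32 b;
	char  tail[0];
};

/* rejected: hand-crafted BTF that lists a member with bits_offset 32
 * after one with bits_offset 64 now fails with
 * "Invalid member bits_offset" instead of being silently accepted
 */
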
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 3d83ee7df381..6a7d931bbc55 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -34,6 +34,8 @@ void cgroup_bpf_put(struct cgroup *cgrp)
list_for_each_entry_safe(pl, tmp, progs, node) {
list_del(&pl->node);
bpf_prog_put(pl->prog);
+ bpf_cgroup_storage_unlink(pl->storage);
+ bpf_cgroup_storage_free(pl->storage);
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
}
@@ -95,7 +97,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
struct bpf_prog_array __rcu **array)
{
- struct bpf_prog_array __rcu *progs;
+ struct bpf_prog_array *progs;
struct bpf_prog_list *pl;
struct cgroup *p = cgrp;
int cnt = 0;
@@ -115,18 +117,20 @@ static int compute_effective_progs(struct cgroup *cgrp,
cnt = 0;
p = cgrp;
do {
- if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
- list_for_each_entry(pl,
- &p->bpf.progs[type], node) {
- if (!pl->prog)
- continue;
- rcu_dereference_protected(progs, 1)->
- progs[cnt++] = pl->prog;
- }
- p = cgroup_parent(p);
- } while (p);
+ if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+ continue;
+
+ list_for_each_entry(pl, &p->bpf.progs[type], node) {
+ if (!pl->prog)
+ continue;
- *array = progs;
+ progs->items[cnt].prog = pl->prog;
+ progs->items[cnt].cgroup_storage = pl->storage;
+ cnt++;
+ }
+ } while ((p = cgroup_parent(p)));
+
+ rcu_assign_pointer(*array, progs);
return 0;
}
@@ -173,6 +177,45 @@ cleanup:
return -ENOMEM;
}
+static int update_effective_progs(struct cgroup *cgrp,
+ enum bpf_attach_type type)
+{
+ struct cgroup_subsys_state *css;
+ int err;
+
+ /* allocate and recompute effective prog arrays */
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ err = compute_effective_progs(desc, type, &desc->bpf.inactive);
+ if (err)
+ goto cleanup;
+ }
+
+ /* all allocations were successful. Activate all prog arrays */
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ activate_effective_progs(desc, type, desc->bpf.inactive);
+ desc->bpf.inactive = NULL;
+ }
+
+ return 0;
+
+cleanup:
+ /* oom while computing effective. Free all computed effective arrays
+ * since they were not activated
+ */
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ bpf_prog_array_free(desc->bpf.inactive);
+ desc->bpf.inactive = NULL;
+ }
+
+ return err;
+}
+
#define BPF_CGROUP_MAX_PROGS 64
/**
@@ -189,7 +232,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
{
struct list_head *progs = &cgrp->bpf.progs[type];
struct bpf_prog *old_prog = NULL;
- struct cgroup_subsys_state *css;
+ struct bpf_cgroup_storage *storage, *old_storage = NULL;
struct bpf_prog_list *pl;
bool pl_was_allocated;
int err;
@@ -211,72 +254,71 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
return -E2BIG;
+ storage = bpf_cgroup_storage_alloc(prog);
+ if (IS_ERR(storage))
+ return -ENOMEM;
+
if (flags & BPF_F_ALLOW_MULTI) {
- list_for_each_entry(pl, progs, node)
- if (pl->prog == prog)
+ list_for_each_entry(pl, progs, node) {
+ if (pl->prog == prog) {
/* disallow attaching the same prog twice */
+ bpf_cgroup_storage_free(storage);
return -EINVAL;
+ }
+ }
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl)
+ if (!pl) {
+ bpf_cgroup_storage_free(storage);
return -ENOMEM;
+ }
+
pl_was_allocated = true;
pl->prog = prog;
+ pl->storage = storage;
list_add_tail(&pl->node, progs);
} else {
if (list_empty(progs)) {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl)
+ if (!pl) {
+ bpf_cgroup_storage_free(storage);
return -ENOMEM;
+ }
pl_was_allocated = true;
list_add_tail(&pl->node, progs);
} else {
pl = list_first_entry(progs, typeof(*pl), node);
old_prog = pl->prog;
+ old_storage = pl->storage;
+ bpf_cgroup_storage_unlink(old_storage);
pl_was_allocated = false;
}
pl->prog = prog;
+ pl->storage = storage;
}
cgrp->bpf.flags[type] = flags;
- /* allocate and recompute effective prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- err = compute_effective_progs(desc, type, &desc->bpf.inactive);
- if (err)
- goto cleanup;
- }
-
- /* all allocations were successful. Activate all prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- activate_effective_progs(desc, type, desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
+ err = update_effective_progs(cgrp, type);
+ if (err)
+ goto cleanup;
static_branch_inc(&cgroup_bpf_enabled_key);
+ if (old_storage)
+ bpf_cgroup_storage_free(old_storage);
if (old_prog) {
bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
}
+ bpf_cgroup_storage_link(storage, cgrp, type);
return 0;
cleanup:
- /* oom while computing effective. Free all computed effective arrays
- * since they were not activated
- */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- bpf_prog_array_free(desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
-
/* and cleanup the prog list */
pl->prog = old_prog;
+ bpf_cgroup_storage_free(pl->storage);
+ pl->storage = old_storage;
+ bpf_cgroup_storage_link(old_storage, cgrp, type);
if (pl_was_allocated) {
list_del(&pl->node);
kfree(pl);
@@ -299,7 +341,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
struct list_head *progs = &cgrp->bpf.progs[type];
u32 flags = cgrp->bpf.flags[type];
struct bpf_prog *old_prog = NULL;
- struct cgroup_subsys_state *css;
struct bpf_prog_list *pl;
int err;
@@ -338,25 +379,14 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
pl->prog = NULL;
}
- /* allocate and recompute effective prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- err = compute_effective_progs(desc, type, &desc->bpf.inactive);
- if (err)
- goto cleanup;
- }
-
- /* all allocations were successful. Activate all prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- activate_effective_progs(desc, type, desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
+ err = update_effective_progs(cgrp, type);
+ if (err)
+ goto cleanup;
/* now can actually delete it from this cgroup list */
list_del(&pl->node);
+ bpf_cgroup_storage_unlink(pl->storage);
+ bpf_cgroup_storage_free(pl->storage);
kfree(pl);
if (list_empty(progs))
/* last program was detached, reset flags to zero */
@@ -367,16 +397,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
return 0;
cleanup:
- /* oom while computing effective. Free all computed effective arrays
- * since they were not activated
- */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
-
- bpf_prog_array_free(desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
-
/* and restore back old_prog */
pl->prog = old_prog;
return err;
@@ -655,6 +675,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_delete_elem_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
+ case BPF_FUNC_get_local_storage:
+ return &bpf_get_local_storage_proto;
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
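
With bpf_get_local_storage exposed through cgroup_dev_func_proto, a cgroup-attached program can keep per-cgroup state in a BPF_MAP_TYPE_CGROUP_STORAGE map. A hedged sketch of a device-cgroup program that counts access checks; the selftests-style SEC()/bpf_map_def scaffolding and the helper declaration are inlined only to keep the example self-contained, and the map and section names are illustrative:

#include <linux/bpf.h>
#include <linux/types.h>

#define SEC(name) __attribute__((section(name), used))

/* selftests-era map definition layout, assumed here for brevity */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

static void *(*bpf_get_local_storage)(void *map, __u64 flags) =
	(void *) BPF_FUNC_get_local_storage;

/* key_size must be sizeof(struct bpf_cgroup_storage_key) and
 * max_entries must be 0, as enforced by cgroup_storage_map_alloc()
 */
struct bpf_map_def SEC("maps") dev_access_counter = {
	.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
	.key_size	= sizeof(struct bpf_cgroup_storage_key),
	.value_size	= sizeof(__u64),
	.max_entries	= 0,
};

SEC("cgroup/dev")
int count_dev_access(struct bpf_cgroup_dev_ctx *ctx)
{
	__u64 *counter;

	/* returns a pointer into this cgroup's storage for this map */
	counter = bpf_get_local_storage(&dev_access_counter, 0);
	__sync_fetch_and_add(counter, 1);

	return 1;	/* allow the access */
}

char _license[] SEC("license") = "GPL";
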
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1e5625d46414..4d09e610777f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1538,11 +1538,12 @@ static struct {
.null_prog = NULL,
};
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
if (prog_cnt)
return kzalloc(sizeof(struct bpf_prog_array) +
- sizeof(struct bpf_prog *) * (prog_cnt + 1),
+ sizeof(struct bpf_prog_array_item) *
+ (prog_cnt + 1),
flags);
return &empty_prog_array.hdr;
@@ -1556,43 +1557,45 @@ void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
kfree_rcu(progs, rcu);
}
-int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
{
- struct bpf_prog **prog;
+ struct bpf_prog_array_item *item;
u32 cnt = 0;
rcu_read_lock();
- prog = rcu_dereference(progs)->progs;
- for (; *prog; prog++)
- if (*prog != &dummy_bpf_prog.prog)
+ item = rcu_dereference(array)->items;
+ for (; item->prog; item++)
+ if (item->prog != &dummy_bpf_prog.prog)
cnt++;
rcu_read_unlock();
return cnt;
}
-static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
+
+static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
u32 *prog_ids,
u32 request_cnt)
{
+ struct bpf_prog_array_item *item;
int i = 0;
- for (; *prog; prog++) {
- if (*prog == &dummy_bpf_prog.prog)
+ item = rcu_dereference(array)->items;
+ for (; item->prog; item++) {
+ if (item->prog == &dummy_bpf_prog.prog)
continue;
- prog_ids[i] = (*prog)->aux->id;
+ prog_ids[i] = item->prog->aux->id;
if (++i == request_cnt) {
- prog++;
+ item++;
break;
}
}
- return !!(*prog);
+ return !!(item->prog);
}
-int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
__u32 __user *prog_ids, u32 cnt)
{
- struct bpf_prog **prog;
unsigned long err = 0;
bool nospc;
u32 *ids;
@@ -1611,8 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
if (!ids)
return -ENOMEM;
rcu_read_lock();
- prog = rcu_dereference(progs)->progs;
- nospc = bpf_prog_array_copy_core(prog, ids, cnt);
+ nospc = bpf_prog_array_copy_core(array, ids, cnt);
rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
@@ -1623,14 +1625,14 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
return 0;
}
-void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
struct bpf_prog *old_prog)
{
- struct bpf_prog **prog = progs->progs;
+ struct bpf_prog_array_item *item = array->items;
- for (; *prog; prog++)
- if (*prog == old_prog) {
- WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
+ for (; item->prog; item++)
+ if (item->prog == old_prog) {
+ WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
break;
}
}
@@ -1641,7 +1643,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
struct bpf_prog_array **new_array)
{
int new_prog_cnt, carry_prog_cnt = 0;
- struct bpf_prog **existing_prog;
+ struct bpf_prog_array_item *existing;
struct bpf_prog_array *array;
bool found_exclude = false;
int new_prog_idx = 0;
@@ -1650,15 +1652,15 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
* the new array.
*/
if (old_array) {
- existing_prog = old_array->progs;
- for (; *existing_prog; existing_prog++) {
- if (*existing_prog == exclude_prog) {
+ existing = old_array->items;
+ for (; existing->prog; existing++) {
+ if (existing->prog == exclude_prog) {
found_exclude = true;
continue;
}
- if (*existing_prog != &dummy_bpf_prog.prog)
+ if (existing->prog != &dummy_bpf_prog.prog)
carry_prog_cnt++;
- if (*existing_prog == include_prog)
+ if (existing->prog == include_prog)
return -EEXIST;
}
}
@@ -1684,15 +1686,17 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
/* Fill in the new prog array */
if (carry_prog_cnt) {
- existing_prog = old_array->progs;
- for (; *existing_prog; existing_prog++)
- if (*existing_prog != exclude_prog &&
- *existing_prog != &dummy_bpf_prog.prog)
- array->progs[new_prog_idx++] = *existing_prog;
+ existing = old_array->items;
+ for (; existing->prog; existing++)
+ if (existing->prog != exclude_prog &&
+ existing->prog != &dummy_bpf_prog.prog) {
+ array->items[new_prog_idx++].prog =
+ existing->prog;
+ }
}
if (include_prog)
- array->progs[new_prog_idx++] = include_prog;
- array->progs[new_prog_idx] = NULL;
+ array->items[new_prog_idx++].prog = include_prog;
+ array->items[new_prog_idx].prog = NULL;
*new_array = array;
return 0;
}
@@ -1701,7 +1705,6 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt)
{
- struct bpf_prog **prog;
u32 cnt = 0;
if (array)
@@ -1714,8 +1717,7 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
return 0;
/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
- prog = rcu_dereference_check(array, 1)->progs;
- return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
+ return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
: 0;
}
@@ -1793,6 +1795,7 @@ const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_sock_map_update_proto __weak;
const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
+const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
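
Switching bpf_prog_array from bare prog pointers to bpf_prog_array_item lets each attached program carry its cgroup_storage alongside it, so the run-array machinery can publish the right storage before each program executes. A simplified sketch of walking the new layout (this is the shape of the BPF_PROG_RUN_ARRAY pattern, not its literal implementation; the storage hand-off is only indicated by a comment):

static u32 run_prog_array(struct bpf_prog_array __rcu *array, void *ctx)
{
	struct bpf_prog_array_item *item;
	u32 ret = 1;

	rcu_read_lock();
	item = rcu_dereference(array)->items;
	for (; item->prog; item++) {
		/* item->cgroup_storage would be made visible here,
		 * e.g. via the bpf_cgroup_storage per-CPU pointer
		 * that the helper in helpers.c below reads
		 */
		ret &= BPF_PROG_RUN(item->prog, ctx);
	}
	rcu_read_unlock();

	return ret;
}
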
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0918d180f08..620bc5024d7d 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
};
static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_bulk_queue *bq);
+ struct xdp_bulk_queue *bq, bool in_napi_ctx);
static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
{
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
/* No concurrent bq_enqueue can run at this point */
- bq_flush_to_queue(rcpu, bq);
+ bq_flush_to_queue(rcpu, bq, false);
}
free_percpu(rcpu->bulkq);
/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -555,10 +555,11 @@ const struct bpf_map_ops cpu_map_ops = {
.map_update_elem = cpu_map_update_elem,
.map_lookup_elem = cpu_map_lookup_elem,
.map_get_next_key = cpu_map_get_next_key,
+ .map_check_btf = map_check_no_btf,
};
static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_bulk_queue *bq)
+ struct xdp_bulk_queue *bq, bool in_napi_ctx)
{
unsigned int processed = 0, drops = 0;
const int to_cpu = rcpu->cpu;
@@ -578,7 +579,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
err = __ptr_ring_produce(q, xdpf);
if (err) {
drops++;
- xdp_return_frame_rx_napi(xdpf);
+ if (likely(in_napi_ctx))
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
}
processed++;
}
@@ -598,7 +602,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
- bq_flush_to_queue(rcpu, bq);
+ bq_flush_to_queue(rcpu, bq, true);
/* Notice, xdp_buff/page MUST be queued here, long enough for
* driver to code invoking us to finished, due to driver
@@ -661,7 +665,7 @@ void __cpu_map_flush(struct bpf_map *map)
/* Flush all frames in bulkq to real queue */
bq = this_cpu_ptr(rcpu->bulkq);
- bq_flush_to_queue(rcpu, bq);
+ bq_flush_to_queue(rcpu, bq, true);
/* If already running, costs spin_lock_irqsave + smb_mb */
wake_up_process(rcpu->kthread);
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf3..ac1df79f3788 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
}
static int bq_xmit_all(struct bpf_dtab_netdev *obj,
- struct xdp_bulk_queue *bq, u32 flags)
+ struct xdp_bulk_queue *bq, u32 flags,
+ bool in_napi_ctx)
{
struct net_device *dev = obj->dev;
int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
struct xdp_frame *xdpf = bq->q[i];
/* RX path under NAPI protection, can return frames faster */
- xdp_return_frame_rx_napi(xdpf);
+ if (likely(in_napi_ctx))
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
drops++;
}
goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
__clear_bit(bit, bitmap);
bq = this_cpu_ptr(dev->bulkq);
- bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+ bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
}
}
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
- bq_xmit_all(obj, bq, 0);
+ bq_xmit_all(obj, bq, 0, true);
/* Ingress dev_rx will be the same for all xdp_frame's in
* bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
__clear_bit(dev->bit, bitmap);
bq = per_cpu_ptr(dev->bulkq, cpu);
- bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+ bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
}
}
}
@@ -484,6 +488,7 @@ const struct bpf_map_ops dev_map_ops = {
.map_lookup_elem = dev_map_lookup_elem,
.map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem,
+ .map_check_btf = map_check_no_btf,
};
static int dev_map_notification(struct notifier_block *notifier,
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 513d9dfcf4ee..04b8eda94e7d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -11,9 +11,11 @@
* General Public License for more details.
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
+#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
@@ -1162,6 +1164,27 @@ static void htab_map_free(struct bpf_map *map)
kfree(htab);
}
+static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
+ struct seq_file *m)
+{
+ void *value;
+
+ rcu_read_lock();
+
+ value = htab_map_lookup_elem(map, key);
+ if (!value) {
+ rcu_read_unlock();
+ return;
+ }
+
+ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
+ seq_puts(m, ": ");
+ btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
+ seq_puts(m, "\n");
+
+ rcu_read_unlock();
+}
+
const struct bpf_map_ops htab_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1171,6 +1194,7 @@ const struct bpf_map_ops htab_map_ops = {
.map_update_elem = htab_map_update_elem,
.map_delete_elem = htab_map_delete_elem,
.map_gen_lookup = htab_map_gen_lookup,
+ .map_seq_show_elem = htab_map_seq_show_elem,
};
const struct bpf_map_ops htab_lru_map_ops = {
@@ -1182,6 +1206,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_update_elem = htab_lru_map_update_elem,
.map_delete_elem = htab_lru_map_delete_elem,
.map_gen_lookup = htab_lru_map_gen_lookup,
+ .map_seq_show_elem = htab_map_seq_show_elem,
};
/* Called from eBPF program */
@@ -1408,4 +1433,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
.map_fd_put_ptr = bpf_map_fd_put_ptr,
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = htab_of_map_gen_lookup,
+ .map_check_btf = map_check_no_btf,
};
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 73065e2d23c2..1991466b8327 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -193,4 +193,24 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
};
+
+DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+
+BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
+{
+ /* map and flags arguments are not used now,
+ * but provide an ability to extend the API
+ * for other types of local storages.
+ * verifier checks that their values are correct.
+ */
+ return (unsigned long) this_cpu_read(bpf_cgroup_storage);
+}
+
+const struct bpf_func_proto bpf_get_local_storage_proto = {
+ .func = bpf_get_local_storage,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+};
#endif
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 76efe9a183f5..2ada5e21dfa6 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -196,19 +196,21 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
struct bpf_map *map = seq_file_to_map(m);
void *key = map_iter(m)->key;
+ void *prev_key;
if (map_iter(m)->done)
return NULL;
if (unlikely(v == SEQ_START_TOKEN))
- goto done;
+ prev_key = NULL;
+ else
+ prev_key = key;
- if (map->ops->map_get_next_key(map, key, key)) {
+ if (map->ops->map_get_next_key(map, prev_key, key)) {
map_iter(m)->done = true;
return NULL;
}
-done:
++(*pos);
return key;
}
@@ -332,7 +334,8 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
struct bpf_map *map = arg;
return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
- map->btf ? &bpffs_map_fops : &bpffs_obj_fops);
+ bpf_map_support_seq_show(map) ?
+ &bpffs_map_fops : &bpffs_obj_fops);
}
static struct dentry *
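
The map_seq_next fix relies on the map_get_next_key convention that a NULL previous key means "return the first key"; before this change the first real iteration passed the seq-start key buffer, whose contents are undefined. The same convention drives userspace iteration; a small sketch using libbpf's bpf_map_get_next_key() (the map_fd and the u32 key type are assumptions for the example):

#include <bpf/bpf.h>		/* bpf_map_get_next_key() */
#include <linux/types.h>

static void walk_map_keys(int map_fd)
{
	__u32 key, next_key;
	void *prev = NULL;	/* NULL: ask for the first key */

	while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
		/* ... look up / print next_key here ... */
		key = next_key;
		prev = &key;
	}
	/* the loop ends once the kernel reports no further key */
}
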
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
new file mode 100644
index 000000000000..22ad967d1e5f
--- /dev/null
+++ b/kernel/bpf/local_storage.c
@@ -0,0 +1,379 @@
+//SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf-cgroup.h>
+#include <linux/bpf.h>
+#include <linux/bug.h>
+#include <linux/filter.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+
+DEFINE_PER_CPU(void*, bpf_cgroup_storage);
+
+#ifdef CONFIG_CGROUP_BPF
+
+#define LOCAL_STORAGE_CREATE_FLAG_MASK \
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+
+struct bpf_cgroup_storage_map {
+ struct bpf_map map;
+
+ spinlock_t lock;
+ struct bpf_prog *prog;
+ struct rb_root root;
+ struct list_head list;
+};
+
+static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_cgroup_storage_map, map);
+}
+
+static int bpf_cgroup_storage_key_cmp(
+ const struct bpf_cgroup_storage_key *key1,
+ const struct bpf_cgroup_storage_key *key2)
+{
+ if (key1->cgroup_inode_id < key2->cgroup_inode_id)
+ return -1;
+ else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
+ return 1;
+ else if (key1->attach_type < key2->attach_type)
+ return -1;
+ else if (key1->attach_type > key2->attach_type)
+ return 1;
+ return 0;
+}
+
+static struct bpf_cgroup_storage *cgroup_storage_lookup(
+ struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
+ bool locked)
+{
+ struct rb_root *root = &map->root;
+ struct rb_node *node;
+
+ if (!locked)
+ spin_lock_bh(&map->lock);
+
+ node = root->rb_node;
+ while (node) {
+ struct bpf_cgroup_storage *storage;
+
+ storage = container_of(node, struct bpf_cgroup_storage, node);
+
+ switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
+ case -1:
+ node = node->rb_left;
+ break;
+ case 1:
+ node = node->rb_right;
+ break;
+ default:
+ if (!locked)
+ spin_unlock_bh(&map->lock);
+ return storage;
+ }
+ }
+
+ if (!locked)
+ spin_unlock_bh(&map->lock);
+
+ return NULL;
+}
+
+static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
+ struct bpf_cgroup_storage *storage)
+{
+ struct rb_root *root = &map->root;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct bpf_cgroup_storage *this;
+
+ this = container_of(*new, struct bpf_cgroup_storage, node);
+
+ parent = *new;
+ switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
+ case -1:
+ new = &((*new)->rb_left);
+ break;
+ case 1:
+ new = &((*new)->rb_right);
+ break;
+ default:
+ return -EEXIST;
+ }
+ }
+
+ rb_link_node(&storage->node, parent, new);
+ rb_insert_color(&storage->node, root);
+
+ return 0;
+}
+
+static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+
+ storage = cgroup_storage_lookup(map, key, false);
+ if (!storage)
+ return NULL;
+
+ return &READ_ONCE(storage->buf)->data[0];
+}
+
+static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
+ void *value, u64 flags)
+{
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+ struct bpf_storage_buffer *new;
+
+ if (flags & BPF_NOEXIST)
+ return -EINVAL;
+
+ storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
+ key, false);
+ if (!storage)
+ return -ENOENT;
+
+ new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
+ map->value_size, __GFP_ZERO | GFP_USER,
+ map->numa_node);
+ if (!new)
+ return -ENOMEM;
+
+ memcpy(&new->data[0], value, map->value_size);
+
+ new = xchg(&storage->buf, new);
+ kfree_rcu(new, rcu);
+
+ return 0;
+}
+
+static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
+ void *_next_key)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage_key *next = _next_key;
+ struct bpf_cgroup_storage *storage;
+
+ spin_lock_bh(&map->lock);
+
+ if (list_empty(&map->list))
+ goto enoent;
+
+ if (key) {
+ storage = cgroup_storage_lookup(map, key, true);
+ if (!storage)
+ goto enoent;
+
+ storage = list_next_entry(storage, list);
+ if (!storage)
+ goto enoent;
+ } else {
+ storage = list_first_entry(&map->list,
+ struct bpf_cgroup_storage, list);
+ }
+
+ spin_unlock_bh(&map->lock);
+ next->attach_type = storage->key.attach_type;
+ next->cgroup_inode_id = storage->key.cgroup_inode_id;
+ return 0;
+
+enoent:
+ spin_unlock_bh(&map->lock);
+ return -ENOENT;
+}
+
+static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
+{
+ int numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_cgroup_storage_map *map;
+
+ if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
+ return ERR_PTR(-EINVAL);
+
+ if (attr->value_size > PAGE_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
+ /* reserved bits should not be used */
+ return ERR_PTR(-EINVAL);
+
+ if (attr->max_entries)
+ /* max_entries is not used and enforced to be 0 */
+ return ERR_PTR(-EINVAL);
+
+ map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
+ __GFP_ZERO | GFP_USER, numa_node);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
+ PAGE_SIZE) >> PAGE_SHIFT;
+
+ /* copy mandatory map attributes */
+ bpf_map_init_from_attr(&map->map, attr);
+
+ spin_lock_init(&map->lock);
+ map->root = RB_ROOT;
+ INIT_LIST_HEAD(&map->list);
+
+ return &map->map;
+}
+
+static void cgroup_storage_map_free(struct bpf_map *_map)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+
+ WARN_ON(!RB_EMPTY_ROOT(&map->root));
+ WARN_ON(!list_empty(&map->list));
+
+ kfree(map);
+}
+
+static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
+{
+ return -EINVAL;
+}
+
+const struct bpf_map_ops cgroup_storage_map_ops = {
+ .map_alloc = cgroup_storage_map_alloc,
+ .map_free = cgroup_storage_map_free,
+ .map_get_next_key = cgroup_storage_get_next_key,
+ .map_lookup_elem = cgroup_storage_lookup_elem,
+ .map_update_elem = cgroup_storage_update_elem,
+ .map_delete_elem = cgroup_storage_delete_elem,
+ .map_check_btf = map_check_no_btf,
+};
+
+int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ int ret = -EBUSY;
+
+ spin_lock_bh(&map->lock);
+
+ if (map->prog && map->prog != prog)
+ goto unlock;
+ if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map)
+ goto unlock;
+
+ map->prog = prog;
+ prog->aux->cgroup_storage = _map;
+ ret = 0;
+unlock:
+ spin_unlock_bh(&map->lock);
+
+ return ret;
+}
+
+void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+
+ spin_lock_bh(&map->lock);
+ if (map->prog == prog) {
+ WARN_ON(prog->aux->cgroup_storage != _map);
+ map->prog = NULL;
+ prog->aux->cgroup_storage = NULL;
+ }
+ spin_unlock_bh(&map->lock);
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog)
+{
+ struct bpf_cgroup_storage *storage;
+ struct bpf_map *map;
+ u32 pages;
+
+ map = prog->aux->cgroup_storage;
+ if (!map)
+ return NULL;
+
+ pages = round_up(sizeof(struct bpf_cgroup_storage) +
+ sizeof(struct bpf_storage_buffer) +
+ map->value_size, PAGE_SIZE) >> PAGE_SHIFT;
+ if (bpf_map_charge_memlock(map, pages))
+ return ERR_PTR(-EPERM);
+
+ storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
+ __GFP_ZERO | GFP_USER, map->numa_node);
+ if (!storage) {
+ bpf_map_uncharge_memlock(map, pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ storage->buf = kmalloc_node(sizeof(struct bpf_storage_buffer) +
+ map->value_size, __GFP_ZERO | GFP_USER,
+ map->numa_node);
+ if (!storage->buf) {
+ bpf_map_uncharge_memlock(map, pages);
+ kfree(storage);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ storage->map = (struct bpf_cgroup_storage_map *)map;
+
+ return storage;
+}
+
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
+{
+ u32 pages;
+ struct bpf_map *map;
+
+ if (!storage)
+ return;
+
+ map = &storage->map->map;
+ pages = round_up(sizeof(struct bpf_cgroup_storage) +
+ sizeof(struct bpf_storage_buffer) +
+ map->value_size, PAGE_SIZE) >> PAGE_SHIFT;
+ bpf_map_uncharge_memlock(map, pages);
+
+ kfree_rcu(storage->buf, rcu);
+ kfree_rcu(storage, rcu);
+}
+
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type)
+{
+ struct bpf_cgroup_storage_map *map;
+
+ if (!storage)
+ return;
+
+ storage->key.attach_type = type;
+ storage->key.cgroup_inode_id = cgroup->kn->id.id;
+
+ map = storage->map;
+
+ spin_lock_bh(&map->lock);
+ WARN_ON(cgroup_storage_insert(map, storage));
+ list_add(&storage->list, &map->list);
+ spin_unlock_bh(&map->lock);
+}
+
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
+{
+ struct bpf_cgroup_storage_map *map;
+ struct rb_root *root;
+
+ if (!storage)
+ return;
+
+ map = storage->map;
+
+ spin_lock_bh(&map->lock);
+ root = &map->root;
+ rb_erase(&storage->node, root);
+
+ list_del(&storage->list);
+ spin_unlock_bh(&map->lock);
+}
+
+#endif
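
From userspace a cgroup storage map is keyed by (cgroup inode id, attach type); elements are created by the kernel when a program using the map is attached, so only lookup and update make sense and delete returns -EINVAL. A hedged sketch of reading the counter accumulated by the program sketched earlier, going through the raw bpf(2) syscall so no particular libbpf version is assumed (read_cgroup_counter and its parameters are illustrative; the cgroup id is the cgroup directory's inode number):

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int read_cgroup_counter(int map_fd, uint64_t cgroup_id,
			       uint32_t attach_type, uint64_t *value)
{
	/* key layout matches struct bpf_cgroup_storage_key (uapi) */
	struct bpf_cgroup_storage_key key = {
		.cgroup_inode_id = cgroup_id,
		.attach_type	 = attach_type,	/* e.g. BPF_CGROUP_DEVICE */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (uint64_t)(unsigned long)&key;
	attr.value  = (uint64_t)(unsigned long)value;

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
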
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 1603492c9cc7..9058317ba9de 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -10,11 +10,13 @@
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <net/ipv6.h>
+#include <uapi/linux/btf.h>
/* Intermediate node */
#define LPM_TREE_NODE_FLAG_IM BIT(0)
@@ -686,6 +688,15 @@ free_stack:
return err;
}
+static int trie_check_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ /* Keys must have struct bpf_lpm_trie_key embedded. */
+ return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
+ -EINVAL : 0;
+}
+
const struct bpf_map_ops trie_map_ops = {
.map_alloc = trie_alloc,
.map_free = trie_free,
@@ -693,4 +704,5 @@ const struct bpf_map_ops trie_map_ops = {
.map_lookup_elem = trie_lookup_elem,
.map_update_elem = trie_update_elem,
.map_delete_elem = trie_delete_elem,
+ .map_check_btf = trie_check_btf,
};
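
trie_check_btf only insists that the BTF key is a struct, because every LPM trie key must embed struct bpf_lpm_trie_key: a __u32 prefix length followed by the address bytes. An illustrative key layout for IPv4 prefixes (the struct name is an example, not part of the patch):

#include <linux/bpf.h>		/* struct bpf_lpm_trie_key */
#include <linux/types.h>

/* prefixlen is in bits; the address follows in network byte order */
struct ipv4_lpm_key {
	struct bpf_lpm_trie_key	trie_key;
	__u32			addr;
};
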
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 1da574612bea..3bfbf4464416 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -23,7 +23,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
* is a runtime binding. Doing static check alone
* in the verifier is not enough.
*/
- if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
+ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE) {
fdput(f);
return ERR_PTR(-ENOTSUPP);
}
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index ac747d5cf7c6..177a52436394 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -18,19 +18,43 @@
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
+#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
-/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
+/* Protects offdevs, members of bpf_offload_netdev and offload members
* of all progs.
* RTNL lock cannot be taken when holding this lock.
*/
static DECLARE_RWSEM(bpf_devs_lock);
-static LIST_HEAD(bpf_prog_offload_devs);
-static LIST_HEAD(bpf_map_offload_devs);
+
+struct bpf_offload_dev {
+ struct list_head netdevs;
+};
+
+struct bpf_offload_netdev {
+ struct rhash_head l;
+ struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
+ struct list_head progs;
+ struct list_head maps;
+ struct list_head offdev_netdevs;
+};
+
+static const struct rhashtable_params offdevs_params = {
+ .nelem_hint = 4,
+ .key_len = sizeof(struct net_device *),
+ .key_offset = offsetof(struct bpf_offload_netdev, netdev),
+ .head_offset = offsetof(struct bpf_offload_netdev, l),
+ .automatic_shrinking = true,
+};
+
+static struct rhashtable offdevs;
+static bool offdevs_inited;
static int bpf_dev_offload_check(struct net_device *netdev)
{
@@ -41,8 +65,19 @@ static int bpf_dev_offload_check(struct net_device *netdev)
return 0;
}
+static struct bpf_offload_netdev *
+bpf_offload_find_netdev(struct net_device *netdev)
+{
+ lockdep_assert_held(&bpf_devs_lock);
+
+ if (!offdevs_inited)
+ return NULL;
+ return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
+}
+
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
+ struct bpf_offload_netdev *ondev;
struct bpf_prog_offload *offload;
int err;
@@ -66,12 +101,13 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
goto err_maybe_put;
down_write(&bpf_devs_lock);
- if (offload->netdev->reg_state != NETREG_REGISTERED) {
+ ondev = bpf_offload_find_netdev(offload->netdev);
+ if (!ondev) {
err = -EINVAL;
goto err_unlock;
}
prog->aux->offload = offload;
- list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
+ list_add_tail(&offload->offloads, &ondev->progs);
dev_put(offload->netdev);
up_write(&bpf_devs_lock);
@@ -294,6 +330,7 @@ static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
struct net *net = current->nsproxy->net_ns;
+ struct bpf_offload_netdev *ondev;
struct bpf_offloaded_map *offmap;
int err;
@@ -316,11 +353,17 @@ struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
if (err)
goto err_unlock;
+ ondev = bpf_offload_find_netdev(offmap->netdev);
+ if (!ondev) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
if (err)
goto err_unlock;
- list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
+ list_add_tail(&offmap->offloads, &ondev->maps);
up_write(&bpf_devs_lock);
rtnl_unlock();
@@ -468,77 +511,159 @@ int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
return 0;
}
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
+static bool __bpf_offload_dev_match(struct bpf_prog *prog,
+ struct net_device *netdev)
{
- struct bpf_offloaded_map *offmap;
+ struct bpf_offload_netdev *ondev1, *ondev2;
struct bpf_prog_offload *offload;
- bool ret;
if (!bpf_prog_is_dev_bound(prog->aux))
return false;
- if (!bpf_map_is_dev_bound(map))
- return bpf_map_offload_neutral(map);
- down_read(&bpf_devs_lock);
offload = prog->aux->offload;
- offmap = map_to_offmap(map);
+ if (!offload)
+ return false;
+ if (offload->netdev == netdev)
+ return true;
- ret = offload && offload->netdev == offmap->netdev;
+ ondev1 = bpf_offload_find_netdev(offload->netdev);
+ ondev2 = bpf_offload_find_netdev(netdev);
+
+ return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
+}
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
+{
+ bool ret;
+
+ down_read(&bpf_devs_lock);
+ ret = __bpf_offload_dev_match(prog, netdev);
up_read(&bpf_devs_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
-static void bpf_offload_orphan_all_progs(struct net_device *netdev)
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
- struct bpf_prog_offload *offload, *tmp;
+ struct bpf_offloaded_map *offmap;
+ bool ret;
- list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
- if (offload->netdev == netdev)
- __bpf_prog_offload_destroy(offload->prog);
+ if (!bpf_map_is_dev_bound(map))
+ return bpf_map_offload_neutral(map);
+ offmap = map_to_offmap(map);
+
+ down_read(&bpf_devs_lock);
+ ret = __bpf_offload_dev_match(prog, offmap->netdev);
+ up_read(&bpf_devs_lock);
+
+ return ret;
}
-static void bpf_offload_orphan_all_maps(struct net_device *netdev)
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
- struct bpf_offloaded_map *offmap, *tmp;
+ struct bpf_offload_netdev *ondev;
+ int err;
- list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
- if (offmap->netdev == netdev)
- __bpf_map_offload_destroy(offmap);
+ ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
+ if (!ondev)
+ return -ENOMEM;
+
+ ondev->netdev = netdev;
+ ondev->offdev = offdev;
+ INIT_LIST_HEAD(&ondev->progs);
+ INIT_LIST_HEAD(&ondev->maps);
+
+ down_write(&bpf_devs_lock);
+ err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
+ if (err) {
+ netdev_warn(netdev, "failed to register for BPF offload\n");
+ goto err_unlock_free;
+ }
+
+ list_add(&ondev->offdev_netdevs, &offdev->netdevs);
+ up_write(&bpf_devs_lock);
+ return 0;
+
+err_unlock_free:
+ up_write(&bpf_devs_lock);
+ kfree(ondev);
+ return err;
}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
-static int bpf_offload_notification(struct notifier_block *notifier,
- ulong event, void *ptr)
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct bpf_offload_netdev *ondev, *altdev;
+ struct bpf_offloaded_map *offmap, *mtmp;
+ struct bpf_prog_offload *offload, *ptmp;
ASSERT_RTNL();
- switch (event) {
- case NETDEV_UNREGISTER:
- /* ignore namespace changes */
- if (netdev->reg_state != NETREG_UNREGISTERING)
- break;
-
- down_write(&bpf_devs_lock);
- bpf_offload_orphan_all_progs(netdev);
- bpf_offload_orphan_all_maps(netdev);
- up_write(&bpf_devs_lock);
- break;
- default:
- break;
+ down_write(&bpf_devs_lock);
+ ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
+ if (WARN_ON(!ondev))
+ goto unlock;
+
+ WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
+ list_del(&ondev->offdev_netdevs);
+
+ /* Try to move the objects to another netdev of the device */
+ altdev = list_first_entry_or_null(&offdev->netdevs,
+ struct bpf_offload_netdev,
+ offdev_netdevs);
+ if (altdev) {
+ list_for_each_entry(offload, &ondev->progs, offloads)
+ offload->netdev = altdev->netdev;
+ list_splice_init(&ondev->progs, &altdev->progs);
+
+ list_for_each_entry(offmap, &ondev->maps, offloads)
+ offmap->netdev = altdev->netdev;
+ list_splice_init(&ondev->maps, &altdev->maps);
+ } else {
+ list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
+ __bpf_prog_offload_destroy(offload->prog);
+ list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
+ __bpf_map_offload_destroy(offmap);
}
- return NOTIFY_OK;
-}
-static struct notifier_block bpf_offload_notifier = {
- .notifier_call = bpf_offload_notification,
-};
+ WARN_ON(!list_empty(&ondev->progs));
+ WARN_ON(!list_empty(&ondev->maps));
+ kfree(ondev);
+unlock:
+ up_write(&bpf_devs_lock);
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
-static int __init bpf_offload_init(void)
+struct bpf_offload_dev *bpf_offload_dev_create(void)
{
- register_netdevice_notifier(&bpf_offload_notifier);
- return 0;
+ struct bpf_offload_dev *offdev;
+ int err;
+
+ down_write(&bpf_devs_lock);
+ if (!offdevs_inited) {
+ err = rhashtable_init(&offdevs, &offdevs_params);
+ if (err)
+ return ERR_PTR(err);
+ offdevs_inited = true;
+ }
+ up_write(&bpf_devs_lock);
+
+ offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
+ if (!offdev)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&offdev->netdevs);
+
+ return offdev;
}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
-subsys_initcall(bpf_offload_init);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
+{
+ WARN_ON(!list_empty(&offdev->netdevs));
+ kfree(offdev);
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
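
Offload bookkeeping moves from a netdev notifier into an explicit API driven by the device driver: one struct bpf_offload_dev per offload-capable device, with each of its netdevs registered against it, so programs and maps can migrate to a sibling port when one netdev goes away. A hedged sketch of the lifecycle a driver might follow (the driver structure and error handling are illustrative; in-tree, the NFP driver is the expected user of these calls at this point):

struct example_drv {				/* hypothetical driver state */
	struct bpf_offload_dev *bpf_dev;
};

static int example_drv_bpf_init(struct example_drv *drv,
				struct net_device *netdev)
{
	int err;

	drv->bpf_dev = bpf_offload_dev_create();
	if (IS_ERR(drv->bpf_dev))
		return PTR_ERR(drv->bpf_dev);

	err = bpf_offload_dev_netdev_register(drv->bpf_dev, netdev);
	if (err) {
		bpf_offload_dev_destroy(drv->bpf_dev);
		return err;
	}

	return 0;
}

static void example_drv_bpf_fini(struct example_drv *drv,
				 struct net_device *netdev)
{
	/* the unregister path asserts that RTNL is held */
	bpf_offload_dev_netdev_unregister(drv->bpf_dev, netdev);
	bpf_offload_dev_destroy(drv->bpf_dev);
}
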
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
new file mode 100644
index 000000000000..18e225de80ff
--- /dev/null
+++ b/kernel/bpf/reuseport_array.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Facebook
+ */
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <linux/sock_diag.h>
+#include <net/sock_reuseport.h>
+
+struct reuseport_array {
+ struct bpf_map map;
+ struct sock __rcu *ptrs[];
+};
+
+static struct reuseport_array *reuseport_array(struct bpf_map *map)
+{
+ return (struct reuseport_array *)map;
+}
+
+/* The caller must hold the reuseport_lock */
+void bpf_sk_reuseport_detach(struct sock *sk)
+{
+ struct sock __rcu **socks;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ socks = sk->sk_user_data;
+ if (socks) {
+ WRITE_ONCE(sk->sk_user_data, NULL);
+ /*
+ * Do not move this NULL assignment outside of
+ * sk->sk_callback_lock because there is
+ * a race with reuseport_array_free()
+ * which does not hold the reuseport_lock.
+ */
+ RCU_INIT_POINTER(*socks, NULL);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int reuseport_array_alloc_check(union bpf_attr *attr)
+{
+ if (attr->value_size != sizeof(u32) &&
+ attr->value_size != sizeof(u64))
+ return -EINVAL;
+
+ return array_map_alloc_check(attr);
+}
+
+static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ u32 index = *(u32 *)key;
+
+ if (unlikely(index >= array->map.max_entries))
+ return NULL;
+
+ return rcu_dereference(array->ptrs[index]);
+}
+
+/* Called from syscall only */
+static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ u32 index = *(u32 *)key;
+ struct sock *sk;
+ int err;
+
+ if (index >= map->max_entries)
+ return -E2BIG;
+
+ if (!rcu_access_pointer(array->ptrs[index]))
+ return -ENOENT;
+
+ spin_lock_bh(&reuseport_lock);
+
+ sk = rcu_dereference_protected(array->ptrs[index],
+ lockdep_is_held(&reuseport_lock));
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ WRITE_ONCE(sk->sk_user_data, NULL);
+ RCU_INIT_POINTER(array->ptrs[index], NULL);
+ write_unlock_bh(&sk->sk_callback_lock);
+ err = 0;
+ } else {
+ err = -ENOENT;
+ }
+
+ spin_unlock_bh(&reuseport_lock);
+
+ return err;
+}
+
+static void reuseport_array_free(struct bpf_map *map)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ struct sock *sk;
+ u32 i;
+
+ synchronize_rcu();
+
+ /*
+ * ops->map_*_elem() will not be able to access this
+ * array now. Hence, this function only races with
+ * bpf_sk_reuseport_detach() which was triggered by
+ * close() or disconnect().
+ *
+ * This function and bpf_sk_reuseport_detach() are
+ * both removing sk from "array". Who removes it
+ * first does not matter.
+ *
+ * The only concern here is bpf_sk_reuseport_detach()
+ * may access "array" which is being freed here.
+ * bpf_sk_reuseport_detach() access this "array"
+ * through sk->sk_user_data _and_ with sk->sk_callback_lock
+ * held which is enough because this "array" is not freed
+ * until all sk->sk_user_data has stopped referencing this "array".
+ *
+ * Hence, due to the above, taking "reuseport_lock" is not
+ * needed here.
+ */
+
+ /*
+ * Since reuseport_lock is not taken, sk is accessed under
+ * rcu_read_lock()
+ */
+ rcu_read_lock();
+ for (i = 0; i < map->max_entries; i++) {
+ sk = rcu_dereference(array->ptrs[i]);
+ if (sk) {
+ write_lock_bh(&sk->sk_callback_lock);
+ /*
+ * No need for WRITE_ONCE(). At this point,
+ * no one is reading it without taking the
+ * sk->sk_callback_lock.
+ */
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+ RCU_INIT_POINTER(array->ptrs[i], NULL);
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+ * Once reaching here, no sk->sk_user_data is
+ * referencing this "array". "array" can be freed now.
+ */
+ bpf_map_area_free(array);
+}
+
+static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
+{
+ int err, numa_node = bpf_map_attr_numa_node(attr);
+ struct reuseport_array *array;
+ u64 cost, array_size;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ array_size = sizeof(*array);
+ array_size += (u64)attr->max_entries * sizeof(struct sock *);
+
+ /* make sure there is no u32 overflow later in round_up() */
+ cost = array_size;
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-ENOMEM);
+ cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ err = bpf_map_precharge_memlock(cost);
+ if (err)
+ return ERR_PTR(err);
+
+ /* allocate all map elements and zero-initialize them */
+ array = bpf_map_area_alloc(array_size, numa_node);
+ if (!array)
+ return ERR_PTR(-ENOMEM);
+
+ /* copy mandatory map attributes */
+ bpf_map_init_from_attr(&array->map, attr);
+ array->map.pages = cost;
+
+ return &array->map;
+}
+
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value)
+{
+ struct sock *sk;
+ int err;
+
+ if (map->value_size != sizeof(u64))
+ return -ENOSPC;
+
+ rcu_read_lock();
+ sk = reuseport_array_lookup_elem(map, key);
+ if (sk) {
+ *(u64 *)value = sock_gen_cookie(sk);
+ err = 0;
+ } else {
+ err = -ENOENT;
+ }
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int
+reuseport_array_update_check(const struct reuseport_array *array,
+ const struct sock *nsk,
+ const struct sock *osk,
+ const struct sock_reuseport *nsk_reuse,
+ u32 map_flags)
+{
+ if (osk && map_flags == BPF_NOEXIST)
+ return -EEXIST;
+
+ if (!osk && map_flags == BPF_EXIST)
+ return -ENOENT;
+
+ if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
+ return -ENOTSUPP;
+
+ if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
+ return -ENOTSUPP;
+
+ if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
+ return -ENOTSUPP;
+
+ /*
+ * sk must be hashed (i.e. listening in the TCP case or bound
+ * in the UDP case) and
+ * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).
+ *
+ * Also, sk will be used in bpf helper that is protected by
+ * rcu_read_lock().
+ */
+ if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
+ return -EINVAL;
+
+ /* READ_ONCE because the sk->sk_callback_lock may not be held here */
+ if (READ_ONCE(nsk->sk_user_data))
+ return -EBUSY;
+
+ return 0;
+}
+
+/*
+ * Called from syscall only.
+ * The "nsk" in the fd refcnt.
+ * The "osk" and "reuse" are protected by reuseport_lock.
+ */
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ struct sock *free_osk = NULL, *osk, *nsk;
+ struct sock_reuseport *reuse;
+ u32 index = *(u32 *)key;
+ struct socket *socket;
+ int err, fd;
+
+ if (map_flags > BPF_EXIST)
+ return -EINVAL;
+
+ if (index >= map->max_entries)
+ return -E2BIG;
+
+ if (map->value_size == sizeof(u64)) {
+ u64 fd64 = *(u64 *)value;
+
+ if (fd64 > S32_MAX)
+ return -EINVAL;
+ fd = fd64;
+ } else {
+ fd = *(int *)value;
+ }
+
+ socket = sockfd_lookup(fd, &err);
+ if (!socket)
+ return err;
+
+ nsk = socket->sk;
+ if (!nsk) {
+ err = -EINVAL;
+ goto put_file;
+ }
+
+ /* Quick checks before taking reuseport_lock */
+ err = reuseport_array_update_check(array, nsk,
+ rcu_access_pointer(array->ptrs[index]),
+ rcu_access_pointer(nsk->sk_reuseport_cb),
+ map_flags);
+ if (err)
+ goto put_file;
+
+ spin_lock_bh(&reuseport_lock);
+ /*
+ * Some of the checks only need reuseport_lock
+ * but it is done under sk_callback_lock also
+ * for simplicity reason.
+ */
+ write_lock_bh(&nsk->sk_callback_lock);
+
+ osk = rcu_dereference_protected(array->ptrs[index],
+ lockdep_is_held(&reuseport_lock));
+ reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
+ if (err)
+ goto put_file_unlock;
+
+ /* Ensure reuse->reuseport_id is set */
+ err = reuseport_get_id(reuse);
+ if (err < 0)
+ goto put_file_unlock;
+
+ WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
+ rcu_assign_pointer(array->ptrs[index], nsk);
+ free_osk = osk;
+ err = 0;
+
+put_file_unlock:
+ write_unlock_bh(&nsk->sk_callback_lock);
+
+ if (free_osk) {
+ write_lock_bh(&free_osk->sk_callback_lock);
+ WRITE_ONCE(free_osk->sk_user_data, NULL);
+ write_unlock_bh(&free_osk->sk_callback_lock);
+ }
+
+ spin_unlock_bh(&reuseport_lock);
+put_file:
+ fput(socket->file);
+ return err;
+}
+
+/* Called from syscall */
+static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ struct reuseport_array *array = reuseport_array(map);
+ u32 index = key ? *(u32 *)key : U32_MAX;
+ u32 *next = (u32 *)next_key;
+
+ if (index >= array->map.max_entries) {
+ *next = 0;
+ return 0;
+ }
+
+ if (index == array->map.max_entries - 1)
+ return -ENOENT;
+
+ *next = index + 1;
+ return 0;
+}
+
+const struct bpf_map_ops reuseport_array_ops = {
+ .map_alloc_check = reuseport_array_alloc_check,
+ .map_alloc = reuseport_array_alloc,
+ .map_free = reuseport_array_free,
+ .map_lookup_elem = reuseport_array_lookup_elem,
+ .map_get_next_key = reuseport_array_get_next_key,
+ .map_delete_elem = reuseport_array_delete_elem,
+};
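
The new map type stores SO_REUSEPORT sockets so that the BPF socket-selection program type introduced alongside it can pick one by index; updates are syscall-only and take a socket fd as the value, and the socket must already be hashed (listening TCP or bound UDP) with reuseport enabled. A hedged userspace sketch of creating the map and inserting a socket, assuming libbpf's bpf_create_map()/bpf_map_update_elem() wrappers of this era:

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <linux/types.h>

static int make_reuseport_map(__u32 max_socks)
{
	/* value_size may be 4 (fd as u32) or 8 (fd as u64) */
	return bpf_create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
			      sizeof(__u32), sizeof(__u64),
			      max_socks, 0);
}

/* sock_fd: a listening TCP (or bound UDP) socket created with SO_REUSEPORT */
static int add_reuseport_sock(int map_fd, __u32 index, int sock_fd)
{
	__u64 value = sock_fd;

	return bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST);
}
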
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98fb7938beea..0c1a696b041b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -725,11 +725,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
{
bool ingress = !!(md->flags & BPF_F_INGRESS);
struct smap_psock *psock;
- struct scatterlist *sg;
int err = 0;
- sg = md->sg_data;
-
rcu_read_lock();
psock = smap_psock_sk(sk);
if (unlikely(!psock))
@@ -1048,12 +1045,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
while (msg_data_left(msg)) {
- struct sk_msg_buff *m;
+ struct sk_msg_buff *m = NULL;
bool enospc = false;
int copy;
if (sk->sk_err) {
- err = sk->sk_err;
+ err = -sk->sk_err;
goto out_err;
}
@@ -1116,8 +1113,11 @@ wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
err = sk_stream_wait_memory(sk, &timeo);
- if (err)
+ if (err) {
+ if (m && m != psock->cork)
+ free_start_sg(sk, m);
goto out_err;
+ }
}
out_err:
if (err < 0)
@@ -2498,6 +2498,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
.map_release_uref = sock_map_release,
+ .map_check_btf = map_check_no_btf,
};
const struct bpf_map_ops sock_hash_ops = {
@@ -2508,6 +2509,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_update_elem = sock_hash_update_elem,
.map_delete_elem = sock_hash_delete_elem,
.map_release_uref = sock_map_release,
+ .map_check_btf = map_check_no_btf,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index b675a3f3d141..8061a439ef18 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -607,6 +607,7 @@ const struct bpf_map_ops stack_map_ops = {
.map_lookup_elem = stack_map_lookup_elem,
.map_update_elem = stack_map_update_elem,
.map_delete_elem = stack_map_delete_elem,
+ .map_check_btf = map_check_no_btf,
};
static int __init stack_map_init(void)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a31a1ba0f8ea..8339d81cba1d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -103,6 +103,7 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
const struct bpf_map_ops bpf_map_offload_ops = {
.map_alloc = bpf_map_offload_map_alloc,
.map_free = bpf_map_offload_map_free,
+ .map_check_btf = map_check_no_btf,
};
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
@@ -181,32 +182,60 @@ int bpf_map_precharge_memlock(u32 pages)
return 0;
}
-static int bpf_map_charge_memlock(struct bpf_map *map)
+static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
- struct user_struct *user = get_current_user();
- unsigned long memlock_limit;
+ unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
+ atomic_long_sub(pages, &user->locked_vm);
+ return -EPERM;
+ }
+ return 0;
+}
- atomic_long_add(map->pages, &user->locked_vm);
+static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
+{
+ atomic_long_sub(pages, &user->locked_vm);
+}
- if (atomic_long_read(&user->locked_vm) > memlock_limit) {
- atomic_long_sub(map->pages, &user->locked_vm);
+static int bpf_map_init_memlock(struct bpf_map *map)
+{
+ struct user_struct *user = get_current_user();
+ int ret;
+
+ ret = bpf_charge_memlock(user, map->pages);
+ if (ret) {
free_uid(user);
- return -EPERM;
+ return ret;
}
map->user = user;
- return 0;
+ return ret;
}
-static void bpf_map_uncharge_memlock(struct bpf_map *map)
+static void bpf_map_release_memlock(struct bpf_map *map)
{
struct user_struct *user = map->user;
-
- atomic_long_sub(map->pages, &user->locked_vm);
+ bpf_uncharge_memlock(user, map->pages);
free_uid(user);
}
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
+{
+ int ret;
+
+ ret = bpf_charge_memlock(map->user, pages);
+ if (ret)
+ return ret;
+ map->pages += pages;
+ return ret;
+}
+
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
+{
+ bpf_uncharge_memlock(map->user, pages);
+ map->pages -= pages;
+}
+
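
The two new exported helpers let a map implementation charge extra pages against the creating user's RLIMIT_MEMLOCK after the map already exists, which the cgroup local storage map in this series does as elements are allocated. Below is a minimal kernel-style sketch for a hypothetical map that allocates an extra buffer at runtime; note that the helpers themselves do not serialize updates to map->pages, so real callers must.

/* Sketch only, not part of the patch. */
#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static void *example_alloc_extra(struct bpf_map *map, size_t size)
{
	u32 pages = DIV_ROUND_UP(size, PAGE_SIZE);
	void *buf;

	if (bpf_map_charge_memlock(map, pages))
		return NULL;		/* would exceed RLIMIT_MEMLOCK */

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf) {
		bpf_map_uncharge_memlock(map, pages);
		return NULL;
	}
	return buf;
}
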
static int bpf_map_alloc_id(struct bpf_map *map)
{
int id;
@@ -256,7 +285,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
- bpf_map_uncharge_memlock(map);
+ bpf_map_release_memlock(map);
security_bpf_map_free(map);
/* implementation dependent freeing */
map->ops->map_free(map);
@@ -427,6 +456,34 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
return 0;
}
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ return -ENOTSUPP;
+}
+
+static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
+ u32 btf_key_id, u32 btf_value_id)
+{
+ const struct btf_type *key_type, *value_type;
+ u32 key_size, value_size;
+ int ret = 0;
+
+ key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
+ if (!key_type || key_size != map->key_size)
+ return -EINVAL;
+
+ value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
+ if (!value_type || value_size != map->value_size)
+ return -EINVAL;
+
+ if (map->ops->map_check_btf)
+ ret = map->ops->map_check_btf(map, key_type, value_type);
+
+ return ret;
+}
+
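
With the size checks moved into the generic map_check_btf(), any map type that does not override .map_check_btf with map_check_no_btf() can now be created with BTF ids, as long as the BTF key and value sizes match key_size and value_size exactly. A minimal userspace sketch of the BPF_MAP_CREATE attributes involved; btf_fd, key_id and value_id are assumed to come from an earlier BPF_BTF_LOAD and are illustrative.

/* Sketch only, not part of the patch. */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_map_with_btf(int btf_fd, __u32 key_id, __u32 value_id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = 4;	/* must equal the size of the BTF key type */
	attr.value_size = 8;	/* must equal the size of the BTF value type */
	attr.max_entries = 128;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = key_id;
	attr.btf_value_type_id = value_id;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
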
#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
@@ -461,8 +518,7 @@ static int map_create(union bpf_attr *attr)
atomic_set(&map->refcnt, 1);
atomic_set(&map->usercnt, 1);
- if (bpf_map_support_seq_show(map) &&
- (attr->btf_key_type_id || attr->btf_value_type_id)) {
+ if (attr->btf_key_type_id || attr->btf_value_type_id) {
struct btf *btf;
if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
@@ -476,8 +532,8 @@ static int map_create(union bpf_attr *attr)
goto free_map_nouncharge;
}
- err = map->ops->map_check_btf(map, btf, attr->btf_key_type_id,
- attr->btf_value_type_id);
+ err = map_check_btf(map, btf, attr->btf_key_type_id,
+ attr->btf_value_type_id);
if (err) {
btf_put(btf);
goto free_map_nouncharge;
@@ -492,7 +548,7 @@ static int map_create(union bpf_attr *attr)
if (err)
goto free_map_nouncharge;
- err = bpf_map_charge_memlock(map);
+ err = bpf_map_init_memlock(map);
if (err)
goto free_map_sec;
@@ -515,7 +571,7 @@ static int map_create(union bpf_attr *attr)
return err;
free_map:
- bpf_map_uncharge_memlock(map);
+ bpf_map_release_memlock(map);
free_map_sec:
security_bpf_map_free(map);
free_map_nouncharge:
@@ -575,7 +631,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
{
int refold;
- refold = __atomic_add_unless(&map->refcnt, 1, 0);
+ refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
if (refold >= BPF_MAX_REFCNT) {
__bpf_map_put(map, false);
@@ -656,6 +712,8 @@ static int map_lookup_elem(union bpf_attr *attr)
err = bpf_fd_array_map_lookup_elem(map, key, value);
} else if (IS_FD_HASH(map)) {
err = bpf_fd_htab_map_lookup_elem(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+ err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
} else {
rcu_read_lock();
ptr = map->ops->map_lookup_elem(map, key);
@@ -762,6 +820,10 @@ static int map_update_elem(union bpf_attr *attr)
err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
attr->flags);
rcu_read_unlock();
+ } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+ /* rcu_read_lock() is not needed */
+ err = bpf_fd_reuseport_array_update_elem(map, key, value,
+ attr->flags);
} else {
rcu_read_lock();
err = map->ops->map_update_elem(map, key, value, attr->flags);
@@ -929,6 +991,9 @@ static void free_used_maps(struct bpf_prog_aux *aux)
{
int i;
+ if (aux->cgroup_storage)
+ bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage);
+
for (i = 0; i < aux->used_map_cnt; i++)
bpf_map_put(aux->used_maps[i]);
@@ -1144,7 +1209,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
int refold;
- refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+ refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
if (refold >= BPF_MAX_REFCNT) {
__bpf_prog_put(prog, false);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 63aaac52a265..ca90679a7fe5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1310,6 +1310,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
/* dst_input() and dst_output() can't write for now */
if (t == BPF_WRITE)
return false;
@@ -2127,6 +2128,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ if (func_id != BPF_FUNC_get_local_storage)
+ goto error;
+ break;
/* devmap returns a pointer to a live net_device ifindex that we cannot
* allow to be modified from bpf side. So do not allow lookup elements
* for now.
@@ -2162,6 +2167,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_msg_redirect_hash)
goto error;
break;
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ if (func_id != BPF_FUNC_sk_select_reuseport)
+ goto error;
+ break;
default:
break;
}
@@ -2209,6 +2218,14 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
+ case BPF_FUNC_get_local_storage:
+ if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
+ goto error;
+ break;
+ case BPF_FUNC_sk_select_reuseport:
+ if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ goto error;
+ break;
default:
break;
}
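
The two new cases pair each map type with exactly one helper, and the function-side checks in the next hunk enforce the reverse, so a REUSEPORT_SOCKARRAY is only usable through bpf_sk_select_reuseport() and a CGROUP_STORAGE map only through bpf_get_local_storage(). A minimal BPF-C sketch of the reuseport side, assuming selftests-style bpf_helpers.h declarations for SEC(), struct bpf_map_def and bpf_sk_select_reuseport(); the map layout and index are illustrative.

/* Sketch only, not part of the patch. */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* assumed: selftests/samples helper header */

struct bpf_map_def SEC("maps") reuse_map = {
	.type		= BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u64),
	.max_entries	= 16,
};

SEC("sk_reuseport")
int select_sock(struct sk_reuseport_md *ctx)
{
	__u32 index = 0;	/* illustrative: always pick slot 0 */

	/* returns 0 when a socket was selected from the map */
	if (bpf_sk_select_reuseport(ctx, &reuse_map, &index, 0) == 0)
		return SK_PASS;
	return SK_DROP;
}

char _license[] SEC("license") = "GPL";
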
@@ -2533,6 +2550,16 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
}
regs = cur_regs(env);
+
+	/* check that the flags argument in get_local_storage(map, flags) is 0;
+	 * this is required because get_local_storage() can't return an error.
+	 */
+ if (func_id == BPF_FUNC_get_local_storage &&
+ !register_is_null(&regs[BPF_REG_2])) {
+ verbose(env, "get_local_storage() doesn't support non-zero flags\n");
+ return -EINVAL;
+ }
+
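
Because bpf_get_local_storage() cannot return an error, it gets the new RET_PTR_TO_MAP_VALUE return type in the following hunk and, as enforced here, must be called with a flags argument that is a known zero. A minimal BPF-C sketch of a cgroup program using the helper, assuming selftests-style bpf_helpers.h declarations; the map layout and section name are illustrative.

/* Sketch only, not part of the patch. */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* assumed: selftests/samples helper header */

struct bpf_map_def SEC("maps") pkt_cnt = {
	.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
	.key_size	= sizeof(struct bpf_cgroup_storage_key),
	.value_size	= sizeof(__u64),
	/* max_entries stays 0: one value per (cgroup, attach_type) */
};

SEC("cgroup/skb")
int count_packets(struct __sk_buff *skb)
{
	__u64 *cnt;

	/* a non-zero flags value would be rejected at verification time */
	cnt = bpf_get_local_storage(&pkt_cnt, 0);
	(*cnt)++;

	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";
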
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
@@ -2545,8 +2572,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
mark_reg_unknown(env, regs, BPF_REG_0);
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
- } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+ } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
+ fn->ret_type == RET_PTR_TO_MAP_VALUE) {
+ if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
+ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+ else
+ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].off = 0;
@@ -3238,8 +3269,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
}
- /* check dest operand */
- err = check_reg_arg(env, insn->dst_reg, DST_OP);
+ /* check dest operand, mark as required later */
+ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
@@ -3265,6 +3296,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* case: R = imm
* remember the value we stored into this reg
*/
+ /* clear any state __mark_reg_known doesn't set */
+ mark_reg_unknown(env, regs, insn->dst_reg);
regs[insn->dst_reg].type = SCALAR_VALUE;
if (BPF_CLASS(insn->code) == BPF_ALU64) {
__mark_reg_known(regs + insn->dst_reg,
@@ -5054,7 +5087,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
- !bpf_offload_dev_match(prog, map)) {
+ !bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
@@ -5152,6 +5185,14 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
}
env->used_maps[env->used_map_cnt++] = map;
+ if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE &&
+ bpf_cgroup_storage_assign(env->prog, map)) {
+ verbose(env,
+ "only one cgroup storage is allowed\n");
+ fdput(f);
+ return -EBUSY;
+ }
+
fdput(f);
next_insn:
insn++;
@@ -5178,6 +5219,10 @@ static void release_maps(struct bpf_verifier_env *env)
{
int i;
+ if (env->prog->aux->cgroup_storage)
+ bpf_cgroup_storage_release(env->prog,
+ env->prog->aux->cgroup_storage);
+
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index b3c557476a8d..4ddf61e158f6 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -227,6 +227,5 @@ const struct bpf_map_ops xsk_map_ops = {
.map_lookup_elem = xsk_map_lookup_elem,
.map_update_elem = xsk_map_update_elem,
.map_delete_elem = xsk_map_delete_elem,
+ .map_check_btf = map_check_no_btf,
};
-
-