author    Alexei Starovoitov <ast@kernel.org>    2026-01-07 07:48:33 +0300
committer Alexei Starovoitov <ast@kernel.org>    2026-01-07 07:48:33 +0300
commit    f39703b20b57126b6acbb2ed32bf81e3c8ec9f96 (patch)
tree      2402dad36a68e520d03b1bb893d2af6330ed9a6e /tools
parent    a8d506759231124efb911a3bd14d1ec2d9de15a1 (diff)
parent    07bf7aa58e5e7fb27b8addcc33052400a7d9ce32 (diff)
Merge branch 'bpf-introduce-bpf_f_cpu-and-bpf_f_all_cpus-flags-for-percpu-maps'
Leon Hwang says:

====================
bpf: Introduce BPF_F_CPU and BPF_F_ALL_CPUS flags for percpu maps

This patch set introduces the BPF_F_CPU and BPF_F_ALL_CPUS flags for
percpu maps. The need for a BPF_F_ALL_CPUS flag for percpu_array maps
was discussed in the thread "[PATCH bpf-next v3 0/4] bpf: Introduce
global percpu data"[1].

The goal of the BPF_F_ALL_CPUS flag is to reduce data caching overhead
in light skeletons by allowing a single value to be reused to update
values across all CPUs. This avoids the M:N problem where M cached
values are used to update a map on a system with N CPUs.

The BPF_F_CPU flag is accompanied by a CPU number embedded in *flags*,
which specifies the target CPU for the operation:

* For lookup operations: the flag and embedded CPU number enable
  querying the value on the specified CPU.
* For update operations: the flag and embedded CPU number enable
  updating the value for the specified CPU.

Links:
[1] https://lore.kernel.org/bpf/20250526162146.24429-1-leon.hwang@linux.dev/

Changes:

v12 -> v13:
* No changes, rebased on latest tree.

v11 -> v12:
* Dropped the v11 changes.
* Stabilized the lru_percpu_hash map test by keeping an extra spare
  entry, which can be used temporarily during updates to avoid
  unintended LRU evictions.

v10 -> v11:
* Support the combination of BPF_EXIST and BPF_F_CPU/BPF_F_ALL_CPUS for
  update operations.
* Fix unstable lru_percpu_hash map test using the combination of
  BPF_EXIST and BPF_F_CPU/BPF_F_ALL_CPUS to avoid LRU eviction
  (reported by Alexei).

v9 -> v10:
* Add tests to verify array and hash maps do not support BPF_F_CPU and
  BPF_F_ALL_CPUS flags.
* Address comment from Andrii:
  * Copy map value using copy_map_value_long for percpu_cgroup_storage
    maps in a separate patch.

v8 -> v9:
* Change value type from u64 to u32 in selftests.
* Address comments from Andrii:
  * Keep value_size unaligned and update everywhere for consistency
    when cpu flags are specified.
  * Update value by getting pointer for percpu hash and percpu
    cgroup_storage maps.

v7 -> v8:
* Address comments from Andrii:
  * Check BPF_F_LOCK when updating percpu_array, percpu_hash and
    lru_percpu_hash maps.
  * Refactor flags check in __htab_map_lookup_and_delete_batch().
  * Keep value_size unaligned and copy value using copy_map_value() in
    __htab_map_lookup_and_delete_batch() when BPF_F_CPU is specified.
  * Update warn message in libbpf's validate_map_op().
  * Update comment of libbpf's bpf_map__lookup_elem().

v6 -> v7:
* Get correct value size for percpu_hash and lru_percpu_hash in
  update_batch API.
* Set 'count' as 'max_entries' in test cases for lookup_batch API.
* Address comment from Alexei:
  * Move cpu flags check into bpf_map_check_op_flags().

v5 -> v6:
* Move bpf_map_check_op_flags() from 'bpf.h' to 'syscall.c'.
* Address comments from Alexei:
  * Drop the refactoring code of data copying logic for percpu maps.
  * Drop bpf_map_check_op_flags() wrappers.

v4 -> v5:
* Address comments from Andrii:
  * Refactor data copying logic for all percpu maps.
  * Drop this_cpu_ptr() micro-optimization.
  * Drop cpu check in libbpf's validate_map_op().
  * Enhance bpf_map_check_op_flags() using *allowed flags* instead of
    'extra_flags_mask'.

v3 -> v4:
* Address comments from Andrii:
  * Remove unnecessary map_type check in bpf_map_value_size().
  * Reduce code churn.
  * Remove unnecessary do_delete check in
    __htab_map_lookup_and_delete_batch().
  * Introduce bpf_percpu_copy_to_user() and bpf_percpu_copy_from_user().
  * Rename check_map_flags() to bpf_map_check_op_flags() with
    extra_flags_mask.
  * Add human-readable pr_warn() explanations in validate_map_op().
  * Use flags in bpf_map__delete_elem() and
    bpf_map__lookup_and_delete_elem().
  * Drop "for alignment reasons".

v3 link: https://lore.kernel.org/bpf/20250821160817.70285-1-leon.hwang@linux.dev/

v2 -> v3:
* Address comments from Alexei:
  * Use BPF_F_ALL_CPUS instead of BPF_ALL_CPUS magic.
  * Introduce these two cpu flags for all percpu maps.
* Address comments from Jiri:
  * Reduce some unnecessary u32 casts.
  * Refactor a more generic map flags check function.
  * Fix a code style issue.

v2 link: https://lore.kernel.org/bpf/20250805163017.17015-1-leon.hwang@linux.dev/

v1 -> v2:
* Address comments from Andrii:
  * Embed cpu info entirely in the high 32 bits of *flags*.
  * Use ERANGE instead of E2BIG.
  * Fix a few format issues.
====================

Link: https://patch.msgid.link/20260107022022.12843-1-leon.hwang@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
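For orientation, here is a minimal userspace sketch of the two flags
through libbpf's existing wrappers (illustrative only, not part of the
patch; assumes `map_fd` refers to an already-created percpu_array with
4-byte values, mirroring the selftests added below):

#include <bpf/bpf.h>

static int demo_percpu_flags(int map_fd, unsigned int cpu)
{
	int key = 0, err;
	__u32 val = 0xDEADC0DE;

	/* One buffer of plain value_size bytes, replicated by the
	 * kernel to every CPU's copy of the element.
	 */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);
	if (err)
		return err;

	/* Read back only the copy on `cpu`; the CPU number rides in
	 * the upper 32 bits of the flags word.
	 */
	return bpf_map_lookup_elem_flags(map_fd, &key, &val,
					 (__u64)cpu << 32 | BPF_F_CPU);
}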
Diffstat (limited to 'tools')
-rw-r--r--  tools/include/uapi/linux/bpf.h                          |   2
-rw-r--r--  tools/lib/bpf/bpf.h                                     |   8
-rw-r--r--  tools/lib/bpf/libbpf.c                                  |  26
-rw-r--r--  tools/lib/bpf/libbpf.h                                  |  21
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/percpu_alloc.c   | 328
-rw-r--r--  tools/testing/selftests/bpf/progs/percpu_alloc_array.c  |  32
6 files changed, 398 insertions(+), 19 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 6b92b0847ec2..b816bc53d2e1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1384,6 +1384,8 @@ enum {
BPF_NOEXIST = 1, /* create new element if it didn't exist */
BPF_EXIST = 2, /* update existing element */
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+ BPF_F_CPU = 8, /* cpu flag for percpu maps; the upper 32 bits of flags hold the cpu number */
+ BPF_F_ALL_CPUS = 16, /* update value across all CPUs for percpu maps */
};
/* flags for BPF_MAP_CREATE command */
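As a side note (sketch only, not part of the patch): composing the
flags word is a shift-and-or, and the selftests below expect -ERANGE
when the embedded CPU index is at or above the number of possible CPUs.

static inline __u64 percpu_cpu_flags(__u32 cpu)
{
	/* CPU index in the upper 32 bits, BPF_F_CPU in the lower. */
	return (__u64)cpu << 32 | BPF_F_CPU;
}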
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 1f9c28d27795..2c8e88ddb674 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -289,6 +289,14 @@ LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
* Update spin_lock-ed map elements. This must be
* specified if the map value contains a spinlock.
*
+ * **BPF_F_CPU**
+ * For percpu maps, update the value on the specified CPU only; the CPU
+ * number is embedded in the high 32 bits of **opts->elem_flags**.
+ *
+ * **BPF_F_ALL_CPUS**
+ * For percpu maps, update the value across all CPUs. This flag cannot
+ * be combined with BPF_F_CPU.
+ *
* @param fd BPF map file descriptor
* @param keys pointer to an array of *count* keys
* @param values pointer to an array of *count* values
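A hedged sketch of the batch path documented above (the helper name and
buffers are hypothetical; the pattern matches the selftests in this
merge): with BPF_F_ALL_CPUS in opts->elem_flags, `values` holds one
plain value_size entry per element rather than one per element per CPU.

static int update_batch_all_cpus(int map_fd, void *keys, void *values,
				 __u32 entries)
{
	__u32 count = entries;
	LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = BPF_F_ALL_CPUS,
	);

	/* Each of `entries` elements gets its single value applied to
	 * every CPU by the kernel.
	 */
	return bpf_map_update_batch(map_fd, keys, values, &count, &opts);
}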
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1a52d818a76c..6ea81701e274 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10919,7 +10919,7 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
}
static int validate_map_op(const struct bpf_map *map, size_t key_sz,
- size_t value_sz, bool check_value_sz)
+ size_t value_sz, bool check_value_sz, __u64 flags)
{
if (!map_is_created(map)) /* map is not yet created */
return -ENOENT;
@@ -10946,6 +10946,20 @@ static int validate_map_op(const struct bpf_map *map, size_t key_sz,
int num_cpu = libbpf_num_possible_cpus();
size_t elem_sz = roundup(map->def.value_size, 8);
+ if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
+ if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS)) {
+ pr_warn("map '%s': BPF_F_CPU and BPF_F_ALL_CPUS are mutually exclusive\n",
+ map->name);
+ return -EINVAL;
+ }
+ if (map->def.value_size != value_sz) {
+ pr_warn("map '%s': unexpected value size %zu provided for either BPF_F_CPU or BPF_F_ALL_CPUS, expected %u\n",
+ map->name, value_sz, map->def.value_size);
+ return -EINVAL;
+ }
+ break;
+ }
+
if (value_sz != num_cpu * elem_sz) {
pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
@@ -10970,7 +10984,7 @@ int bpf_map__lookup_elem(const struct bpf_map *map,
{
int err;
- err = validate_map_op(map, key_sz, value_sz, true);
+ err = validate_map_op(map, key_sz, value_sz, true, flags);
if (err)
return libbpf_err(err);
@@ -10983,7 +10997,7 @@ int bpf_map__update_elem(const struct bpf_map *map,
{
int err;
- err = validate_map_op(map, key_sz, value_sz, true);
+ err = validate_map_op(map, key_sz, value_sz, true, flags);
if (err)
return libbpf_err(err);
@@ -10995,7 +11009,7 @@ int bpf_map__delete_elem(const struct bpf_map *map,
{
int err;
- err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+ err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, flags);
if (err)
return libbpf_err(err);
@@ -11008,7 +11022,7 @@ int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
{
int err;
- err = validate_map_op(map, key_sz, value_sz, true);
+ err = validate_map_op(map, key_sz, value_sz, true, flags);
if (err)
return libbpf_err(err);
@@ -11020,7 +11034,7 @@ int bpf_map__get_next_key(const struct bpf_map *map,
{
int err;
- err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+ err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0);
if (err)
return libbpf_err(err);
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index e14d9e349f9c..dfc37a615578 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1216,12 +1216,13 @@ LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
* @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
* @param value pointer to memory in which looked up value will be stored
* @param value_sz size in byte of value data memory; it has to match BPF map
- * definition's **value_size**. For per-CPU BPF maps value size has to be
- * a product of BPF map value size and number of possible CPUs in the system
- * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
- * per-CPU values value size has to be aligned up to closest 8 bytes for
- * alignment reasons, so expected size is: `round_up(value_size, 8)
- * * libbpf_num_possible_cpus()`.
+ * definition's **value_size**. For per-CPU BPF maps, value size is just
+ * **value_size** if either **BPF_F_CPU** or **BPF_F_ALL_CPUS** is
+ * specified in **flags**; otherwise it is the product of the BPF map
+ * value size and the number of possible CPUs in the system (which can
+ * be fetched with **libbpf_num_possible_cpus()**). Note also that in the
+ * latter case each per-CPU value is rounded up to the closest 8 bytes, so
+ * the expected size is: `round_up(value_size, 8) * libbpf_num_possible_cpus()`.
* @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
@@ -1239,13 +1240,7 @@ LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
* @param key pointer to memory containing bytes of the key
* @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
* @param value pointer to memory containing bytes of the value
- * @param value_sz size in byte of value data memory; it has to match BPF map
- * definition's **value_size**. For per-CPU BPF maps value size has to be
- * a product of BPF map value size and number of possible CPUs in the system
- * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
- * per-CPU values value size has to be aligned up to closest 8 bytes for
- * alignment reasons, so expected size is: `round_up(value_size, 8)
- * * libbpf_num_possible_cpus()`.
+ * @param value_sz refer to **bpf_map__lookup_elem**'s description.
* @param flags extra flags passed to kernel for this operation
* @return 0, on success; negative error, otherwise
*
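To make the sizing rule concrete, a small sketch (the helper is made
up): with BPF_F_CPU the caller passes a single value_size buffer,
whereas a flag-less per-CPU lookup still needs the full
`round_up(value_size, 8) * libbpf_num_possible_cpus()` buffer.

static int lookup_on_cpu(const struct bpf_map *map, int key, __u32 cpu,
			 __u32 *out)
{
	__u64 flags = (__u64)cpu << 32 | BPF_F_CPU;

	/* value_sz is just sizeof(*out) here, not the all-CPUs size. */
	return bpf_map__lookup_elem(map, &key, sizeof(key), out,
				    sizeof(*out), flags);
}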
diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
index 343da65864d6..c1d0949f093f 100644
--- a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "cgroup_helpers.h"
#include "percpu_alloc_array.skel.h"
#include "percpu_alloc_cgrp_local_storage.skel.h"
#include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,321 @@ static void test_failure(void) {
RUN_TESTS(percpu_alloc_fail);
}
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz, u32 entries,
+ int nr_cpus, bool test_batch)
+{
+ size_t value_sz = sizeof(u32), value_sz_cpus, value_sz_total;
+ u32 *values = NULL, *values_percpu = NULL;
+ const u32 value = 0xDEADC0DE;
+ int i, j, cpu, map_fd, err;
+ u64 batch = 0, flags;
+ void *values_row;
+ u32 count, v;
+ LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+ value_sz_cpus = value_sz * nr_cpus;
+ values = calloc(entries, value_sz_cpus);
+ if (!ASSERT_OK_PTR(values, "calloc values"))
+ return;
+
+ values_percpu = calloc(entries, roundup(value_sz, 8) * nr_cpus);
+ if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
+ free(values);
+ return;
+ }
+
+ value_sz_total = value_sz_cpus * entries;
+ memset(values, 0, value_sz_total);
+
+ map_fd = bpf_map__fd(map);
+ flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+ err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu|all_cpus"))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, keys, values, flags);
+ if (!ASSERT_ERR(err, "bpf_map_update_elem cpu|all_cpus"))
+ goto out;
+
+ flags = BPF_F_ALL_CPUS;
+ err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags all_cpus"))
+ goto out;
+
+ flags = BPF_F_LOCK | BPF_F_CPU;
+ err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+ if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags BPF_F_LOCK"))
+ goto out;
+
+ flags = BPF_F_LOCK | BPF_F_ALL_CPUS;
+ err = bpf_map_update_elem(map_fd, keys, values, flags);
+ if (!ASSERT_ERR(err, "bpf_map_update_elem BPF_F_LOCK"))
+ goto out;
+
+ flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+ err = bpf_map_update_elem(map_fd, keys, values, flags);
+ if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+ goto out;
+
+ err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+ if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+ goto out;
+
+ err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+ if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+ goto out;
+
+ err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+ if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+ goto out;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ /* clear value on all cpus */
+ values[0] = 0;
+ flags = BPF_F_ALL_CPUS;
+ for (i = 0; i < entries; i++) {
+ err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+ value_sz, flags);
+ if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+ goto out;
+ }
+
+ /* update value on specified cpu */
+ for (i = 0; i < entries; i++) {
+ values[0] = value;
+ flags = (u64)cpu << 32 | BPF_F_CPU;
+ err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+ value_sz, flags);
+ if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+ goto out;
+
+ /* lookup then check value on CPUs */
+ for (j = 0; j < nr_cpus; j++) {
+ flags = (u64)j << 32 | BPF_F_CPU;
+ err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+ value_sz, flags);
+ if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+ goto out;
+ if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+ "bpf_map__lookup_elem value on specified cpu"))
+ goto out;
+ }
+ }
+ }
+
+ if (!test_batch)
+ goto out;
+
+ count = entries;
+ batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+ err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+ if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+ goto out;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ memset(values, 0, value_sz_total);
+
+ /* clear values across all CPUs */
+ count = entries;
+ batch_opts.elem_flags = BPF_F_ALL_CPUS;
+ err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+ if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+ goto out;
+
+ /* update values on specified CPU */
+ for (i = 0; i < entries; i++)
+ values[i] = value;
+
+ count = entries;
+ batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+ err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+ if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+ goto out;
+
+ /* lookup values on specified CPU */
+ batch = 0;
+ count = entries;
+ memset(values, 0, entries * value_sz);
+ err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+ if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+ goto out;
+
+ for (i = 0; i < entries; i++)
+ if (!ASSERT_EQ(values[i], value,
+ "bpf_map_lookup_batch value on specified cpu"))
+ goto out;
+
+ /* lookup values from all CPUs */
+ batch = 0;
+ count = entries;
+ batch_opts.elem_flags = 0;
+ memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * entries);
+ err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values_percpu, &count,
+ &batch_opts);
+ if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+ goto out;
+
+ for (i = 0; i < entries; i++) {
+ values_row = (void *) values_percpu +
+ roundup(value_sz, 8) * i * nr_cpus;
+ for (j = 0; j < nr_cpus; j++) {
+ v = *(u32 *) (values_row + roundup(value_sz, 8) * j);
+ if (!ASSERT_EQ(v, j != cpu ? 0 : value,
+ "bpf_map_lookup_batch value all_cpus"))
+ goto out;
+ }
+ }
+ }
+
+out:
+ free(values_percpu);
+ free(values);
+}
+
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+ struct percpu_alloc_array *skel;
+ size_t key_sz = sizeof(int);
+ int *keys, nr_cpus, i, err;
+ struct bpf_map *map;
+ u32 max_entries;
+
+ nr_cpus = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+ return;
+
+ max_entries = nr_cpus + 1;
+ keys = calloc(max_entries, key_sz);
+ if (!ASSERT_OK_PTR(keys, "calloc keys"))
+ return;
+
+ for (i = 0; i < max_entries; i++)
+ keys[i] = i;
+
+ skel = percpu_alloc_array__open();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open")) {
+ free(keys);
+ return;
+ }
+
+ map = skel->maps.percpu;
+ bpf_map__set_type(map, map_type);
+ bpf_map__set_max_entries(map, max_entries);
+
+ err = percpu_alloc_array__load(skel);
+ if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+ goto out;
+
+ test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries - 1, nr_cpus, true);
+out:
+ percpu_alloc_array__destroy(skel);
+ free(keys);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+ test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+ test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+ test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+ struct percpu_alloc_array *skel = NULL;
+ struct bpf_cgroup_storage_key key;
+ int cgroup, prog_fd, nr_cpus, err;
+ struct bpf_map *map;
+
+ nr_cpus = libbpf_num_possible_cpus();
+ if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+ return;
+
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ return;
+
+ cgroup = create_and_get_cgroup("/cg_percpu");
+ if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup")) {
+ cleanup_cgroup_environment();
+ return;
+ }
+
+ err = join_cgroup("/cg_percpu");
+ if (!ASSERT_OK(err, "join_cgroup"))
+ goto out;
+
+ skel = percpu_alloc_array__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+ err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ map = skel->maps.percpu_cgroup_storage;
+ err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+ if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+ goto out;
+
+ test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, nr_cpus, false);
+out:
+ bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+ close(cgroup);
+ cleanup_cgroup_environment();
+ percpu_alloc_array__destroy(skel);
+}
+
+static void test_map_op_cpu_flag(enum bpf_map_type map_type)
+{
+ u32 max_entries = 1, count = max_entries;
+ u64 flags, batch = 0, val = 0;
+ int err, map_fd, key = 0;
+ LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+ map_fd = bpf_map_create(map_type, "test_cpu_flag", sizeof(int), sizeof(u64), max_entries,
+ NULL);
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
+ return;
+
+ flags = BPF_F_ALL_CPUS;
+ err = bpf_map_update_elem(map_fd, &key, &val, flags);
+ ASSERT_ERR(err, "bpf_map_update_elem all_cpus");
+
+ batch_opts.elem_flags = BPF_F_ALL_CPUS;
+ err = bpf_map_update_batch(map_fd, &key, &val, &count, &batch_opts);
+ ASSERT_ERR(err, "bpf_map_update_batch all_cpus");
+
+ flags = BPF_F_CPU;
+ err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);
+ ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu");
+
+ batch_opts.elem_flags = BPF_F_CPU;
+ err = bpf_map_lookup_batch(map_fd, NULL, &batch, &key, &val, &count, &batch_opts);
+ ASSERT_ERR(err, "bpf_map_lookup_batch cpu");
+
+ close(map_fd);
+}
+
+static void test_array_cpu_flag(void)
+{
+ test_map_op_cpu_flag(BPF_MAP_TYPE_ARRAY);
+}
+
+static void test_hash_cpu_flag(void)
+{
+ test_map_op_cpu_flag(BPF_MAP_TYPE_HASH);
+}
+
void test_percpu_alloc(void)
{
if (test__start_subtest("array"))
@@ -125,4 +441,16 @@ void test_percpu_alloc(void)
test_cgrp_local_storage();
if (test__start_subtest("failure_tests"))
test_failure();
+ if (test__start_subtest("cpu_flag_percpu_array"))
+ test_percpu_array_cpu_flag();
+ if (test__start_subtest("cpu_flag_percpu_hash"))
+ test_percpu_hash_cpu_flag();
+ if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+ test_lru_percpu_hash_cpu_flag();
+ if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+ test_percpu_cgroup_storage_cpu_flag();
+ if (test__start_subtest("cpu_flag_array"))
+ test_array_cpu_flag();
+ if (test__start_subtest("cpu_flag_hash"))
+ test_hash_cpu_flag();
}
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
index 37c2d2608ec0..ed6a2a93d5a5 100644
--- a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -187,4 +187,36 @@ out:
return 0;
}
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, u32);
+} percpu SEC(".maps");
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test_percpu_array, int x)
+{
+ u32 value = 0xDEADC0DE;
+ int key = 0;
+
+ bpf_map_update_elem(&percpu, &key, &value, BPF_ANY);
+ return 0;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, u32);
+} percpu_cgroup_storage SEC(".maps");
+
+SEC("cgroup_skb/egress")
+int cgroup_egress(struct __sk_buff *skb)
+{
+ u32 *val = bpf_get_local_storage(&percpu_cgroup_storage, 0);
+
+ *val = 1;
+ return 1;
+}
+
char _license[] SEC("license") = "GPL";