author:     Martin KaFai Lau <kafai@fb.com>  2016-11-11 21:55:11 +0300
committer:  David S. Miller <davem@davemloft.net>  2016-11-15 19:50:43 +0300
commit:     5db58faf989f16d1d6a3d661aac616f9ca7932aa (patch)
tree:       c2ccc9a89f2752122521989a721ac31a8cf9fc41 /tools/testing/selftests/bpf/test_lru_map.c
parent:     8f8449384ec364ba2a654f11f94e754e4ff719e0 (diff)
download:   linux-5db58faf989f16d1d6a3d661aac616f9ca7932aa.tar.xz
bpf: Add tests for the LRU bpf_htab
This patch adds some unit tests and a test_lru_dist.
test_lru_dist reads numeric keys from a file.
The files used here are generated by a modified fio-genzipf tool,
originally from the fio test suite. The sample data file can be
found here: https://github.com/iamkafai/bpf-lru
The zipf.* data files have 100k numeric keys, and the keys also
range from 1 to 100k.
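As a rough illustration (this is not the modified fio-genzipf tool itself), a key file of this shape could be produced by sampling from a Zipf distribution over 1..100k. The exponent handling and RNG below are simplified assumptions:

```c
/* Hypothetical generator for a zipf.* style key file: one numeric key
 * per line, keys in 1..100k, zipf-distributed with exponent theta.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

int main(int argc, char **argv)
{
	unsigned int nr_keys = 100000, nr_samples = 100000, i;
	double theta = argc > 1 ? atof(argv[1]) : 1.01;	/* zipf exponent */
	double *cdf = malloc(nr_keys * sizeof(*cdf));
	double sum = 0;

	if (!cdf)
		return 1;

	/* Unnormalized zipf weights 1/k^theta, accumulated into a CDF */
	for (i = 0; i < nr_keys; i++) {
		sum += 1.0 / pow(i + 1, theta);
		cdf[i] = sum;
	}

	srand(1);
	for (i = 0; i < nr_samples; i++) {
		double u = ((double)rand() / RAND_MAX) * sum;
		unsigned int lo = 0, hi = nr_keys - 1;

		/* Binary search for the smallest key with cdf >= u */
		while (lo < hi) {
			unsigned int mid = (lo + hi) / 2;

			if (cdf[mid] < u)
				lo = mid + 1;
			else
				hi = mid;
		}
		printf("%u\n", lo + 1);
	}

	free(cdf);
	return 0;
}
```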
test_lru_dist outputs the number of unique keys (nr_unique).
E.g. nr_unique:61239(/100000) below means that 61239 of the 100k
keys are unique. nr_misses counts the keys that cannot be found in
the LRU map, so nr_misses must be >= nr_unique. test_lru_dist also
simulates a perfect LRU map as a comparison:
[root@arch-fb-vm1 ~]# ~/devshare/fb-kernel/linux/samples/bpf/test_lru_dist \
/root/zipf.100k.a1_01.out 4000 1
...
test_parallel_lru_dist (map_type:9 map_flags:0x0):
task:0 BPF LRU: nr_unique:23093(/100000) nr_misses:31603(/100000)
task:0 Perfect LRU: nr_unique:23093(/100000 nr_misses:34328(/100000)
....
test_parallel_lru_dist (map_type:9 map_flags:0x2):
task:0 BPF LRU: nr_unique:23093(/100000) nr_misses:31710(/100000)
task:0 Perfect LRU: nr_unique:23093(/100000 nr_misses:34328(/100000)
[root@arch-fb-vm1 ~]# ~/devshare/fb-kernel/linux/samples/bpf/test_lru_dist \
/root/zipf.100k.a0_01.out 40000 1
...
test_parallel_lru_dist (map_type:9 map_flags:0x0):
task:0 BPF LRU: nr_unique:61239(/100000) nr_misses:67054(/100000)
task:0 Perfect LRU: nr_unique:61239(/100000 nr_misses:66993(/100000)
...
test_parallel_lru_dist (map_type:9 map_flags:0x2):
task:0 BPF LRU: nr_unique:61239(/100000) nr_misses:67068(/100000)
task:0 Perfect LRU: nr_unique:61239(/100000 nr_misses:66993(/100000)
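For reference, the "Perfect LRU" numbers above come from simulating an exact LRU of the given size alongside the BPF map. The sketch below shows one way such a miss counter can be computed; the struct and helper names are hypothetical and this is not the actual samples/bpf/test_lru_dist.c (which uses a hash plus list rather than the O(n) array scan used here for brevity):

```c
/* Hypothetical perfect-LRU miss counter: read numeric keys from a file
 * and count how many references miss an exact LRU of capacity 4000.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct perfect_lru {
	unsigned long long *keys;	/* keys[0] is most recently used */
	unsigned int size, used;
};

/* Returns 1 on hit, 0 on miss; either way the key becomes MRU. */
static int perfect_lru_ref(struct perfect_lru *lru, unsigned long long key)
{
	unsigned int i;

	for (i = 0; i < lru->used; i++) {
		if (lru->keys[i] == key) {
			/* shift keys[0..i-1] right, promote key to MRU */
			memmove(&lru->keys[1], &lru->keys[0], i * sizeof(key));
			lru->keys[0] = key;
			return 1;
		}
	}

	/* Miss: insert as MRU, dropping the LRU entry if the map is full */
	if (lru->used < lru->size)
		lru->used++;
	memmove(&lru->keys[1], &lru->keys[0], (lru->used - 1) * sizeof(key));
	lru->keys[0] = key;
	return 0;
}

int main(int argc, char **argv)
{
	struct perfect_lru lru = { .size = 4000 };
	unsigned long long key, nr_keys = 0, nr_misses = 0;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "r")))
		return 1;
	lru.keys = calloc(lru.size, sizeof(key));
	if (!lru.keys)
		return 1;

	while (fscanf(f, "%llu", &key) == 1) {
		nr_keys++;
		if (!perfect_lru_ref(&lru, key))
			nr_misses++;
	}

	/* nr_unique would additionally need a set over all keys seen;
	 * every first occurrence of a key is necessarily a miss, which
	 * is why nr_misses >= nr_unique.
	 */
	printf("Perfect LRU: nr_misses:%llu(/%llu)\n", nr_misses, nr_keys);
	fclose(f);
	return 0;
}
```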
LRU map has also been added to map_perf_test:
/* Global LRU */
[root@kerneltest003.31.prn1 ~]# for i in 1 4 8; do echo -n "$i cpus: "; \
./map_perf_test 16 $i | awk '{r += $3}END{print r " updates"}'; done
1 cpus: 2934082 updates
4 cpus: 7391434 updates
8 cpus: 6500576 updates
/* Percpu LRU */
[root@kerneltest003.31.prn1 ~]# for i in 1 4 8; do echo -n "$i cpus: "; \
./map_perf_test 32 $i | awk '{r += $3}END{print r " updates"}'; done
1 cpus: 2896553 updates
4 cpus: 9766395 updates
8 cpus: 17460553 updates
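Each map_perf_test worker pins itself to a CPU and reports how many map updates per second it achieved; the awk one-liners above simply sum that column across workers. The real samples/bpf/map_perf_test drives the updates from BPF programs attached to kprobes, so the following userspace-only loop is only a hedged sketch of the measurement's shape (nr_loops, the per-CPU key scheme, and the map size are made up); it reuses the bpf_sys.h helpers from this selftest:

```c
/* Hypothetical userspace analogue of one map_perf_test worker: time a
 * tight bpf_map_update() loop against an LRU map and report a rate.
 */
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <linux/bpf.h>
#include "bpf_sys.h"

static void time_updates(int map_fd, int cpu, unsigned int nr_loops)
{
	unsigned long long key, value = 1234;
	struct timespec t0, t1;
	unsigned int i;
	double secs;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < nr_loops; i++) {
		/* spread keys per cpu so workers do not collide */
		key = ((unsigned long long)cpu << 32) | i;
		assert(!bpf_map_update(map_fd, &key, &value, BPF_ANY));
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
	printf("%d: %.0f updates per sec\n", cpu, nr_loops / secs);
}

int main(void)
{
	int map_fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH,
				    sizeof(unsigned long long),
				    sizeof(unsigned long long),
				    10000, 0);

	assert(map_fd != -1);
	time_updates(map_fd, 0, 1000000);
	return 0;
}
```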
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'tools/testing/selftests/bpf/test_lru_map.c')
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c  583
1 file changed, 583 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
new file mode 100644
index 000000000000..627757ed7836
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -0,0 +1,583 @@
/*
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <stdlib.h>
#include <time.h>
#include "bpf_sys.h"

#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(16)

static int nr_cpus;

static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_map_create(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_map_create");

	return map_fd;
}

static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup(map1, &next_key, value1));
		ret = bpf_map_lookup(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found from map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}

static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}

static int sched_next_online(int pid, int next_to_try)
{
	cpu_set_t cpuset;

	if (next_to_try == nr_cpus)
		return -1;

	while (next_to_try < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next_to_try++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
			break;
	}

	return next_to_try;
}

/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1
 * Add Key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only key=1 and key=3 are found
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */

	key = 1;
	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 &&
	       /* key=1 already exists */
	       errno == EEXIST);

	assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 &&
	       errno == EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 *   => 1+tgt_free/2 to LOCAL_FREE_TARGET will be removed by LRU
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The percpu lru list (i.e. each cpu has its own LRU
		 * list) does not have a local free list.  Hence,
		 * it will only free old nodes till there is no free
		 * node from the LRU list.  Hence, this test does not
		 * apply to BPF_F_NO_COMMON_LRU
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 *   => 1+tgt_free/2 to LOCAL_FREE_TARGET will be
	 *      removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Key 1+tgt_free to tgt_free*3/2
 *      will be removed from the LRU because it has never
 *      been looked up and its ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The percpu lru list (i.e. each cpu has its own LRU
		 * list) does not have a local free list.  Hence,
		 * it will only free old nodes till there is no free
		 * node from the LRU list.  Hence, this test does not
		 * apply to BPF_F_NO_COMMON_LRU
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Any bpf_map_update will require to acquire a new node
	 * from LRU first.
	 *
	 * The local list is running out of free nodes.
	 * It gets from the global LRU list which tries to
	 * shrink the inactive list to get tgt_free
	 * number of free nodes.
	 *
	 * Hence, the oldest key 1 to tgt_free/2
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_delete(lru_map_fd, &key));
	} else {
		assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2*tgt_free
 * It is to test the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *   => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Test deletion */
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	key = 1;
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete(lru_map_fd, &key));
		assert(bpf_map_delete(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by the previous CPU can be found */
	assert(!bpf_map_lookup(map_fd, &last_key, value));

	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup(map_fd, &key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup(map_fd, &last_key, value));
}

/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_sched_cpu = 0;
	int map_fd;
	int i;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));

	for (i = 0; i < nr_cpus; i++) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			next_sched_cpu = sched_next_online(0, next_sched_cpu);
			if (next_sched_cpu != -1)
				do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn #%d process\n", i);
			exit(1);
		} else {
			int status;

			/* It is mostly redundant and just allows the parent
			 * process to update next_sched_cpu for the next child
			 * process
			 */
			next_sched_cpu = sched_next_online(pid, next_sched_cpu);

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);

	printf("Pass\n");
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

	assert(!setrlimit(RLIMIT_MEMLOCK, &r));

	nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;

		for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);

			printf("\n");
		}
	}

	return 0;
}