author     Andrey Ignatov <rdna@fb.com>             2019-02-15 02:01:42 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>   2019-02-15 17:20:42 +0300
commit     1a11a4c74f73adb840d61371c3bb560ed4d7a87f (patch)
tree       c550a378980ca3070ac1398ebe7eea853f2149f2 /tools/lib
parent     f8ebfaf6684b03084858d8c55f81867e5171af08 (diff)
download   linux-1a11a4c74f73adb840d61371c3bb560ed4d7a87f.tar.xz
libbpf: Introduce bpf_map__resize
Add bpf_map__resize() to change max_entries for a map.

Quite often the necessary map size is unknown at compile time and can only be calculated at run time. Currently the following approach is used to do so:

* bpf_object__open_buffer() to open the ELF file from a buffer;
* bpf_object__find_map_by_name() to find the relevant map;
* bpf_map__def() to get the map attributes and build a struct bpf_create_map_attr from them;
* update max_entries in bpf_create_map_attr;
* bpf_create_map_xattr() to create a new map with the updated max_entries;
* bpf_map__reuse_fd() to replace the map in the bpf_object with the newly created one.

Only after all this can the bpf_object finally be loaded, and the map will have the new size. This 1) is quite a lot of steps and 2) doesn't take BTF into account. For 2) even more steps would be needed, and some of them require changes to libbpf (e.g. to get the struct btf * from the bpf_object).

Instead, the whole problem can be solved by introducing a simple bpf_map__resize() API that checks the map and sets the new max_entries if the map is not loaded yet. The new steps are:

* bpf_object__open_buffer() to open the ELF file from a buffer;
* bpf_object__find_map_by_name() to find the relevant map;
* bpf_map__resize() to update max_entries.

That's much simpler and works with BTF (a usage sketch of the new flow follows below).

Signed-off-by: Andrey Ignatov <rdna@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
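As a usage sketch of the new flow: the snippet below opens an object from a buffer, resizes one map, and loads the object. It is a minimal sketch, not part of the patch; the object name "my_obj", the map name "my_map", and the helper open_with_resized_map() are hypothetical, and error handling is kept minimal.

    #include <stddef.h>
    #include <bpf/libbpf.h>	/* "libbpf.h" when building inside tools/lib/bpf */

    /* Hypothetical helper: open an ELF object from a buffer, resize the map
     * "my_map" to 'entries' before it is created, then load the object.
     */
    static struct bpf_object *open_with_resized_map(void *obj_buf, size_t obj_buf_sz,
    						__u32 entries)
    {
    	struct bpf_object *obj;
    	struct bpf_map *map;

    	/* Step 1: open the ELF file from a buffer. */
    	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, "my_obj");
    	if (libbpf_get_error(obj))
    		return NULL;

    	/* Step 2: find the relevant map. */
    	map = bpf_object__find_map_by_name(obj, "my_map");

    	/* Step 3: update max_entries; this only works while the object
    	 * (and therefore the map) has not been loaded yet.
    	 */
    	if (!map || bpf_map__resize(map, entries) || bpf_object__load(obj)) {
    		bpf_object__close(obj);
    		return NULL;
    	}

    	return obj;
    }

Because the map is never recreated, any BTF parsed from the object stays associated with it when bpf_object__load() eventually creates the map with the new size.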
Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/libbpf.c    14
-rw-r--r--  tools/lib/bpf/libbpf.h     1
-rw-r--r--  tools/lib/bpf/libbpf.map   1
3 files changed, 16 insertions, 0 deletions
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 6ef7e6e4cbd3..9597d4dace34 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1113,6 +1113,20 @@ err_free_new_name:
return -errno;
}
+int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+{
+ if (!map || !max_entries)
+ return -EINVAL;
+
+ /* If map already created, its attributes can't be changed. */
+ if (map->fd >= 0)
+ return -EBUSY;
+
+ map->def.max_entries = max_entries;
+
+ return 0;
+}
+
static int
bpf_object__probe_name(struct bpf_object *obj)
{
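To illustrate the semantics of the hunk above: resizing is accepted only while the map has not yet been created (map->fd is still -1), and the argument checks mirror the -EINVAL/-EBUSY returns in the function. This is a hedged sketch rather than an existing test; the object and map names are hypothetical.

    #include <assert.h>
    #include <errno.h>
    #include <bpf/libbpf.h>

    /* Sketch of the expected return values of bpf_map__resize(). */
    static void resize_semantics(struct bpf_object *obj)
    {
    	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");

    	if (!map)
    		return;

    	assert(bpf_map__resize(NULL, 128) == -EINVAL);	/* no map */
    	assert(bpf_map__resize(map, 0) == -EINVAL);	/* zero max_entries */

    	/* Before load the map has no fd yet, so the resize is accepted. */
    	assert(bpf_map__resize(map, 4096) == 0);

    	if (bpf_object__load(obj))
    		return;

    	/* Once the map exists in the kernel its attributes are fixed. */
    	assert(bpf_map__resize(map, 8192) == -EBUSY);
    }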
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 69a7c25eaccc..987fd92661d6 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -294,6 +294,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 5fc8222209f8..16f342c3d4bc 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -130,6 +130,7 @@ LIBBPF_0.0.2 {
bpf_probe_helper;
bpf_probe_map_type;
bpf_probe_prog_type;
+ bpf_map__resize;
bpf_map_lookup_elem_flags;
bpf_object__find_map_fd_by_name;
bpf_get_link_xdp_id;