author     Anton Protopopov <aspsk@isovalent.com>    2023-05-22 18:45:58 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2023-05-30 16:03:21 +0300
commit     1a9e80f757bbb1562d82e350afce2bb2f712cc3d (patch)
tree       3276fcfc559f64ff1949b23ac29aba857e3b9a32
parent     177ee41f6162bd6c037f83ba070d2ac3bbf7c51c (diff)
download   linux-1a9e80f757bbb1562d82e350afce2bb2f712cc3d.tar.xz
bpf: fix a memory leak in the LRU and LRU_PERCPU hash maps
commit b34ffb0c6d23583830f9327864b9c1f486003305 upstream.

The LRU and LRU_PERCPU maps allocate a new element on update before locking the
target hash table bucket. Right after that, the maps try to lock the bucket. If
this fails, the maps return -EBUSY to the caller without releasing the allocated
element. This makes the element untracked: it doesn't belong to either of the
free lists, and it doesn't belong to the hash table, so it can't be reused; this
eventually leads to a permanent -ENOMEM on LRU map updates, which is unexpected.

Fix this by returning the element to the local free list if bucket locking fails.

Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked")
Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
Link: https://lore.kernel.org/r/20230522154558.2166815-1-aspsk@isovalent.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
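For illustration, below is a minimal userspace sketch of the fixed control flow. It is not the kernel code: prealloc_lru_node(), trylock_bucket(), unlock_bucket() and push_free() are hypothetical stubs standing in for prealloc_lru_pop(), htab_lock_bucket(), htab_unlock_bucket() and htab_lru_push_free(). It only models how a failed bucket lock now hands the pre-allocated element back to the free list instead of orphaning it.

/*
 * Simplified sketch of the fixed htab_lru_map_update_elem() flow.
 * All helpers here are stand-ins, not the real kernel functions.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct lru_node {
	int key;
	long value;
};

static int free_nodes = 1;	/* pretend the free list holds one node */

/* Stand-in for prealloc_lru_pop(): take a node from the free list. */
static struct lru_node *prealloc_lru_node(void)
{
	if (!free_nodes)
		return NULL;
	free_nodes--;
	return calloc(1, sizeof(struct lru_node));
}

/* Stand-in for htab_lru_push_free(): return a node to the free list. */
static void push_free(struct lru_node *n)
{
	free(n);
	free_nodes++;
}

/* Stand-in for htab_lock_bucket(); always "contended" to show the error path. */
static bool trylock_bucket(void)
{
	return false;
}

static void unlock_bucket(void)
{
}

static int lru_map_update_elem(int key, long value)
{
	struct lru_node *l_new, *l_old = NULL;
	int ret;

	/* The element is allocated *before* the bucket lock is taken. */
	l_new = prealloc_lru_node();
	if (!l_new)
		return -ENOMEM;
	l_new->key = key;
	l_new->value = value;

	if (!trylock_bucket()) {
		ret = -EBUSY;
		/*
		 * The pre-fix code returned ret here, orphaning l_new:
		 * not in the hash table, not on a free list. The fix
		 * jumps to the cleanup below instead.
		 */
		goto err_lock_bucket;
	}

	/* ... look up l_old, link l_new into the bucket ... */
	ret = 0;
	unlock_bucket();
err_lock_bucket:
	if (ret)
		push_free(l_new);	/* node goes back to the free list */
	else if (l_old)
		push_free(l_old);
	return ret;
}

int main(void)
{
	/*
	 * With the fix, every attempt fails with -EBUSY but nothing leaks
	 * (free_nodes returns to 1 each time); with the old early return,
	 * free_nodes would drop to 0 and the next attempt would already
	 * report -ENOMEM.
	 */
	for (int i = 0; i < 3; i++)
		printf("update #%d: ret=%d, free nodes left: %d\n",
		       i, lru_map_update_elem(i, i), free_nodes);
	return 0;
}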
-rw-r--r--   kernel/bpf/hashtab.c   6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4a3d0a744702..e4e7f343346f 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1203,7 +1203,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
-		return ret;
+		goto err_lock_bucket;
 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1224,6 +1224,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
+err_lock_bucket:
 	if (ret)
 		htab_lru_push_free(htab, l_new);
 	else if (l_old)
@@ -1326,7 +1327,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
-		return ret;
+		goto err_lock_bucket;
 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1349,6 +1350,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	ret = 0;
 err:
 	htab_unlock_bucket(htab, b, hash, flags);
+err_lock_bucket:
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;
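A short note on the shape of the fix: both functions reuse the cleanup that already existed on their error paths. htab_lru_push_free() and bpf_lru_push_free() hand the pre-allocated node back to the LRU's local free list, so a later update on the same map can pop it again. The only change is that the lock-failure path now reaches that cleanup through the new err_lock_bucket label, placed after htab_unlock_bucket() so the not-yet-taken bucket lock is never released, instead of returning early with the element still allocated.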