author     Xin Liu <liuxin350@huawei.com>  2023-04-06 15:26:22 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-05-11 17:03:21 +0300
commit     1d4ac7b0ffc9dc683b8dafc78b8b93177071a02c (patch)
tree       ac2fb05a85f38715117d123f16850799e7c58a67
parent     cfc7ee210f2f3c73b4981b7ab39fde90de43fbf5 (diff)
download   linux-1d4ac7b0ffc9dc683b8dafc78b8b93177071a02c.tar.xz
bpf, sockmap: fix deadlocks in the sockhash and sockmap
[ Upstream commit ed17aa92dc56b6d8883e4b7a8f1c6fbf5ed6cd29 ]

In the report in [0], Hsin-Wei Hung attaches an eBPF program to the
sched_switch tracepoint that does only one thing: it deletes a fixed
element from a sockhash.

Elements in a sockhash are rarely deleted actively by users or by eBPF
programs, so little attention has been paid to the deletion path. Unlike
regular hash maps, sockhash only provides spin_lock_bh protection, so a
delete triggered from interrupt context can try to take a bucket lock
that is already held on the same CPU and deadlock.

[0]: https://lore.kernel.org/all/CABcoxUayum5oOqFMMqAeWuS8+EzojquSOSyDA3J_2omY=2EeAg@mail.gmail.com/

Reported-by: Hsin-Wei Hung <hsinweih@uci.edu>
Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface")
Signed-off-by: Xin Liu <liuxin350@huawei.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/r/20230406122622.109978-1-liuxin350@huawei.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
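For reference, the trigger described in [0] boils down to a tracepoint program along the lines of the sketch below. This is not part of the patch; the map definition, key value, program name, and section name are illustrative assumptions.

/* Hypothetical reproducer sketch, not part of this patch: a sched_switch
 * tracepoint program that deletes a fixed element from a sockhash, as the
 * report in [0] describes. Map layout and key are illustrative only. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} sock_hash SEC(".maps");

SEC("tracepoint/sched/sched_switch")
int delete_on_switch(void *ctx)
{
	__u32 key = 0;	/* fixed element, per the report */

	/* Runs in tracing context; with only _bh protection this delete can
	 * re-take a bucket lock already held on this CPU and deadlock. */
	bpf_map_delete_elem(&sock_hash, &key);
	return 0;
}

char _license[] SEC("license") = "GPL";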
-rw-r--r--  net/core/sock_map.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index a68a7290a3b2..2408965e5c7b 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -414,8 +414,9 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 {
 	struct sock *sk;
 	int err = 0;
+	unsigned long flags;
 
-	raw_spin_lock_bh(&stab->lock);
+	raw_spin_lock_irqsave(&stab->lock, flags);
 	sk = *psk;
 	if (!sk_test || sk_test == sk)
 		sk = xchg(psk, NULL);
@@ -425,7 +426,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
 	else
 		err = -EINVAL;
 
-	raw_spin_unlock_bh(&stab->lock);
+	raw_spin_unlock_irqrestore(&stab->lock, flags);
 	return err;
 }
@@ -923,11 +924,12 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 	struct bpf_shtab_bucket *bucket;
 	struct bpf_shtab_elem *elem;
 	int ret = -ENOENT;
+	unsigned long flags;
 
 	hash = sock_hash_bucket_hash(key, key_size);
 	bucket = sock_hash_select_bucket(htab, hash);
 
-	raw_spin_lock_bh(&bucket->lock);
+	raw_spin_lock_irqsave(&bucket->lock, flags);
 	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
 	if (elem) {
 		hlist_del_rcu(&elem->node);
@@ -935,7 +937,7 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 		sock_hash_free_elem(htab, elem);
 		ret = 0;
 	}
-	raw_spin_unlock_bh(&bucket->lock);
+	raw_spin_unlock_irqrestore(&bucket->lock, flags);
 	return ret;
 }
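Taken together, the last two hunks leave sock_hash_delete_elem() looking roughly as follows. This is an assembled view for readability, not part of the patch; the lines outside the hunks (the container_of()/key_size setup and the sock_map_unref() call) are reconstructed from the mainline source and may differ slightly in this stable branch.

/* sock_hash_delete_elem() after the patch, assembled from the hunks above
 * plus reconstructed surrounding context for readability. */
static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;
	unsigned long flags;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	/* irqsave instead of _bh: the delete may run from tracing/interrupt
	 * context, so interrupts stay disabled while the bucket lock is held. */
	raw_spin_lock_irqsave(&bucket->lock, flags);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	raw_spin_unlock_irqrestore(&bucket->lock, flags);
	return ret;
}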