commit d5bee7374b68de3c44586d46e9e61ffc97a1e886
parent 5bb4c45d466cb28dd0a7f15f80c4cb7768054e94
tree   9c1964e017b681e310c7ec48352bd124da90930c /net
author    Jakub Sitnicki <jakub@cloudflare.com>   2020-03-17 20:04:39 +0300
committer David S. Miller <davem@davemloft.net>   2020-03-22 06:08:17 +0300
net/tls: Annotate access to sk_prot with READ_ONCE/WRITE_ONCE
sockmap performs lockless writes to sk->sk_prot on the following paths:

tcp_bpf_{recvmsg|sendmsg} / sock_map_unref
  sk_psock_put
    sk_psock_drop
      sk_psock_restore_proto
        WRITE_ONCE(sk->sk_prot, proto)
To prevent load/store tearing [1], and to make tooling aware of intentional
shared access [2], we need to annotate other sites that access sk_prot with
READ_ONCE/WRITE_ONCE macros.
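For context on what the annotations buy, here is a minimal sketch of the pattern, assuming a toy struct conn / struct ops pair; both are hypothetical stand-ins for struct sock and struct proto, not kernel types:

	#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */

	struct conn;

	struct ops {
		void (*handler)(struct conn *c);
	};

	struct conn {
		const struct ops *shared_ops;	/* swapped locklessly, like sk->sk_prot */
	};

	/* Writer: a single, untorn store; also marks the access as an
	 * intentional data race for tooling such as KCSAN.
	 */
	static void swap_ops(struct conn *c, const struct ops *new_ops)
	{
		WRITE_ONCE(c->shared_ops, new_ops);
	}

	/* Reader: snapshot the pointer once, then only use the snapshot,
	 * so the compiler can neither tear nor refetch the load.
	 */
	static void dispatch(struct conn *c)
	{
		const struct ops *ops = READ_ONCE(c->shared_ops);

		ops->handler(c);
	}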
Change done with Coccinelle using the following semantic patch:
@@
expression E;
identifier I;
struct sock *sk;
identifier sk_prot =~ "^sk_prot$";
@@
(
E =
-sk->sk_prot
+READ_ONCE(sk->sk_prot)
|
-sk->sk_prot = E
+WRITE_ONCE(sk->sk_prot, E)
|
-sk->sk_prot
+READ_ONCE(sk->sk_prot)
->I
)
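Read in order, the disjunction handles three shapes: a read assigned into an expression, a plain write, and a read that is immediately dereferenced (the trailing ->I). A hedged sketch of what spatch would do to such a site; example_before/example_after and base_prot are invented for illustration, while close() is the real struct proto callback:

	#include <net/sock.h>

	/* Hypothetical call site before the patch. */
	static void example_before(struct sock *sk, struct proto *base_prot)
	{
		const struct proto *p = sk->sk_prot;	/* rule 1: E = read       */

		sk->sk_prot = base_prot;		/* rule 2: plain write    */
		sk->sk_prot->close(sk, 0);		/* rule 3: read, then ->I */
		(void)p;
	}

	/* The same site after spatch applies the three rules. */
	static void example_after(struct sock *sk, struct proto *base_prot)
	{
		const struct proto *p = READ_ONCE(sk->sk_prot);

		WRITE_ONCE(sk->sk_prot, base_prot);
		READ_ONCE(sk->sk_prot)->close(sk, 0);
		(void)p;
	}

Applied tree-wide, rules like these matched only the two net/tls files shown in the diffstat below.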
Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/tls/tls_device.c | 2 +-
 net/tls/tls_main.c   | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 1c5574e2e058..a562ebaaa33c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -366,7 +366,7 @@ static int tls_do_allocation(struct sock *sk,
 	if (!offload_ctx->open_record) {
 		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
 						   sk->sk_allocation))) {
-			sk->sk_prot->enter_memory_pressure(sk);
+			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
 			sk_stream_moderate_sndbuf(sk);
 			return -ENOMEM;
 		}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e7de0306a7df..156efce50dbd 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -69,7 +69,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 {
 	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
 
-	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
+	WRITE_ONCE(sk->sk_prot,
+		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -312,7 +313,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 	write_lock_bh(&sk->sk_callback_lock);
 	if (free_ctx)
 		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
-	sk->sk_prot = ctx->sk_proto;
+	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
 	if (sk->sk_write_space == tls_write_space)
 		sk->sk_write_space = ctx->sk_write_space;
 	write_unlock_bh(&sk->sk_callback_lock);
@@ -621,14 +622,14 @@ struct tls_context *tls_ctx_create(struct sock *sk)
 	mutex_init(&ctx->tx_lock);
 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
-	ctx->sk_proto = sk->sk_prot;
+	ctx->sk_proto = READ_ONCE(sk->sk_prot);
 	return ctx;
 }
 
 static void tls_build_proto(struct sock *sk)
 {
 	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
-	const struct proto *prot = sk->sk_prot;
+	const struct proto *prot = READ_ONCE(sk->sk_prot);
 
 	/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
 	if (ip_ver == TLSV6 &&
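The tls_device.c hunk is the ->I rule in action. Without the annotation, nothing stops the compiler from refetching or tearing the load of sk->sk_prot while sockmap's lockless writer runs; a side-by-side sketch of that call site:

	/* Unannotated: the pointer may be torn or reloaded between uses,
	 * racing the WRITE_ONCE(sk->sk_prot, proto) path shown in the
	 * commit message.
	 */
	sk->sk_prot->enter_memory_pressure(sk);

	/* Annotated, as in the hunk above: one untorn load of the proto
	 * pointer, then the indirect call through that snapshot.
	 */
	READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);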