Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--  net/ipv6/tcp_ipv6.c  178
1 file changed, 63 insertions(+), 115 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5d46832c6f72..f73a97f6e68e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -104,19 +104,6 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
}
}
-static void tcp_v6_hash(struct sock *sk)
-{
- if (sk->sk_state != TCP_CLOSE) {
- if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
- tcp_prot.hash(sk);
- return;
- }
- local_bh_disable();
- __inet6_hash(sk, NULL);
- local_bh_enable();
- }
-}
-
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
@@ -154,7 +141,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
- if (flowlabel == NULL)
+ if (!flowlabel)
return -EINVAL;
fl6_sock_release(flowlabel);
}
@@ -233,11 +220,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
goto failure;
- } else {
- ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
- &sk->sk_v6_rcv_saddr);
}
+ np->saddr = sk->sk_v6_rcv_saddr;
return err;
}
@@ -263,7 +247,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
goto failure;
}
- if (saddr == NULL) {
+ if (!saddr) {
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
}
@@ -340,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+ struct net *net = dev_net(skb->dev);
+ struct request_sock *fastopen;
struct ipv6_pinfo *np;
- struct sock *sk;
- int err;
struct tcp_sock *tp;
- struct request_sock *fastopen;
__u32 seq, snd_una;
- struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ int err;
- sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
- th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+ &hdr->daddr, th->dest,
+ &hdr->saddr, ntohs(th->source),
+ skb->dev->ifindex);
- if (sk == NULL) {
+ if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
@@ -361,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
inet_twsk_put(inet_twsk(sk));
return;
}
+ seq = ntohl(th->seq);
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+ return tcp_req_err(sk, seq);
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -375,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
tp = tcp_sk(sk);
- seq = ntohl(th->seq);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -419,37 +407,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
/* Might be for an request_sock */
switch (sk->sk_state) {
- struct request_sock *req, **prev;
- case TCP_LISTEN:
- if (sock_owned_by_user(sk))
- goto out;
-
- /* Note : We use inet6_iif() here, not tcp_v6_iif() */
- req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
- &hdr->saddr, inet6_iif(skb));
- if (!req)
- goto out;
-
- /* ICMPs are not backlogged, hence we cannot get
- * an established socket here.
- */
- WARN_ON(req->sk != NULL);
-
- if (seq != tcp_rsk(req)->snt_isn) {
- NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- goto out;
- }
-
- inet_csk_reqsk_queue_drop(sk, req, prev);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
- goto out;
-
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
* is already accepted it is treated as a connected one below.
*/
- if (fastopen && fastopen->sk == NULL)
+ if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
@@ -497,7 +460,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
&ireq->ir_v6_rmt_addr);
fl6->daddr = ireq->ir_v6_rmt_addr;
- if (np->repflow && (ireq->pktopts != NULL))
+ if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
skb_set_queue_mapping(skb, queue_mapping);
@@ -523,17 +486,11 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
- struct sock *addr_sk)
+ const struct sock *addr_sk)
{
return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
-static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
- struct request_sock *req)
-{
- return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
-}
-
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
int optlen)
{
@@ -619,9 +576,9 @@ clear_hash_noput:
return 1;
}
-static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v6_md5_hash_skb(char *md5_hash,
+ const struct tcp_md5sig_key *key,
const struct sock *sk,
- const struct request_sock *req,
const struct sk_buff *skb)
{
const struct in6_addr *saddr, *daddr;
@@ -629,12 +586,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
struct hash_desc *desc;
const struct tcphdr *th = tcp_hdr(skb);
- if (sk) {
- saddr = &inet6_sk(sk)->saddr;
+ if (sk) { /* valid for establish/request sockets */
+ saddr = &sk->sk_v6_rcv_saddr;
daddr = &sk->sk_v6_daddr;
- } else if (req) {
- saddr = &inet_rsk(req)->ir_v6_loc_addr;
- daddr = &inet_rsk(req)->ir_v6_rmt_addr;
} else {
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
saddr = &ip6h->saddr;
@@ -670,8 +624,7 @@ clear_hash_noput:
return 1;
}
-static int __tcp_v6_inbound_md5_hash(struct sock *sk,
- const struct sk_buff *skb)
+static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
@@ -685,44 +638,32 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
- return 0;
+ return false;
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- return 1;
+ return true;
}
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
- return 1;
+ return true;
}
/* check the signature */
genhash = tcp_v6_md5_hash_skb(newhash,
hash_expected,
- NULL, NULL, skb);
+ NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source),
&ip6h->daddr, ntohs(th->dest));
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
- int ret;
-
- rcu_read_lock();
- ret = __tcp_v6_inbound_md5_hash(sk, skb);
- rcu_read_unlock();
-
- return ret;
-}
-
#endif
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
@@ -734,8 +675,6 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- ireq->ir_iif = sk->sk_bound_dev_if;
-
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
@@ -774,7 +713,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
- .md5_lookup = tcp_v6_reqsk_md5_lookup,
+ .req_md5_lookup = tcp_v6_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
#endif
.init_req = tcp_v6_init_req,
@@ -811,7 +750,7 @@ static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
GFP_ATOMIC);
- if (buff == NULL)
+ if (!buff)
return;
skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
@@ -931,7 +870,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
if (!key)
goto release_sk1;
- genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
+ genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
goto release_sk1;
} else {
@@ -997,17 +936,19 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
- struct request_sock *req, **prev;
const struct tcphdr *th = tcp_hdr(skb);
+ struct request_sock *req;
struct sock *nsk;
/* Find possible connection requests. */
- req = inet6_csk_search_req(sk, &prev, th->source,
+ req = inet6_csk_search_req(sk, th->source,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
- if (req)
- return tcp_check_req(sk, skb, req, prev, false);
-
+ if (req) {
+ nsk = tcp_check_req(sk, skb, req, false);
+ reqsk_put(req);
+ return nsk;
+ }
nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr, ntohs(th->dest),
@@ -1067,7 +1008,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
- if (newsk == NULL)
+ if (!newsk)
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1079,11 +1020,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
- ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
- newsk->sk_v6_rcv_saddr = newnp->saddr;
+ newnp->saddr = newsk->sk_v6_rcv_saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1128,7 +1065,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
newsk = tcp_create_openreq_child(sk, req, skb);
- if (newsk == NULL)
+ if (!newsk)
goto out_nonewsk;
/*
@@ -1170,7 +1107,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
- if (ireq->pktopts != NULL) {
+ if (ireq->pktopts) {
newnp->pktoptions = skb_clone(ireq->pktopts,
sk_gfp_atomic(sk, GFP_ATOMIC));
consume_skb(ireq->pktopts);
@@ -1215,7 +1152,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
- if (key != NULL) {
+ if (key) {
/* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
@@ -1232,7 +1169,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
tcp_done(newsk);
goto out;
}
- __inet6_hash(newsk, NULL);
+ __inet_hash(newsk, NULL);
return newsk;
@@ -1411,6 +1348,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
TCP_SKB_CB(skb)->sacked = 0;
}
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+ /* We need to move header back to the beginning if xfrm6_policy_check()
+ * and tcp_v6_fill_cb() are going to be called again.
+ */
+ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+ sizeof(struct inet6_skb_parm));
+}
+
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
@@ -1538,11 +1484,12 @@ do_time_wait:
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr,
ntohs(th->dest), tcp_v6_iif(skb));
- if (sk2 != NULL) {
+ if (sk2) {
struct inet_timewait_sock *tw = inet_twsk(sk);
inet_twsk_deschedule(tw, &tcp_death_row);
inet_twsk_put(tw);
sk = sk2;
+ tcp_v6_restore_cb(skb);
goto process;
}
/* Fall through to ACK */
@@ -1551,6 +1498,7 @@ do_time_wait:
tcp_v6_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
+ tcp_v6_restore_cb(skb);
goto no_tcp_socket;
case TCP_TW_SUCCESS:
;
@@ -1584,8 +1532,8 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
if (sk) {
skb->sk = sk;
skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ if (sk_fullsock(sk)) {
+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
@@ -1689,9 +1637,9 @@ static void tcp_v6_destroy_sock(struct sock *sk)
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
- const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
+ struct request_sock *req, int i, kuid_t uid)
{
- int ttd = req->expires - jiffies;
+ long ttd = req->rsk_timer.expires - jiffies;
const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
@@ -1827,7 +1775,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
get_tcp6_sock(seq, v, st->num);
break;
case TCP_SEQ_STATE_OPENREQ:
- get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
+ get_openreq6(seq, v, st->num, st->uid);
break;
}
out:
@@ -1891,7 +1839,7 @@ struct proto tcpv6_prot = {
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv,
.release_cb = tcp_release_cb,
- .hash = tcp_v6_hash,
+ .hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,