author | subashab@codeaurora.org <subashab@codeaurora.org> | 2017-03-09 02:36:49 +0300
committer | David S. Miller <davem@davemloft.net> | 2017-03-13 08:54:17 +0300
commit | 5425077d73e0c8e7e9267ca8397cc0e2293c1fb9 (patch)
tree | 32a44a8f404bf3690be2bb1b6f933ef976119a42 /net/ipv6/udp.c
parent | 49b499718fa1b0d639663cfd03085b9bfd23cdc8 (diff)
download | linux-5425077d73e0c8e7e9267ca8397cc0e2293c1fb9.tar.xz
net: ipv6: Add early demux handler for UDP unicast
While running a single-stream UDPv6 test, we observed that the amount
of CPU time spent in the NET_RX softirq was much greater than for UDPv4
at an equivalent receive rate. The test was run on an ARM64 based
Android system. Further analysis with perf showed that UDPv6 was
spending significant time in the statistics netfilter targets, which
performed a socket lookup per packet. These statistics rules perform a
lookup whenever there is no socket associated with the skb. Since there
are multiple instances of these rules, keyed by UID, each skb incurs as
many lookups as there are rules.
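To illustrate the cost being avoided, here is a hypothetical, simplified sketch of such a per-UID accounting match. This is not the actual xt_qtaguid/xt_owner code; the name uid_stats_match and its signature are made up for illustration. The point is that when skb->sk is not already set, every rule of this kind repeats the same socket lookup.

```c
/* Hypothetical per-UID statistics match (illustrative only).  If early
 * demux has not attached a socket to the skb, each rule of this kind
 * repeats the hash-table lookup that the receive path does anyway.
 */
static bool uid_stats_match(const struct sk_buff *skb, kuid_t uid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct udphdr *uh = udp_hdr(skb);
	struct sock *sk = skb->sk;

	if (!sk)	/* no early demux: per-rule socket lookup */
		sk = __udp6_lib_lookup(dev_net(skb->dev),
				       &ip6h->saddr, uh->source,
				       &ip6h->daddr, uh->dest,
				       skb->dev->ifindex, &udp_table, NULL);

	return sk && uid_eq(sock_i_uid(sk), uid);
}
```

With udp_v6_early_demux() attaching the socket once per packet, the per-rule lookup branch above no longer needs to run.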
By introducing early demux for UDPv6, we avoid these redundant lookups.
This also helped improve performance (800 Mbps -> 870 Mbps) on a
CPU-limited system in a single-stream UDPv6 receive test with 1450-byte
datagrams using iperf.
v1->v2: Use the IPv6 cookie to validate the dst instead of 0, as
suggested by Eric
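For context on that v2 change, the sketch below is an approximate paraphrase of the generic dst_check() helper from include/net/dst.h in this era (details may differ slightly by version). The cookie recorded in inet6_sk(sk)->rx_dst_cookie identifies the generation of the cached route, so a dst cached on the socket is rejected after routing state changes, which a constant 0 cookie (as in v1) cannot express.

```c
/* Approximate form of the generic helper (include/net/dst.h): the cookie
 * is only consulted when the dst is marked obsolete, and the protocol's
 * ->check() op decides whether the cached entry is still valid.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
```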
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r-- | net/ipv6/udp.c | 59
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401e3bc6..08a188ffe070 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -864,6 +864,64 @@ discard:
 	return 0;
 }
 
+static struct sock *__udp6_lib_demux_lookup(struct net *net,
+			__be16 loc_port, const struct in6_addr *loc_addr,
+			__be16 rmt_port, const struct in6_addr *rmt_addr,
+			int dif)
+{
+	struct sock *sk;
+
+	rcu_read_lock();
+	sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
+			       dif, &udp_table, NULL);
+	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
+	rcu_read_unlock();
+
+	return sk;
+}
+
+static void udp_v6_early_demux(struct sk_buff *skb)
+{
+	struct net *net = dev_net(skb->dev);
+	const struct udphdr *uh;
+	struct sock *sk;
+	struct dst_entry *dst;
+	int dif = skb->dev->ifindex;
+
+	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+	    sizeof(struct udphdr)))
+		return;
+
+	uh = udp_hdr(skb);
+
+	if (skb->pkt_type == PACKET_HOST)
+		sk = __udp6_lib_demux_lookup(net, uh->dest,
+					     &ipv6_hdr(skb)->daddr,
+					     uh->source, &ipv6_hdr(skb)->saddr,
+					     dif);
+	else
+		return;
+
+	if (!sk)
+		return;
+
+	skb->sk = sk;
+	skb->destructor = sock_efree;
+	dst = READ_ONCE(sk->sk_rx_dst);
+
+	if (dst)
+		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+	if (dst) {
+		if (dst->flags & DST_NOCACHE) {
+			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+				skb_dst_set(skb, dst);
+		} else {
+			skb_dst_set_noref(skb, dst);
+		}
+	}
+}
+
 static __inline__ int udpv6_rcv(struct sk_buff *skb)
 {
 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
@@ -1379,6 +1437,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
 #endif
 
 static const struct inet6_protocol udpv6_protocol = {
+	.early_demux	=	udp_v6_early_demux,
 	.handler	=	udpv6_rcv,
 	.err_handler	=	udpv6_err,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
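For context on how the new handler gets invoked (not part of this patch): the .early_demux callback registered above is called from the IPv6 receive path in ip6_rcv_finish(), before the routing lookup and gated by the ip_early_demux sysctl. The sketch below is a paraphrase of that caller around this point in the tree; exact details (for example the sysctl living under net->ipv4, or the l3mdev handling omitted here) vary between kernel versions.

```c
/* Paraphrased caller (net/ipv6/ip6_input.c, approximate for this era):
 * early demux runs before ip6_route_input(), so a socket found here can
 * provide both skb->sk and a validated cached dst, letting the packet
 * skip the per-packet route lookup as well.
 */
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
		const struct inet6_protocol *ipprot;

		ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
		if (ipprot && ipprot->early_demux)
			ipprot->early_demux(skb);
	}

	if (!skb_valid_dst(skb))
		ip6_route_input(skb);

	return dst_input(skb);
}
```

Note that the demuxed socket reference taken with atomic_inc_not_zero() in the patch is released by sock_efree() when the skb is freed, since the patch sets skb->destructor accordingly.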