author	David S. Miller <davem@davemloft.net>	2020-06-21 03:47:53 +0300
committer	David S. Miller <davem@davemloft.net>	2020-06-21 03:47:53 +0300
commit	c8f8a9f8e5e51bec320a5c2649e042b40630c564 (patch)
tree	6c91f581ec5db35d4d77554be9d293b2ae57a776
parent	902053f17dbeff125926bb07e781f6e63f3efe09 (diff)
parent	dd2e0b86fc4ee146ac8f3275833d0187efeb950a (diff)
download	linux-c8f8a9f8e5e51bec320a5c2649e042b40630c564.tar.xz
Merge branch 'tcp-remove-two-indirect-calls-from-xmit-path'
Eric Dumazet says:

====================
tcp: remove two indirect calls from xmit path

__tcp_transmit_skb() does two indirect calls per packet; let's get rid of them.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
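For readers unfamiliar with the indirect-call wrapper machinery from include/linux/indirect_call_wrapper.h that this series relies on: the idea is to compare the function pointer against its likely targets and take a direct call on a match, falling back to the indirect call (and its retpoline) only on a miss. Below is a minimal user-space sketch of that idea; the macro and function names (INDIRECT_CALL_2_SKETCH, xmit_v4, xmit_v6) are illustrative stand-ins, not the kernel's actual definitions.

/* Sketch only: devirtualize an indirect call by comparing the pointer
 * against known targets and calling them directly on a match.
 */
#include <stdio.h>

#define likely(x) __builtin_expect(!!(x), 1)

#define INDIRECT_CALL_2_SKETCH(f, f2, f1, ...)				\
({									\
	likely((f) == (f2)) ? (f2)(__VA_ARGS__) :			\
	likely((f) == (f1)) ? (f1)(__VA_ARGS__) :			\
			      (f)(__VA_ARGS__);				\
})

static int xmit_v4(int len) { return printf("v4 xmit, %d bytes\n", len); }
static int xmit_v6(int len) { return printf("v6 xmit, %d bytes\n", len); }

int main(void)
{
	int (*queue_xmit)(int) = xmit_v4;	/* chosen per address family */

	/* The common case becomes a compare plus a direct call. */
	INDIRECT_CALL_2_SKETCH(queue_xmit, xmit_v6, xmit_v4, 1500);
	return 0;
}

The INDIRECT_CALL_INET() sites in the __tcp_transmit_skb() hunks below apply this pattern to icsk->icsk_af_ops->send_check (tcp_v6_send_check / tcp_v4_send_check) and ->queue_xmit (inet6_csk_xmit / ip_queue_xmit).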
-rw-r--r--	include/net/ip.h	6
-rw-r--r--	include/net/ip6_checksum.h	9
-rw-r--r--	include/net/tcp.h	4
-rw-r--r--	net/ipv4/ip_output.c	6
-rw-r--r--	net/ipv4/tcp_output.c	12
-rw-r--r--	net/ipv6/tcp_ipv6.c	7
6 files changed, 28 insertions, 16 deletions
diff --git a/include/net/ip.h b/include/net/ip.h
index 04ebe7bf54c6..862c9545833a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -231,11 +231,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);
-static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
- struct flowi *fl)
-{
- return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
-}
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 27ec612cd4a4..b3f4eaa88672 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -85,15 +85,6 @@ static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
-}
-#endif
-
static inline __sum16 udp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4de9485f73d9..cd9cc348dbf9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -25,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -931,6 +932,9 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
#endif
return 0;
}
+
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
+
#endif
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 090d3097ee15..d946356187ed 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -539,6 +539,12 @@ no_route:
}
EXPORT_SYMBOL(__ip_queue_xmit);
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
+{
+ return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+EXPORT_SYMBOL(ip_queue_xmit);
+
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a50e1990a845..04b70fe31fa2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1064,6 +1064,10 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
+INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
+INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
+INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
+
/* This routine actually transmits TCP packets queued in by
* tcp_do_sendmsg(). This is used by both the initial
* transmission and possible later retransmissions.
@@ -1207,7 +1211,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
}
#endif
- icsk->icsk_af_ops->send_check(sk, skb);
+ INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
+ tcp_v6_send_check, tcp_v4_send_check,
+ sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
@@ -1235,7 +1241,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
tcp_add_tx_delay(skb, tp);
- err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+ err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
+ inet6_csk_xmit, ip_queue_xmit,
+ sk, skb, &inet->cork.fl);
if (unlikely(err > 0)) {
tcp_enter_cwr(sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f67d45ff00b4..4502db706f75 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1811,6 +1811,13 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_destructor = tcp_twsk_destructor,
};
+INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+}
+
const struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = inet6_csk_xmit,
.send_check = tcp_v6_send_check,
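
A note on the INDIRECT_CALLABLE_DECLARE()/INDIRECT_CALLABLE_SCOPE annotations added above: the pointer comparison only works if the caller can name the target symbol, which is why tcp_v6_send_check gains a declaration in tcp.h and is defined with INDIRECT_CALLABLE_SCOPE rather than static in tcp_ipv6.c. A standalone sketch of that pattern follows; the *_SKETCH names and send_check_sketch are illustrative assumptions, not the kernel's definitions.

/* Sketch: with the mitigation enabled the callee keeps external linkage so
 * callers in other files can compare against it; otherwise the annotations
 * collapse to nothing / 'static'.  MITIGATION_SKETCH is a stand-in config.
 */
#include <stdio.h>

#define MITIGATION_SKETCH 1

#if MITIGATION_SKETCH
#define INDIRECT_CALLABLE_DECLARE_SKETCH(decl)	decl
#define INDIRECT_CALLABLE_SCOPE_SKETCH		/* external linkage */
#else
#define INDIRECT_CALLABLE_DECLARE_SKETCH(decl)
#define INDIRECT_CALLABLE_SCOPE_SKETCH		static
#endif

/* "Header" side: make the symbol nameable by callers (cf. the tcp.h hunk). */
INDIRECT_CALLABLE_DECLARE_SKETCH(void send_check_sketch(int len));

/* Definition side (cf. the tcp_ipv6.c hunk). */
INDIRECT_CALLABLE_SCOPE_SKETCH void send_check_sketch(int len)
{
	printf("checksum over %d bytes\n", len);
}

int main(void)
{
	void (*send_check)(int) = send_check_sketch;

	if (send_check == send_check_sketch)	/* direct call on a match */
		send_check_sketch(1500);
	else
		send_check(1500);
	return 0;
}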