author     Eric Dumazet <edumazet@google.com>      2020-06-19 22:12:34 +0300
committer  David S. Miller <davem@davemloft.net>   2020-06-21 03:47:53 +0300
commit     05e22e8395058745bd0312bc488b522197852aff
tree       62d1ab4974d59015979fb24e1d823df5b3b825b2
parent     902053f17dbeff125926bb07e781f6e63f3efe09
tcp: remove indirect calls for icsk->icsk_af_ops->queue_xmit
Mitigate RETPOLINE costs in __tcp_transmit_skb() by using the
INDIRECT_CALL_INET() wrapper.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
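Background on the pattern: with CONFIG_RETPOLINE enabled, every indirect call is
routed through a retpoline thunk, which is comparatively expensive on the TCP
transmit fast path. INDIRECT_CALL_INET() avoids most of that cost by comparing
icsk_af_ops->queue_xmit against the two expected targets (inet6_csk_xmit for
IPv6, ip_queue_xmit for IPv4) and calling the match directly, falling back to
the plain indirect call only if neither matches. The snippet below is a
minimal, self-contained userspace sketch of that idea only; it is not kernel
code, and queue_xmit_v4, queue_xmit_v6 and CALL_DIRECT_2 are invented names
used purely for illustration.

#include <stdio.h>

static int queue_xmit_v4(int skb) { printf("v4 xmit %d\n", skb); return 0; }
static int queue_xmit_v6(int skb) { printf("v6 xmit %d\n", skb); return 0; }

/* Rough analogue of INDIRECT_CALL_2(): test the two known targets first so
 * the compiler can emit conditional direct calls; only fall back to the
 * (retpolined, in the kernel) indirect call if neither pointer matches.
 */
#define CALL_DIRECT_2(f, f2, f1, ...)				\
	((f) == (f2) ? (f2)(__VA_ARGS__) :			\
	 (f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

int main(void)
{
	/* stand-in for icsk->icsk_af_ops->queue_xmit */
	int (*queue_xmit)(int) = queue_xmit_v4;

	return CALL_DIRECT_2(queue_xmit, queue_xmit_v6, queue_xmit_v4, 1);
}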
-rw-r--r--  include/net/ip.h        6
-rw-r--r--  include/net/tcp.h       1
-rw-r--r--  net/ipv4/ip_output.c    6
-rw-r--r--  net/ipv4/tcp_output.c   7
4 files changed, 14 insertions, 6 deletions
diff --git a/include/net/ip.h b/include/net/ip.h
index 04ebe7bf54c6..862c9545833a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -231,11 +231,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
 			    struct ipcm_cookie *ipc, struct rtable **rtp,
 			    struct inet_cork *cork, unsigned int flags);
 
-static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
-				struct flowi *fl)
-{
-	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
-}
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 
 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 {
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4de9485f73d9..e5d7e0b09924 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -25,6 +25,7 @@
 #include <linux/skbuff.h>
 #include <linux/kref.h>
 #include <linux/ktime.h>
+#include <linux/indirect_call_wrapper.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 090d3097ee15..d946356187ed 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -539,6 +539,12 @@ no_route:
 }
 EXPORT_SYMBOL(__ip_queue_xmit);
 
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
+{
+	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+EXPORT_SYMBOL(ip_queue_xmit);
+
 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 {
 	to->pkt_type = from->pkt_type;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a50e1990a845..be1bd37185d8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1064,6 +1064,9 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 }
 
+INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
+INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
+
 /* This routine actually transmits TCP packets queued in by
  * tcp_do_sendmsg().  This is used by both the initial
  * transmission and possible later retransmissions.
@@ -1235,7 +1238,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 
 	tcp_add_tx_delay(skb, tp);
 
-	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
+				 inet6_csk_xmit, ip_queue_xmit,
+				 sk, skb, &inet->cork.fl);
 
 	if (unlikely(err > 0)) {
 		tcp_enter_cwr(sk);
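For reference, the helpers used above live in include/linux/indirect_call_wrapper.h.
Paraphrased below (an approximate sketch, not part of this patch; see the header
for the exact definitions): INDIRECT_CALLABLE_DECLARE() keeps a declaration of
the target only when retpolines are in use, so tcp_output.c can name
ip_queue_xmit()/inet6_csk_xmit() for the pointer comparison, and
INDIRECT_CALL_INET() expands to the chained compare-and-direct-call, using both
targets only when IPv6 is built in.

/*
 * Approximate paraphrase of include/linux/indirect_call_wrapper.h.
 */
#ifdef CONFIG_RETPOLINE
/* Compare against the expected targets and call them directly on a match. */
#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})
/* Keep a declaration of the target so call sites can reference it. */
#define INDIRECT_CALLABLE_DECLARE(f)	f
#else
/* Without retpolines the plain indirect call is cheap enough. */
#define INDIRECT_CALL_1(f, f1, ...)	f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...)	f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#endif

/* inet6_csk_xmit() can only be compared against when IPv6 is built in. */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#elif IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif

With IPv6 built in, the new call site in __tcp_transmit_skb() therefore compiles
down to two pointer comparisons plus direct calls, leaving the retpolined
indirect call only as the fallback for any other af_ops.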