author    David S. Miller <davem@davemloft.net>  2017-06-27 22:43:57 +0300
committer David S. Miller <davem@davemloft.net>  2017-06-27 22:43:57 +0300
commit    ed9208be8d8f3ce164892ba7ca484501c881de66 (patch)
tree      4d593cd01e8f7783935fae566b9e2cb4a29159a6 /include
parent    d97af30f615eea23ecfefd0e80b2f5f2f41afe55 (diff)
parent    67a51780aebb683c3b217b2867e25102cdb0afbc (diff)
download  linux-ed9208be8d8f3ce164892ba7ca484501c881de66.tar.xz
Merge branch 'udp-ipv6-use-scratch-helpers'
Paolo Abeni says:

====================
ipv6: udp: exploit dev_scratch helpers

When bringing in the recent cache optimization for the UDP protocol, I forgot to leverage the newly introduced scratch area helpers in the UDPv6 code path. As a result, the UDPv6 implementation suffers an unnecessary performance penalty compared to v4.

This series aims to bring UDPv6 back onto an equal footing with v4. The first patch moves the shared helpers to the common include file, while the second uses them in the UDPv6 code.

This gives a 5-8% performance improvement for a system under flood with small UDPv6 packets. The performance delta is smaller than the one reported for the original patch set because the UDPv6 code path already leveraged some of the optimization.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
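[Editorial note, not part of the commit:] skb->dev_scratch is the unsigned long that aliases skb->dev while an skb sits in the UDP receive queue, and the enqueue path caches a few hot fields there so recvmsg() can read them without touching cold cache lines. The writer side lives in net/ipv4/udp.c and is not part of the hunk below; the following is only an illustrative sketch of that enqueue-time step, based on the struct udp_dev_scratch layout this series moves into include/net/udp.h. The function name udp_fill_scratch_sketch is made up for this example.

/* Illustrative sketch only: populate the 64-bit scratch layout defined
 * in the hunk below at enqueue time. The real writer is in
 * net/ipv4/udp.c; this function name is invented for the example and
 * the 32-bit variant is omitted.
 */
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <net/udp.h>

#if BITS_PER_LONG == 64
static void udp_fill_scratch_sketch(struct sk_buff *skb)
{
        struct udp_dev_scratch *scratch =
                        (struct udp_dev_scratch *)&skb->dev_scratch;

        /* Everything cached here must fit in the single word that
         * aliases skb->dev while the skb is queued.
         */
        BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));

        scratch->truesize = skb->truesize;
        /* The UDP header has already been validated and pulled, so the
         * remaining length fits in 16 bits.
         */
        scratch->len = skb->len;
        scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
        scratch->is_linear = !skb_is_nonlinear(skb);
}
#endif

At recvmsg() time, the read-side helpers added by the diff below pull these values back out of the scratch word instead of dereferencing the cold parts of the skb.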
Diffstat (limited to 'include')
-rw-r--r--  include/net/udp.h  61
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/include/net/udp.h b/include/net/udp.h
index 1468dbd0f09a..972ce4baab6b 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -302,6 +302,67 @@ struct sock *__udp6_lib_lookup(struct net *net,
 struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
                                  __be16 sport, __be16 dport);

+/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
+ * possibly multiple cache miss on dequeue()
+ */
+#if BITS_PER_LONG == 64
+
+/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
+ * cold cache lines at recvmsg time.
+ * skb->len can be stored on 16 bits since the udp header has been already
+ * validated and pulled.
+ */
+struct udp_dev_scratch {
+        u32 truesize;
+        u16 len;
+        bool is_linear;
+        bool csum_unnecessary;
+};
+
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+        return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+        return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+        return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+}
+
+#else
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+        return skb->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+        return skb_csum_unnecessary(skb);
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+        return !skb_is_nonlinear(skb);
+}
+#endif
+
+static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
+                                  struct iov_iter *to)
+{
+        int n, copy = len - off;
+
+        n = copy_to_iter(skb->data + off, copy, to);
+        if (n == copy)
+                return 0;
+
+        return -EFAULT;
+}
+
 /*
  *      SNMP statistics for UDP and UDP-Lite
  */
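[Editorial note, not part of the commit:] for illustration, here is a trimmed-down example of how a recvmsg-style consumer can use the helpers added above. The real consumer is udpv6_recvmsg() in net/ipv6/udp.c, converted by the second patch of this series; the function below is invented for the sketch and ignores peek offsets, truncation, UDP-Lite partial coverage and MIB accounting.

/* Illustrative sketch only: copy a whole queued datagram to user space
 * using the scratch helpers above. Not the actual udpv6_recvmsg() code.
 */
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <net/udp.h>

static int udp6_copy_datagram_sketch(struct sk_buff *skb, struct msghdr *msg)
{
        /* On 64-bit this reads the cached copy of skb->len from the
         * scratch area instead of a cold skb cache line.
         */
        int copied = udp_skb_len(skb);
        int err;

        if (udp_skb_csum_unnecessary(skb)) {
                /* Checksum already known to be good: plain copy, with a
                 * fast path for linear skbs.
                 */
                if (udp_skb_is_linear(skb))
                        err = copy_linear_skb(skb, copied, 0, &msg->msg_iter);
                else
                        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        } else {
                /* Validate the checksum while copying to user space. */
                err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
        }

        return err ? err : copied;
}

Reading len, the checksum state and the linearity flag from the single cached scratch word is where the cold-cache savings described in the cover letter come from.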