author    David S. Miller <davem@davemloft.net>  2016-02-11 16:55:42 +0300
committer David S. Miller <davem@davemloft.net>  2016-02-11 16:55:42 +0300
commit    e7e9956d8fc3c92e797e7334f2aee31dd9c623f3
tree      b04ab120785a4f827079361fffc1b78cfb88f8f3 /include
parent    a060679c6b3da17dc9e95d0500f811de118ec901
parent    f245d079c1d11dc6927e56f5a89dd566fef2a415
Merge branch 'gso-checksums'
Alexander Duyck says:

====================
Add GSO support for outer checksum w/ inner checksum offloads

This patch series updates the existing segmentation offload code for
tunnels to make better use of existing and updated GSO checksum
computation. This is done primarily through two mechanisms. First we
maintain a separate checksum in the GSO context block of the sk_buff.
This allows us to maintain two checksum values, one offloaded with values
stored in csum_start and csum_offset, and one computed and tracked in
SKB_GSO_CB(skb)->csum. By maintaining these two values we are able to
take advantage of the same sort of math used in local checksum offload so
that we can provide both inner and outer checksums with minimal overhead.

Below is the performance for a netperf session between an ixgbe PF and VF
on the same host but in different namespaces. As can be seen a significant
gain in performance can be had from allowing the use of Tx checksum
offload on the inner headers while performing a software offload on the
outer header computation:

Recv   Send   Send                         Utilization     Service Demand
Socket Socket Message  Elapsed             Send    Recv    Send    Recv
Size   Size   Size     Time    Throughput  local   remote  local   remote
bytes  bytes  bytes    secs.   10^6bits/s  % S     % U     us/KB   us/KB

Before:
87380  16384  16384    10.00   12844.38    9.30    -1.00   0.712   -1.00

After:
87380  16384  16384    10.00   13216.63    6.78    -1.00   0.504   -1.000

Changes from v1:
* Dropped use of CHECKSUM_UNNECESSARY for remote checksum offload
* Left encap_hdr_csum as it will likely be needed in future for SCTP GSO
* Broke the changes out over many more patches
* Updated GRE segmentation to more closely match UDP tunnel segmentation
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
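The "same sort of math used in local checksum offload" mentioned in the
cover letter boils down to: sum the outer header span in software, seed
that sum with the partial checksum carried for the inner payload, and fold
the result to 16 bits. The following is a minimal sketch of that idea, not
the patch itself; the helper name outer_csum_example() is hypothetical,
while csum_partial(), csum_fold(), skb_transport_header() and the
skb_checksum_start() helper added below are existing/new kernel interfaces.

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    static __sum16 outer_csum_example(struct sk_buff *skb, __wsum inner_partial)
    {
    	/* Outer span: from the outer transport header up to the point
    	 * where the inner, hardware-offloaded checksum begins. */
    	unsigned char *start = skb_transport_header(skb);
    	int len = skb_checksum_start(skb) - start;

    	/* Sum the outer span, seeded with the partial checksum already
    	 * accumulated for the inner payload, then fold to 16 bits. */
    	return csum_fold(csum_partial(start, len, inner_partial));
    }

In the series itself this composition is done by gso_make_checksum() in the
diff below, with the inner partial sum tracked in SKB_GSO_CB(skb)->csum.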
Diffstat (limited to 'include')
-rw-r--r--  include/linux/skbuff.h | 29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 11f935c1a090..a8fc2220e8ce 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2161,6 +2161,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
return skb->csum_start - skb_headroom(skb);
}
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+ return skb->head + skb->csum_start;
+}
+
static inline int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
@@ -3549,6 +3554,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
struct skb_gso_cb {
int mac_offset;
int encap_level;
+ __wsum csum;
__u16 csum_start;
};
#define SKB_SGO_CB_OFFSET 32
@@ -3575,6 +3581,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
return 0;
}
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+ /* Do not update partial checksums if remote checksum is enabled. */
+ if (skb->remcsum_offload)
+ return;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
/* Compute the checksum for a gso segment. First compute the checksum value
* from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
* then add in skb->csum (checksum from csum_start to end of packet).
@@ -3585,15 +3601,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
*/
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
- int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __wsum partial;
+ unsigned char *csum_start = skb_transport_header(skb);
+ int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+ __wsum partial = SKB_GSO_CB(skb)->csum;
- partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
- skb->csum = res;
- SKB_GSO_CB(skb)->csum_start -= plen;
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
- return csum_fold(partial);
+ return csum_fold(csum_partial(csum_start, plen, partial));
}
static inline bool skb_is_gso(const struct sk_buff *skb)
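
For context, a hedged sketch of how a tunnel gso_segment callback can
consume the reworked helper, loosely modelled on the UDP tunnel
segmentation path that this series updates; the wrapper name
fixup_outer_udp_csum() is hypothetical, while udp_hdr(), CSUM_MANGLED_0
and gso_make_checksum() are existing kernel interfaces.

    #include <linux/skbuff.h>
    #include <linux/udp.h>

    /* Per-segment fixup of the outer UDP checksum when the outer header
     * must be checksummed in software. */
    static void fixup_outer_udp_csum(struct sk_buff *seg)
    {
    	struct udphdr *uh = udp_hdr(seg);

    	/* Start from the complement of the pseudo-header value already in
    	 * uh->check; gso_make_checksum() adds the outer header bytes and
    	 * the payload sum tracked in SKB_GSO_CB(seg)->csum, then folds. */
    	uh->check = gso_make_checksum(seg, ~uh->check);
    	if (uh->check == 0)
    		uh->check = CSUM_MANGLED_0;
    }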