Diffstat (limited to 'drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c')
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 434
1 file changed, 270 insertions(+), 164 deletions(-)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 0ac2ff828320..3676976c875b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data MAP protocol
*/
@@ -8,6 +8,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -18,23 +19,13 @@
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
- __sum16 *check = NULL;
+ if (protocol == IPPROTO_TCP)
+ return &((struct tcphdr *)txporthdr)->check;
- switch (protocol) {
- case IPPROTO_TCP:
- check = &(((struct tcphdr *)txporthdr)->check);
- break;
-
- case IPPROTO_UDP:
- check = &(((struct udphdr *)txporthdr)->check);
- break;
+ if (protocol == IPPROTO_UDP)
+ return &((struct udphdr *)txporthdr)->check;
- default:
- check = NULL;
- break;
- }
-
- return check;
+ return NULL;
}
static int
@@ -42,71 +33,74 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer,
struct rmnet_priv *priv)
{
- __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
- u16 csum_value, csum_value_final;
- struct iphdr *ip4h;
- void *txporthdr;
- __be16 addend;
-
- ip4h = (struct iphdr *)(skb->data);
- if ((ntohs(ip4h->frag_off) & IP_MF) ||
- ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+ struct iphdr *ip4h = (struct iphdr *)skb->data;
+ void *txporthdr = skb->data + ip4h->ihl * 4;
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip_payload_csum;
+
+ /* Computing the checksum over just the IPv4 header--including its
+ * checksum field--should yield 0. If it doesn't, the IP header
+ * is bad, so return an error and let the IP layer drop it.
+ */
+ if (ip_fast_csum(ip4h, ip4h->ihl)) {
+ priv->stats.csum_ip4_header_bad++;
+ return -EINVAL;
+ }
+
+ /* We don't support checksum offload on IPv4 fragments */
+ if (ip_is_fragment(ip4h)) {
priv->stats.csum_fragmented_pkt++;
return -EOPNOTSUPP;
}
- txporthdr = skb->data + ip4h->ihl * 4;
-
+ /* Checksum offload is only supported for UDP and TCP protocols */
csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
-
if (!csum_field) {
priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
}
- /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
- if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+ /* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
+ if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
priv->stats.csum_skipped++;
return 0;
}
- csum_value = ~ntohs(csum_trailer->csum_value);
- hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
- ip_payload_csum = csum16_sub((__force __sum16)csum_value,
- (__force __be16)hdr_csum);
-
- pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
- ntohs(ip4h->tot_len) - ip4h->ihl * 4,
- ip4h->protocol, 0);
- addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
- pseudo_csum = csum16_add(ip_payload_csum, addend);
-
- addend = (__force __be16)ntohs((__force __be16)*csum_field);
- csum_temp = ~csum16_sub(pseudo_csum, addend);
- csum_value_final = (__force u16)csum_temp;
-
- if (unlikely(csum_value_final == 0)) {
- switch (ip4h->protocol) {
- case IPPROTO_UDP:
- /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
- csum_value_final = ~csum_value_final;
- break;
-
- case IPPROTO_TCP:
- /* DL4 Non-RFC compliant TCP checksum found */
- if (*csum_field == (__force __sum16)0xFFFF)
- csum_value_final = ~csum_value_final;
- break;
- }
- }
-
- if (csum_value_final == ntohs((__force __be16)*csum_field)) {
- priv->stats.csum_ok++;
- return 0;
- } else {
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ *
+ * We verified above that the IP header contributes zero to the
+ * trailer checksum. Therefore the checksum in the trailer is
+ * just the checksum computed over the IP payload.
+ *
+ * If the IP payload arrives intact, adding the pseudo header
+ * checksum to the IP payload checksum will yield 0xffff (negative
+ * zero). This means the trailer checksum and the pseudo checksum
+ * are additive inverses of each other. Put another way, the
+ * message passes the checksum test if the trailer checksum value
+ * is the negated pseudo header checksum.
+ *
+ * Knowing this, we don't even need to examine the transport
+ * header checksum value; it is already accounted for in the
+ * checksum value found in the trailer.
+ */
+ ip_payload_csum = csum_trailer->csum_value;
+
+ pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+
+ /* The cast is required to ensure only the low 16 bits are examined */
+ if (ip_payload_csum != (__sum16)~pseudo_csum) {
priv->stats.csum_validation_failed++;
return -EINVAL;
}
+
+ priv->stats.csum_ok++;
+ return 0;
}
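
The comment above rests on a ones'-complement identity: when the IP payload arrives intact, the checksum computed over that payload (transport header, checksum field included, plus data) and the pseudo-header checksum sum to 0xffff, so the trailer value must equal the negated pseudo checksum. Below is a minimal user-space sketch of that identity; the word values and the csum16() helper are made up for illustration and are not the driver's code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum of 16-bit words */
static uint16_t csum16(const uint16_t *w, size_t n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* Hypothetical pseudo-header words (addresses, length, protocol) */
	uint16_t pseudo[] = { 0xc0a8, 0x0001, 0xc0a8, 0x0002, 0x0011, 0x000c };
	/* Hypothetical UDP header + data; the checksum word (index 3) starts at 0 */
	uint16_t payload[] = { 0x1234, 0x5678, 0x000c, 0x0000, 0xdead, 0xbeef };
	uint32_t sum = 0;
	size_t i;

	/* Fill in the transport checksum the way a correct sender would:
	 * the complement of the sum over pseudo header plus payload.
	 */
	for (i = 0; i < 6; i++)
		sum += pseudo[i] + payload[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	payload[3] = (uint16_t)~sum;

	/* The identity the driver relies on: the checksum over the intact
	 * payload equals the negated pseudo-header checksum, so the trailer
	 * value alone is enough to validate the packet.
	 */
	printf("payload csum 0x%04x, ~pseudo csum 0x%04x\n",
	       csum16(payload, 6), (uint16_t)~csum16(pseudo, 6));
	return 0;
}

This is also why the validation path never reads *csum_field directly: its contribution is already folded into the checksum carried in the trailer.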
#if IS_ENABLED(CONFIG_IPV6)
@@ -115,76 +109,66 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer,
struct rmnet_priv *priv)
{
- __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
- u16 csum_value, csum_value_final;
- __be16 ip6_hdr_csum, addend;
- struct ipv6hdr *ip6h;
- void *txporthdr;
- u32 length;
-
- ip6h = (struct ipv6hdr *)(skb->data);
-
- txporthdr = skb->data + sizeof(struct ipv6hdr);
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
+ void *txporthdr = skb->data + sizeof(*ip6h);
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip6_payload_csum;
+ __be16 ip_header_csum;
+
+ /* Checksum offload is only supported for UDP and TCP protocols;
+ * the packet cannot include any IPv6 extension headers
+ */
csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
-
if (!csum_field) {
priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
}
- csum_value = ~ntohs(csum_trailer->csum_value);
- ip6_hdr_csum = (__force __be16)
- ~ntohs((__force __be16)ip_compute_csum(ip6h,
- (int)(txporthdr - (void *)(skb->data))));
- ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
- ip6_hdr_csum);
-
- length = (ip6h->nexthdr == IPPROTO_UDP) ?
- ntohs(((struct udphdr *)txporthdr)->len) :
- ntohs(ip6h->payload_len);
- pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- length, ip6h->nexthdr, 0));
- addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
- pseudo_csum = csum16_add(ip6_payload_csum, addend);
-
- addend = (__force __be16)ntohs((__force __be16)*csum_field);
- csum_temp = ~csum16_sub(pseudo_csum, addend);
- csum_value_final = (__force u16)csum_temp;
-
- if (unlikely(csum_value_final == 0)) {
- switch (ip6h->nexthdr) {
- case IPPROTO_UDP:
- /* RFC 2460 section 8.1
- * DL6 One's complement rule for UDP checksum 0
- */
- csum_value_final = ~csum_value_final;
- break;
-
- case IPPROTO_TCP:
- /* DL6 Non-RFC compliant TCP checksum found */
- if (*csum_field == (__force __sum16)0xFFFF)
- csum_value_final = ~csum_value_final;
- break;
- }
- }
-
- if (csum_value_final == ntohs((__force __be16)*csum_field)) {
- priv->stats.csum_ok++;
- return 0;
- } else {
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ */
+ ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
+ ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);
+
+ pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ntohs(ip6h->payload_len),
+ ip6h->nexthdr, 0);
+
+ /* It's sufficient to compare the IP payload checksum with the
+ * negated pseudo checksum to determine whether the packet
+ * checksum was good. (See further explanation in comments
+ * in rmnet_map_ipv4_dl_csum_trailer()).
+ *
+ * The cast is required to ensure only the low 16 bits are
+ * examined.
+ */
+ if (ip6_payload_csum != (__sum16)~pseudo_csum) {
priv->stats.csum_validation_failed++;
return -EINVAL;
}
+
+ priv->stats.csum_ok++;
+ return 0;
+}
+#else
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
+{
+ return 0;
}
#endif
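
The IPv6 path above removes the header's contribution from the trailer value with csum16_sub(), which is simply csum16_add() of the complement. The following is a small user-space sketch of that subtraction, again with made-up values and stand-in helpers rather than the kernel's csum16_add()/csum16_sub().

#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum of 16-bit words */
static uint16_t csum16(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* User-space analogues of the kernel's csum16_add()/csum16_sub() */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint16_t res = csum + addend;

	return res + (res < addend);	/* end-around carry */
}

static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, ~addend);
}

int main(void)
{
	/* Hypothetical IP header words followed by payload words */
	uint16_t hdr[] = { 0x6000, 0x0000, 0x0010, 0x1100 };
	uint16_t payload[] = { 0xabcd, 0x1234 };
	uint16_t all[] = { 0x6000, 0x0000, 0x0010, 0x1100, 0xabcd, 0x1234 };

	/* A trailer-style sum over the whole packet, minus the header sum,
	 * equals the sum over just the payload.
	 */
	printf("0x%04x == 0x%04x\n",
	       csum16_sub(csum16(all, 6), csum16(hdr, 4)),
	       csum16(payload, 2));
	return 0;
}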
-static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
- struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
- txphdr = iphdr + ip4h->ihl * 4;
+ txphdr = (void *)ip4h + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
@@ -193,15 +177,14 @@ static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
}
static void
-rmnet_map_ipv4_ul_csum_header(void *iphdr,
+rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct iphdr *ip4h = iphdr;
u16 val;
val = MAP_CSUM_UL_ENABLED_FLAG;
- if (ip4h->protocol == IPPROTO_UDP)
+ if (iphdr->protocol == IPPROTO_UDP)
val |= MAP_CSUM_UL_UDP_FLAG;
val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -214,13 +197,13 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
}
#if IS_ENABLED(CONFIG_IPV6)
-static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+static void
+rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
- struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
- txphdr = ip6hdr + sizeof(struct ipv6hdr);
+ txphdr = ip6h + 1;
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
@@ -229,15 +212,14 @@ static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
}
static void
-rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct ipv6hdr *ip6h = ip6hdr;
u16 val;
val = MAP_CSUM_UL_ENABLED_FLAG;
- if (ip6h->nexthdr == IPPROTO_UDP)
+ if (ipv6hdr->nexthdr == IPPROTO_UDP)
val |= MAP_CSUM_UL_UDP_FLAG;
val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -246,16 +228,73 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
skb->ip_summed = CHECKSUM_NONE;
- rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
+}
+#else
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
}
#endif
+static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
+ struct rmnet_map_v5_csum_header *ul_header;
+
+ ul_header = skb_push(skb, sizeof(*ul_header));
+ memset(ul_header, 0, sizeof(*ul_header));
+ ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ void *iph = ip_hdr(skb);
+ __sum16 *check;
+ void *trans;
+ u8 proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+ proto = ((struct iphdr *)iph)->protocol;
+ trans = iph + ip_len;
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+ u16 ip_len = sizeof(struct ipv6hdr);
+
+ proto = ((struct ipv6hdr *)iph)->nexthdr;
+ trans = iph + ip_len;
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ goto sw_csum;
+ }
+
+ check = rmnet_map_get_csum_field(proto, trans);
+ if (check) {
+ skb->ip_summed = CHECKSUM_NONE;
+ /* Ask for checksum offloading */
+ ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
+ priv->stats.csum_hw++;
+ return;
+ }
+ }
+
+sw_csum:
+ priv->stats.csum_sw++;
+}
+
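The MAPv5 header_info byte above is packed and unpacked with the bitfield helpers from <linux/bitfield.h>. Below is a user-space sketch of that round trip; the field position (bits 7:1) and the checksum-offload type value (2) are assumptions for illustration only, the real definitions live in rmnet_map.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: the MAPv5 header type occupies bits 7:1 of header_info
 * (MAPV5_HDRINFO_HDR_TYPE_FMASK in the driver), and the checksum-offload
 * type value is assumed to be 2.
 */
#define HDR_TYPE_MASK		0xfe
#define HDR_TYPE_CSUM_OFFLOAD	2

/* Stand-ins for the kernel's u8_encode_bits()/u8_get_bits() */
static uint8_t encode_bits(uint8_t val, uint8_t mask)
{
	return (uint8_t)((val * (mask & -mask)) & mask);
}

static uint8_t get_bits(uint8_t reg, uint8_t mask)
{
	return (uint8_t)((reg & mask) / (mask & -mask));
}

int main(void)
{
	/* Uplink side: build header_info the way the driver does */
	uint8_t header_info = encode_bits(HDR_TYPE_CSUM_OFFLOAD, HDR_TYPE_MASK);

	/* Downlink side: recover the type before trusting csum_info */
	printf("header_info = 0x%02x, type = %u\n",
	       header_info, get_bits(header_info, HDR_TYPE_MASK));
	return 0;
}

The deaggregation and next-header paths later in this patch use the same mask to reject MAPv5 data packets whose next header is not a checksum-offload header.
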
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
- int hdrlen, int pad)
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
@@ -266,6 +305,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
+ /* Set next_hdr bit for csum offload packets */
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+ map_header->flags |= MAP_NEXT_HEADER_FLAG;
+
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
@@ -300,8 +343,11 @@ done:
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
+ struct rmnet_map_v5_csum_header *next_hdr = NULL;
struct rmnet_map_header *maph;
+ void *data = skb->data;
struct sk_buff *skbn;
+ u8 nexthdr_type;
u32 packet_len;
if (skb->len == 0)
@@ -310,8 +356,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+ } else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+ if (!(maph->flags & MAP_CMD_FLAG)) {
+ packet_len += sizeof(*next_hdr);
+ if (maph->flags & MAP_NEXT_HEADER_FLAG)
+ next_hdr = data + sizeof(*maph);
+ else
/* MAPv5 data pkt without csum hdr is invalid */
+ return NULL;
+ }
+ }
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -320,6 +376,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
if (!maph->pkt_len)
return NULL;
+ if (next_hdr) {
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return NULL;
+ }
+
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
if (!skbn)
return NULL;
@@ -355,28 +418,19 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
return -EINVAL;
}
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
-#else
- priv->stats.csum_err_invalid_ip_version++;
- return -EPROTONOSUPPORT;
-#endif
- } else {
- priv->stats.csum_err_invalid_ip_version++;
- return -EPROTONOSUPPORT;
- }
- return 0;
+ priv->stats.csum_err_invalid_ip_version++;
+
+ return -EPROTONOSUPPORT;
}
-/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
- * packets that are supported for UL checksum offload.
- */
-void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
- struct net_device *orig_dev)
+static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
@@ -389,28 +443,80 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- iphdr = (char *)ul_header +
- sizeof(struct rmnet_map_ul_csum_header);
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto sw_csum;
- if (skb->protocol == htons(ETH_P_IP)) {
- rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
- return;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
- rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
- return;
-#else
- priv->stats.csum_err_invalid_ip_version++;
- goto sw_csum;
-#endif
- } else {
- priv->stats.csum_err_invalid_ip_version++;
- }
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
}
+ priv->stats.csum_err_invalid_ip_version++;
+
sw_csum:
memset(ul_header, 0, sizeof(*ul_header));
priv->stats.csum_sw++;
}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type)
+{
+ switch (csum_type) {
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+ rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+ break;
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+ rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Process a MAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+ u16 len)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+ struct rmnet_map_v5_csum_header *next_hdr;
+ u8 nexthdr_type;
+
+ next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
+ sizeof(struct rmnet_map_header));
+
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return -EINVAL;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
+ } else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
+ priv->stats.csum_ok++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ priv->stats.csum_valid_unset++;
+ }
+
+ /* Pull csum v5 header */
+ skb_pull(skb, sizeof(*next_hdr));
+
+ return 0;
+}