author		Ronak Doshi <doshir@vmware.com>	2022-11-30 11:21:46 +0300
committer	David S. Miller <davem@davemloft.net>	2022-12-02 13:30:07 +0300
commit		40b8c2a1af03ba3e8da55a4490d646bfa845e71a (patch)
tree		916f1458d58582479e9c35ecbf2b3760d3b5e3aa /drivers/net
parent		4eb0c28551fdafdb1af71d88a111c6f3b609d501 (diff)
download	linux-40b8c2a1af03ba3e8da55a4490d646bfa845e71a.tar.xz
vmxnet3: correctly report encapsulated LRO packet
Commit dacce2be3312 ("vmxnet3: add geneve and vxlan tunnel offload support") added support for encapsulation offload. However, the patch did not correctly report an encapsulated packet that is LRO'ed by the hypervisor. This patch fixes the issue by using the correct callback for the LRO'ed encapsulated packet.

Fixes: dacce2be3312 ("vmxnet3: add geneve and vxlan tunnel offload support")
Signed-off-by: Ronak Doshi <doshir@vmware.com>
Acked-by: Guolin Yang <gyang@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
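Editor's note: the patch below detects an encapsulated LRO completion by testing the inner-header bit in the first dword of the generic RX completion descriptor. The user-space sketch that follows only illustrates that bit test; the descriptor layout, the shift value and the helper name are simplified placeholders, not the real vmxnet3 definitions.

/*
 * Illustrative sketch only: mimics the encap_lro test added by this patch.
 * The real driver evaluates
 *     le32_to_cpu(gdesc->dword[0]) & (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)
 * on a union Vmxnet3_GenericDesc; the constant and union below are
 * placeholders so the example builds stand-alone.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RCD_HDR_INNER_SHIFT 13U		/* placeholder bit position */

union generic_desc {			/* simplified stand-in for Vmxnet3_GenericDesc */
	uint32_t dword[4];
};

/* True when the completion descriptor marks an inner (encapsulated) header,
 * i.e. the hypervisor LRO'ed a tunnelled packet. */
static bool desc_is_encap_lro(const union generic_desc *gdesc)
{
	return gdesc->dword[0] & (1UL << RCD_HDR_INNER_SHIFT);
}

int main(void)
{
	union generic_desc rcd = { .dword = { 1UL << RCD_HDR_INNER_SHIFT, 0, 0, 0 } };

	printf("encap_lro = %s\n", desc_is_encap_lro(&rcd) ? "true" : "false");
	return 0;
}

Built with any C compiler, this prints "encap_lro = true"; only the bit-mask pattern matters here, the rest of the descriptor handling is out of scope.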
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	11
1 file changed, 9 insertions, 2 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d3e7b27eb933..3111a8a6b26a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1396,6 +1396,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	};
 	u32 num_pkts = 0;
 	bool skip_page_frags = false;
+	bool encap_lro = false;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
 	u16 segCnt = 0, mss = 0;
@@ -1556,13 +1557,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			if (VMXNET3_VERSION_GE_2(adapter) &&
 			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
 				struct Vmxnet3_RxCompDescExt *rcdlro;
+				union Vmxnet3_GenericDesc *gdesc;
+
 				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
+				gdesc = (union Vmxnet3_GenericDesc *)rcd;
 
 				segCnt = rcdlro->segCnt;
 				WARN_ON_ONCE(segCnt == 0);
 				mss = rcdlro->mss;
 				if (unlikely(segCnt <= 1))
 					segCnt = 0;
+				encap_lro = (le32_to_cpu(gdesc->dword[0]) &
+					     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
 			} else {
 				segCnt = 0;
 			}
@@ -1630,7 +1636,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			vmxnet3_rx_csum(adapter, skb,
 					(union Vmxnet3_GenericDesc *)rcd);
 			skb->protocol = eth_type_trans(skb, adapter->netdev);
-			if (!rcd->tcp ||
+			if ((!rcd->tcp && !encap_lro) ||
 			    !(adapter->netdev->features & NETIF_F_LRO))
 				goto not_lro;
 
@@ -1639,7 +1645,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 						SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 				skb_shinfo(skb)->gso_size = mss;
 				skb_shinfo(skb)->gso_segs = segCnt;
-			} else if (segCnt != 0 || skb->len > mtu) {
+			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
 				u32 hlen;
 
 				hlen = vmxnet3_get_hdr_len(adapter, skb,
@@ -1668,6 +1674,7 @@ not_lro:
 				napi_gro_receive(&rq->napi, skb);
 
 			ctx->skb = NULL;
+			encap_lro = false;
 			num_pkts++;
 		}
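Editor's note: as a condensed view of how the new encap_lro flag changes the receive-path decision, here is a hedged model of the patched conditions. The function name, its parameters and the surrounding segCnt/mss check are assumptions drawn from the hunk context, not the driver's actual code.

#include <stdbool.h>
#include <stdint.h>

enum rx_path { RX_PATH_NOT_LRO, RX_PATH_GSO, RX_PATH_HDRLEN_FIXUP };

/*
 * Condensed model of the patched conditions (illustrative names only).
 * Encapsulated LRO completions have rcd_tcp == 0 for the outer header, so
 * before the patch they fell through to RX_PATH_NOT_LRO or the header-length
 * fixup and were reported without GSO metadata.
 */
static enum rx_path pick_rx_path(bool rcd_tcp, bool encap_lro, bool lro_enabled,
				 uint16_t seg_cnt, uint16_t mss,
				 uint32_t len, uint32_t mtu)
{
	if ((!rcd_tcp && !encap_lro) || !lro_enabled)
		return RX_PATH_NOT_LRO;

	if (seg_cnt != 0 && mss != 0)
		return RX_PATH_GSO;		/* gso_size/gso_segs reported by the device */

	if ((seg_cnt != 0 || len > mtu) && !encap_lro)
		return RX_PATH_HDRLEN_FIXUP;	/* estimate gso_size from the header length */

	return RX_PATH_NOT_LRO;
}

With encap_lro set, an LRO'ed tunnel packet now reaches the GSO branch and skips the header-length fixup, matching the two condition changes in the hunks above; the flag is cleared once the skb is handed up, as the last hunk shows.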