summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c10
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c15
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c37
3 files changed, 48 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index b5f105709e49..129f3e11a442 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -791,7 +791,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd->create_rx_queue.enable_rsc =
- !!(priv->dev->features & NETIF_F_LRO);
+ !!(priv->dev->features & NETIF_F_GRO_HW);
if (priv->header_split_enabled)
cmd->create_rx_queue.header_buffer_size =
cpu_to_be16(priv->header_buf_size);
@@ -1117,9 +1117,11 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_set_default_rss_sizes(priv);
- /* DQO supports LRO. */
- if (!gve_is_gqi(priv))
- priv->dev->hw_features |= NETIF_F_LRO;
+ /* DQO supports HW-GRO. */
+ if (gve_is_dqo(priv)) {
+ priv->dev->hw_features |= NETIF_F_GRO_HW;
+ priv->dev->features |= NETIF_F_GRO_HW;
+ }
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index c654cf503c1a..424d973c97f2 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1758,9 +1758,9 @@ static int gve_verify_xdp_configuration(struct net_device *dev,
struct gve_priv *priv = netdev_priv(dev);
u16 max_xdp_mtu;
- if (dev->features & NETIF_F_LRO) {
+ if (dev->features & NETIF_F_GRO_HW) {
NL_SET_ERR_MSG_MOD(extack,
- "XDP is not supported when LRO is on.");
+ "XDP is not supported when HW-GRO is on.");
return -EOPNOTSUPP;
}
@@ -2177,12 +2177,13 @@ static int gve_set_features(struct net_device *netdev,
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
- netdev->features ^= NETIF_F_LRO;
- if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+ if ((netdev->features & NETIF_F_GRO_HW) !=
+ (features & NETIF_F_GRO_HW)) {
+ netdev->features ^= NETIF_F_GRO_HW;
+ if (priv->xdp_prog && (netdev->features & NETIF_F_GRO_HW)) {
netdev_warn(netdev,
- "XDP is not supported when LRO is on.\n");
- err = -EOPNOTSUPP;
+ "HW-GRO is not supported when XDP is on.");
+ err = -EOPNOTSUPP;
goto revert_features;
}
if (netif_running(netdev)) {
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 3b10139941ea..7924dce719e2 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -942,11 +942,18 @@ static int gve_rx_complete_rsc(struct sk_buff *skb,
struct gve_ptype ptype)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int rsc_segments, rsc_seg_len, hdr_len;
+ skb_frag_t *frag;
+ void *va;
- /* Only TCP is supported right now. */
+ /* HW-GRO only coalesces TCP. */
if (ptype.l4_type != GVE_L4_TYPE_TCP)
return -EINVAL;
+ rsc_seg_len = le16_to_cpu(desc->rsc_seg_len);
+ if (!rsc_seg_len)
+ return 0;
+
switch (ptype.l3_type) {
case GVE_L3_TYPE_IPV4:
shinfo->gso_type = SKB_GSO_TCPV4;
@@ -958,7 +965,31 @@ static int gve_rx_complete_rsc(struct sk_buff *skb,
return -EINVAL;
}
- shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
+ if (skb_headlen(skb)) {
+ /* With header-split, payload is in the non-linear part */
+ rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
+ } else {
+ /* HW-GRO packets are guaranteed to have complete TCP/IP
+ * headers in frag[0] when header-split is not enabled.
+ */
+ frag = &skb_shinfo(skb)->frags[0];
+ va = skb_frag_address(frag);
+ hdr_len =
+ eth_get_headlen(skb->dev, va, skb_frag_size(frag));
+ rsc_segments = DIV_ROUND_UP(skb->len - hdr_len, rsc_seg_len);
+ skb_copy_to_linear_data(skb, va, hdr_len);
+ skb_frag_size_sub(frag, hdr_len);
+ /* Verify we didn't empty the fragment completely as that could
+ * otherwise lead to page leaks.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!skb_frag_size(frag));
+ skb_frag_off_add(frag, hdr_len);
+ skb->data_len -= hdr_len;
+ skb->tail += hdr_len;
+ }
+ shinfo->gso_size = rsc_seg_len;
+ shinfo->gso_segs = rsc_segments;
+
return 0;
}
@@ -991,7 +1022,7 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
return err;
}
- if (skb_headlen(rx->ctx.skb_head) == 0)
+ if (rx->ctx.skb_head == napi->skb)
napi_gro_frags(napi);
else
napi_gro_receive(napi, rx->ctx.skb_head);