diff options
author | Edward Cree <ecree@solarflare.com> | 2016-11-17 13:52:36 +0300 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-11-18 19:55:38 +0300 |
commit | 46d1efd852ccbc94e8c4f8c41cfd84147a103436 (patch) | |
tree | 0ea6687be60f6afcc7d6cd1798480e2ef4b8209e /drivers/net/ethernet/sfc/tx.c | |
parent | e638ee1d0a6aa10a1a32d9161e88758ecb8d1823 (diff) | |
download | linux-46d1efd852ccbc94e8c4f8c41cfd84147a103436.tar.xz |
sfc: remove Software TSO
It gives no advantage over GSO now that xmit_more exists. If we find
ourselves unable to handle a TSO skb (because our TXQ doesn't have a
TSOv2 context and the NIC doesn't support TSOv1), hand it back to GSO.
Also do that if the TSO handler fails with EINVAL for any other reason.
As Falcon-architecture NICs don't support any firmware-assisted TSO,
they no longer advertise TSO feature flags at all.
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/sfc/tx.c')
-rw-r--r-- | drivers/net/ethernet/sfc/tx.c | 46 |
1 file changed, 41 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 3089e888a08d..1aa728cfa8ba 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -446,10 +446,38 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) } } -static int efx_tx_tso_sw(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - bool *data_mapped) +/* + * Fallback to software TSO. + * + * This is used if we are unable to send a GSO packet through hardware TSO. + * This should only ever happen due to per-queue restrictions - unsupported + * packets should first be filtered by the feature flags. + * + * Returns 0 on success, error code otherwise. + */ +static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) { - return efx_enqueue_skb_tso(tx_queue, skb, data_mapped); + struct sk_buff *segments, *next; + + segments = skb_gso_segment(skb, 0); + if (IS_ERR(segments)) + return PTR_ERR(segments); + + dev_kfree_skb_any(skb); + skb = segments; + + while (skb) { + next = skb->next; + skb->next = NULL; + + if (next) + skb->xmit_more = true; + efx_enqueue_skb(tx_queue, skb); + skb = next; + } + + return 0; } /* @@ -473,6 +501,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) bool data_mapped = false; unsigned int segments; unsigned int skb_len; + int rc; skb_len = skb->len; segments = skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 0; @@ -485,7 +514,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) */ if (segments) { EFX_BUG_ON_PARANOID(!tx_queue->handle_tso); - if (tx_queue->handle_tso(tx_queue, skb, &data_mapped)) + rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped); + if (rc == -EINVAL) { + rc = efx_tx_tso_fallback(tx_queue, skb); + tx_queue->tso_fallbacks++; + if (rc == 0) + return 0; + } + if (rc) goto err; #ifdef EFX_USE_PIO } else if (skb_len <= efx_piobuf_size && !skb->xmit_more && @@ -801,7 +837,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) /* Set up default function pointers. These may get replaced by * efx_nic_init_tx() based off NIC/queue capabilities. */ - tx_queue->handle_tso = efx_tx_tso_sw; + tx_queue->handle_tso = efx_enqueue_skb_tso; /* Some older hardware requires Tx writes larger than 32. */ tx_queue->tx_min_size = EFX_WORKAROUND_15592(efx) ? 33 : 0; |