author		Sven Auhagen <sven.auhagen@voleatech.de>	2020-11-11 20:04:53 +0300
committer	Tony Nguyen <anthony.l.nguyen@intel.com>	2020-12-10 02:26:58 +0300
commit		ec107e775d84392b35db46f6c3baa441e074042e (patch)
tree		ea09506775789abff0f62a68f57601bd93d13656 /drivers/net/ethernet/intel
parent		3eca859008a75a4ad363db65b0fe83be1a3d5ad1 (diff)
download	linux-ec107e775d84392b35db46f6c3baa441e074042e.tar.xz
igb: avoid transmit queue timeout in xdp path
Since we share the transmit queue with the network stack, it is possible
to run into a transmit queue timeout, which resets the queue. This happens
under high load when XDP is using the transmit queue almost exclusively.

netdev_start_xmit() sets the queue's trans_start field to jiffies, which
dev_watchdog() later checks, so to avoid the timeout, let the stack know
that an XDP xmit happened by bumping trans_start to jiffies in the XDP Tx
routines as well.

Fixes: 9cbc948b5a20 ("igb: add XDP support")
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Sven Auhagen <sven.auhagen@voleatech.de>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
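For context, a condensed sketch of the check in dev_watchdog() (net/sched/sch_generic.c) that this patch keeps from firing; locking, carrier checks and the trans_timeout accounting are omitted, and the helper name below is made up for illustration:

#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* Condensed form of the per-queue test in dev_watchdog().  A queue that
 * is stopped and whose trans_start has not been refreshed within
 * watchdog_timeo is treated as hung, and ndo_tx_timeout() then resets
 * the device.  Bumping trans_start from the XDP Tx routines keeps this
 * condition false while XDP monopolizes the shared queue.
 */
static bool igb_queue_looks_hung(struct net_device *dev,
				 struct netdev_queue *txq)
{
	unsigned long trans_start = txq->trans_start;

	return netif_xmit_stopped(txq) &&
	       time_after(jiffies, trans_start + dev->watchdog_timeo);
}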
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index af6ace6c0f87..0d343d050973 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2919,6 +2919,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
 	nq = txring_txq(tx_ring);
 	__netif_tx_lock(nq, cpu);
+	/* Avoid transmit queue timeout since we share it with the slow path */
+	nq->trans_start = jiffies;
 	ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
 	__netif_tx_unlock(nq);
@@ -2951,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
 	nq = txring_txq(tx_ring);
 	__netif_tx_lock(nq, cpu);
+	/* Avoid transmit queue timeout since we share it with the slow path */
+	nq->trans_start = jiffies;
+
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		int err;