author     Sriram Yagnaraman <sriram.yagnaraman@est.tech>      2024-08-22 10:42:07 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2024-09-18 20:23:05 +0300
commit     b01930b75f98b52569ac6575f6bbe4200f21a70e (patch)
tree       948ffb049327c2bf3b0a0e4a1f61359b4b143049
parent     231442c47c613cba67322d0c1d6b36cb9e0feecb (diff)
download   linux-b01930b75f98b52569ac6575f6bbe4200f21a70e.tar.xz
igb: Always call igb_xdp_ring_update_tail() under Tx lock
[ Upstream commit 27717f8b17c098c4373ddb8fe89e1a1899c7779d ]

Always call igb_xdp_ring_update_tail() under __netif_tx_lock, add a
comment and lockdep assert to indicate that. This is needed to share
the same TX ring between XDP, XSK and slow paths. Furthermore, the
current XDP implementation is racy on tail updates.

Fixes: 9cbc948b5a20 ("igb: add XDP support")
Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
[Kurt: Add lockdep assert and fixes tag]
Signed-off-by: Kurt Kanzenbach <kurt@linutronix.de>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 17
1 file changed, 13 insertions(+), 4 deletions(-)
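For context, here is a minimal sketch of the locking discipline this patch enforces,
simplified from the patched driver and not a standalone compilable unit;
igb_xdp_ring_update_tail() and txring_txq() exist in the igb driver, while the
igb_finish_xdp_tx() wrapper below is a hypothetical name used only for illustration:

/*
 * Sketch (assumption: condensed from the patched igb driver). Every caller
 * of igb_xdp_ring_update_tail() must hold the netdev TX queue lock of the
 * ring it touches; the lockdep assert catches any path that forgets the
 * lock when CONFIG_PROVE_LOCKING is enabled.
 */
static void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
        lockdep_assert_held(&txring_txq(ring)->_xmit_lock);

        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.
         */
        wmb();
        writel(ring->next_to_use, ring->tail);
}

/* Hypothetical caller-side helper showing the required lock/update/unlock
 * order, as used on the Rx clean (XDP_TX) path in this patch.
 */
static void igb_finish_xdp_tx(struct igb_ring *tx_ring)
{
        struct netdev_queue *nq = txring_txq(tx_ring);

        __netif_tx_lock(nq, smp_processor_id());
        igb_xdp_ring_update_tail(tx_ring);
        __netif_tx_unlock(nq);
}

On the ndo_xdp_xmit path the queue lock is already held while descriptors are
queued, so the patch simply moves the tail update before __netif_tx_unlock()
instead of taking the lock a second time.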
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 76bd41058f3a..f2f719a952f8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -34,6 +34,7 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -2915,8 +2916,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+/* This function assumes __netif_tx_lock is held by the caller. */
static void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*/
@@ -3001,11 +3005,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nxmit++;
}
- __netif_tx_unlock(nq);
-
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+
return nxmit;
}
@@ -8829,12 +8833,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ unsigned int total_bytes = 0, total_packets = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
+ int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
+ struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
@@ -8962,7 +8968,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
}
u64_stats_update_begin(&rx_ring->rx_syncp);