author	Jiri Kosina <jkosina@suse.com>	2026-04-16 22:01:18 +0300
committer	Jiri Kosina <jkosina@suse.com>	2026-04-16 22:01:18 +0300
commit	d4eb7b2da66c848709e31585b9c371fa234abc39 (patch)
tree	5c6311cfa45567ecdcb8946a2252da7d55d1ac10 /include/linux/netdevice.h
parent	8b9a097eb2fc37b486afd81388c693bf3ab44466 (diff)
parent	69c02ffde6ed4d535fa4e693a9e572729cad3d0d (diff)
Merge branch 'for-7.1/core-v2' into for-linus
- fixed handling of 0-sized reports (Dmitry Torokhov)
- convert core code to __free() (Dmitry Torokhov)
- support for multiple batteries per HID device (Lucas Zampieri)
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	59
1 file changed, 54 insertions(+), 5 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d4e6e00bb90a..ae269a2e7f4d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3576,17 +3576,49 @@ struct page_pool_bh {
};
DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
+#define XMIT_RECURSION_LIMIT 8
+
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
+
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
#else
static inline int dev_recursion_level(void)
{
return current->net_xmit.recursion;
}
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ current->net_xmit.recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ current->net_xmit.recursion--;
+}
#endif
void __netif_schedule(struct Qdisc *q);
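These helpers bound re-entry into the transmit path: a stacked device (a tunnel or VLAN, say) transmits by calling back into the xmit path of its lower device, and without a cap a misconfigured device loop would recurse until the kernel stack overflows. The counter is per-CPU normally, and per-task under PREEMPT_RT, where the transmit path is preemptible and may migrate between CPUs. Below is a minimal userspace sketch of the same guard pattern, assuming a thread-local counter in place of softnet_data.xmit.recursion; xmit_one is illustrative, not a kernel function:

/* Userspace analogue of the xmit recursion guard: a thread-local
 * counter stands in for softnet_data.xmit.recursion. */
#include <stdbool.h>
#include <stdio.h>

#define XMIT_RECURSION_LIMIT 8

static _Thread_local int xmit_recursion;

static bool xmit_recursion_exceeded(void)
{
	return xmit_recursion > XMIT_RECURSION_LIMIT;
}

/* A stacked device re-enters the transmit path while already
 * transmitting; the counter bounds that nesting instead of letting
 * it recurse without limit. */
static int xmit_one(int depth)
{
	int ret;

	if (xmit_recursion_exceeded())
		return -1;	/* the kernel drops the skb at this point */

	xmit_recursion++;
	/* a tunnel transmitting its encapsulated frame re-enters here */
	ret = depth > 0 ? xmit_one(depth - 1) : 0;
	xmit_recursion--;

	return ret;
}

int main(void)
{
	printf("shallow stack: %d\n", xmit_one(3));	/* ok: 0 */
	printf("deep stack:    %d\n", xmit_one(20));	/* trips the limit: -1 */
	return 0;
}

This mirrors the caller pattern the helpers imply: test dev_xmit_recursion() before entering, bracket the actual transmit with the inc/dec pair.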
@@ -4711,7 +4743,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
@@ -4729,7 +4761,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
@@ -4738,7 +4770,7 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok)) {
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
return ok;
@@ -4746,14 +4778,14 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
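These comment updates retarget the pairing note: the lockless READ_ONCE() of xmit_lock_owner now lives in the new netif_tx_owned() helper (added below) rather than being open-coded in __dev_queue_xmit(). The WRITE_ONCE()/READ_ONCE() pair keeps the compiler from tearing, fusing, or caching accesses to a field that other CPUs read without holding the lock. A userspace analogue of the owner-publishing side, assuming a pthread mutex in place of the queue spinlock and modelling the kernel macros as volatile accesses (which is essentially what they compile down to for word-sized fields):

#include <pthread.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))

struct txq {
	pthread_mutex_t _xmit_lock;
	int xmit_lock_owner;		/* -1 while unowned */
};

static void txq_lock(struct txq *txq, int cpu)
{
	pthread_mutex_lock(&txq->_xmit_lock);
	/* Publish ownership; pairs with the lockless READ_ONCE()
	 * in txq_owned() in the continuation below. */
	WRITE_ONCE(txq->xmit_lock_owner, cpu);
}

static void txq_unlock(struct txq *txq)
{
	WRITE_ONCE(txq->xmit_lock_owner, -1);
	pthread_mutex_unlock(&txq->_xmit_lock);
}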
@@ -4846,6 +4878,23 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_enable();
}
+#ifndef CONFIG_PREEMPT_RT
+static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
+{
+ /* Other cpus might concurrently change txq->xmit_lock_owner
+ * to -1 or to their cpu id, but not to our id.
+ */
+ return READ_ONCE(txq->xmit_lock_owner) == cpu;
+}
+
+#else
+static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
+{
+ return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
+}
+
+#endif
+
static inline void netif_addr_lock(struct net_device *dev)
{
unsigned char nest_level = 0;
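netif_tx_owned() answers "does this context already hold the queue's xmit lock?" without taking the lock: on !PREEMPT_RT the owner field identifies a CPU, while on PREEMPT_RT, where spinlocks are rtmutex-based and contexts can migrate between CPUs, the rtmutex owner identifies a task. Its presumed caller is the self-deadlock check in the transmit path: if the queue is already owned here, the frame has looped back onto the same queue and must be dropped rather than locked again. Continuing the userspace sketch above (txq_owned and txq_xmit are illustrative, not kernel APIs):

#include <stdio.h>

/* Lockless ownership check: other threads may flip xmit_lock_owner
 * between -1 and their own id at any time, but never to *our* id,
 * so a single untorn load answers the question reliably. */
static int txq_owned(struct txq *txq, int cpu)
{
	return READ_ONCE(txq->xmit_lock_owner) == cpu;
}

static struct txq queue = {
	._xmit_lock = PTHREAD_MUTEX_INITIALIZER,
	.xmit_lock_owner = -1,
};

static int txq_xmit(struct txq *txq, int cpu)
{
	if (txq_owned(txq, cpu)) {
		/* The frame looped back onto a queue this context already
		 * holds; locking again would deadlock, so drop instead
		 * (the kernel rate-limits a "dead loop" log here). */
		fprintf(stderr, "dead loop on tx queue, dropping\n");
		return -1;
	}

	txq_lock(txq, cpu);
	/* ... hand the frame to the driver ... */
	txq_unlock(txq);
	return 0;
}

int main(void)
{
	return txq_xmit(&queue, 0) ? 1 : 0;
}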