summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	Stanislav Fomichev <sdf.kernel@gmail.com>	2026-04-16 21:57:01 +0300
committer	Paolo Abeni <pabeni@redhat.com>	2026-04-21 13:50:24 +0300
commit	7ef83bf1712b5c441a21d5df844202433f6b0b05 (patch)
tree	822645c9edb090595a2c129033795567a0eeada4
parent	a4c833278144917982510ca43a3438155756122a (diff)
download	linux-7ef83bf1712b5c441a21d5df844202433f6b0b05.tar.xz
net: move promiscuity handling into netdev_rx_mode_work
Move unicast promiscuity tracking into netdev_rx_mode_work so it runs
under netdev_ops_lock instead of under the addr_lock spinlock. This is
required because __dev_set_promiscuity calls dev_change_rx_flags and
__dev_notify_flags, both of which may need to sleep.

Change ASSERT_RTNL() to netdev_ops_assert_locked() in
__dev_set_promiscuity, netif_set_allmulti and __dev_change_flags since
these are now called from the work queue under the ops lock.

Link: https://lore.kernel.org/netdev/20260214033859.43857-1-jiayuan.chen@linux.dev/
Fixes: 78cd408356fe ("net: add missing instance lock to dev_set_promiscuity")
Reported-by: syzbot+2b3391f44313b3983e91@syzkaller.appspotmail.com
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20260416185712.2155425-5-sdf@fomichev.me
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-rw-r--r--Documentation/networking/netdevices.rst4
-rw-r--r--net/core/dev.c16
-rw-r--r--net/core/dev_addr_lists.c82
3 files changed, 68 insertions, 34 deletions
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index e89b12d4f3a7..93e06e8d51a9 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -299,6 +299,10 @@ ndo_set_rx_mode_async:
Notes: Async version of ndo_set_rx_mode which runs in process
context. Receives snapshots of the unicast and multicast address lists.
+ndo_change_rx_flags:
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
+
ndo_setup_tc:
``TC_SETUP_BLOCK`` and ``TC_SETUP_FT`` are running under NFT locks
(i.e. no ``rtnl_lock`` and no device instance lock). The rest of
diff --git a/net/core/dev.c b/net/core/dev.c
index 8597ec56fd64..8a69aed56fca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9600,7 +9600,7 @@ int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
kuid_t uid;
kgid_t gid;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(dev);
promiscuity = dev->promiscuity + inc;
if (promiscuity == 0) {
@@ -9636,16 +9636,8 @@ int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
dev_change_rx_flags(dev, IFF_PROMISC);
}
- if (notify) {
- /* The ops lock is only required to ensure consistent locking
- * for `NETDEV_CHANGE` notifiers. This function is sometimes
- * called without the lock, even for devices that are ops
- * locked, such as in `dev_uc_sync_multiple` when using
- * bonding or teaming.
- */
- netdev_ops_assert_locked(dev);
+ if (notify)
__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
- }
return 0;
}
@@ -9667,7 +9659,7 @@ int netif_set_allmulti(struct net_device *dev, int inc, bool notify)
unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
unsigned int allmulti, flags;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(dev);
allmulti = dev->allmulti + inc;
if (allmulti == 0) {
@@ -9735,7 +9727,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags,
unsigned int old_flags = dev->flags;
int ret;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(dev);
/*
* Set the flags on our device.
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 7bab2ed0f625..4c9e8a69493f 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -1229,10 +1229,34 @@ static void netif_addr_lists_reconcile(struct net_device *dev,
&dev->rx_mode_addr_cache);
}
+/**
+ * netif_uc_promisc_update() - evaluate whether uc_promisc should be toggled.
+ * @dev: device
+ *
+ * Must be called under netif_addr_lock_bh.
+ * Return: +1 to enter promisc, -1 to leave, 0 for no change.
+ */
+static int netif_uc_promisc_update(struct net_device *dev)
+{
+ if (dev->priv_flags & IFF_UNICAST_FLT)
+ return 0;
+
+ if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
+ dev->uc_promisc = true;
+ return 1;
+ }
+ if (netdev_uc_empty(dev) && dev->uc_promisc) {
+ dev->uc_promisc = false;
+ return -1;
+ }
+ return 0;
+}
+
static void netif_rx_mode_run(struct net_device *dev)
{
struct netdev_hw_addr_list uc_snap, mc_snap, uc_ref, mc_ref;
const struct net_device_ops *ops = dev->netdev_ops;
+ int promisc_inc;
int err;
might_sleep();
@@ -1246,22 +1270,39 @@ static void netif_rx_mode_run(struct net_device *dev)
if (!(dev->flags & IFF_UP) || !netif_device_present(dev))
return;
- netif_addr_lock_bh(dev);
- err = netif_addr_lists_snapshot(dev, &uc_snap, &mc_snap,
- &uc_ref, &mc_ref);
- if (err) {
- netdev_WARN(dev, "failed to sync uc/mc addresses\n");
+ if (ops->ndo_set_rx_mode_async) {
+ netif_addr_lock_bh(dev);
+ err = netif_addr_lists_snapshot(dev, &uc_snap, &mc_snap,
+ &uc_ref, &mc_ref);
+ if (err) {
+ netdev_WARN(dev, "failed to sync uc/mc addresses\n");
+ netif_addr_unlock_bh(dev);
+ return;
+ }
+
+ promisc_inc = netif_uc_promisc_update(dev);
+ netif_addr_unlock_bh(dev);
+ } else {
+ netif_addr_lock_bh(dev);
+ promisc_inc = netif_uc_promisc_update(dev);
netif_addr_unlock_bh(dev);
- return;
}
- netif_addr_unlock_bh(dev);
- ops->ndo_set_rx_mode_async(dev, &uc_snap, &mc_snap);
+ if (promisc_inc)
+ __dev_set_promiscuity(dev, promisc_inc, false);
- netif_addr_lock_bh(dev);
- netif_addr_lists_reconcile(dev, &uc_snap, &mc_snap,
- &uc_ref, &mc_ref);
- netif_addr_unlock_bh(dev);
+ if (ops->ndo_set_rx_mode_async) {
+ ops->ndo_set_rx_mode_async(dev, &uc_snap, &mc_snap);
+
+ netif_addr_lock_bh(dev);
+ netif_addr_lists_reconcile(dev, &uc_snap, &mc_snap,
+ &uc_ref, &mc_ref);
+ netif_addr_unlock_bh(dev);
+ } else if (ops->ndo_set_rx_mode) {
+ netif_addr_lock_bh(dev);
+ ops->ndo_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ }
}
static void netdev_rx_mode_work(struct work_struct *work)
@@ -1320,6 +1361,7 @@ static void netif_rx_mode_queue(struct net_device *dev)
void __dev_set_rx_mode(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ int promisc_inc;
/* dev_open will call this function so the list will stay sane. */
if (!(dev->flags & IFF_UP))
@@ -1328,20 +1370,16 @@ void __dev_set_rx_mode(struct net_device *dev)
if (!netif_device_present(dev))
return;
- if (ops->ndo_set_rx_mode_async) {
+ if (ops->ndo_set_rx_mode_async || ops->ndo_change_rx_flags) {
netif_rx_mode_queue(dev);
return;
}
- if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
- if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
- __dev_set_promiscuity(dev, 1, false);
- dev->uc_promisc = true;
- } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
- __dev_set_promiscuity(dev, -1, false);
- dev->uc_promisc = false;
- }
- }
+ /* Legacy path for non-ops-locked HW devices. */
+
+ promisc_inc = netif_uc_promisc_update(dev);
+ if (promisc_inc)
+ __dev_set_promiscuity(dev, promisc_inc, false);
if (ops->ndo_set_rx_mode)
ops->ndo_set_rx_mode(dev);