| | | |
|---|---|---|
| author | Jakub Kicinski <kuba@kernel.org> | 2025-03-25 20:04:55 +0300 |
| committer | Jakub Kicinski <kuba@kernel.org> | 2025-03-25 20:06:49 +0300 |
| commit | 7bd2e6b74ad56a49459ba84e8d4fa3730055ab5e (patch) | |
| tree | f3a934ee1001a31e62f38e7cef6c014efa7f6f56 /include | |
| parent | 51068769cc8c699eaba7d411f214bc969b35708b (diff) | |
| parent | b52458652eca5a551ddb55605201b136f091b04d (diff) | |
| download | linux-7bd2e6b74ad56a49459ba84e8d4fa3730055ab5e.tar.xz | |
Merge branch 'net-skip-taking-rtnl_lock-for-queue-get'
Jakub Kicinski says:
====================
net: skip taking rtnl_lock for queue GET (prep)
Skip taking rtnl_lock for queue GET ops on devices which opt
into running all ops under the instance lock. In preparation
for performing queue ops without the rtnl lock, clarify the
protection of queue-related fields.
v1: https://lore.kernel.org/20250312223507.805719-1-kuba@kernel.org
====================
Link: https://patch.msgid.link/20250324224537.248800-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
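
A rough illustration of the rule described in the cover letter (not part of the patch; `example_queue_get()` is a hypothetical caller): a queue GET reader on a device that opted into the instance lock only needs `dev->lock`, while other devices still rely on rtnl_lock.

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/netdev_lock.h>

/* Hypothetical queue GET reader, shown only to illustrate the rule above. */
static void example_queue_get(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev)) {
		/* Device opted into the instance lock: rtnl_lock not needed. */
		netdev_lock(dev);
		/* ... read "ops protected" queue state here ... */
		netdev_unlock(dev);
	} else {
		/* Legacy device: queue state is still protected by rtnl_lock. */
		rtnl_lock();
		/* ... read queue state under RTNL ... */
		rtnl_unlock();
	}
}
```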
Diffstat (limited to 'include')
| | | |
|---|---|---|
| -rw-r--r-- | include/linux/netdevice.h | 44 |
| -rw-r--r-- | include/net/netdev_lock.h | 20 |
| -rw-r--r-- | include/net/netdev_rx_queue.h | 2 |
3 files changed, 44 insertions, 22 deletions
```diff
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f22cca7c03ad..fa79145518d1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -710,7 +710,7 @@ struct netdev_queue {
 	 * slow- / control-path part
 	 */
 	/* NAPI instance for the queue
-	 * Readers and writers must hold RTNL
+	 * "ops protected", see comment about net_device::lock
 	 */
 	struct napi_struct	*napi;
 
@@ -2496,18 +2496,38 @@ struct net_device {
 	 * Should always be taken using netdev_lock() / netdev_unlock() helpers.
 	 * Drivers are free to use it for other protection.
 	 *
-	 * Protects:
+	 * For the drivers that implement shaper or queue API, the scope
+	 * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
+	 * operations. Drivers may opt-in to this behavior by setting
+	 * @request_ops_lock.
+	 *
+	 * @lock protection mixes with rtnl_lock in multiple ways, fields are
+	 * either:
+	 *
+	 *  - simply protected by the instance @lock;
+	 *
+	 *  - double protected - writers hold both locks, readers hold either;
+	 *
+	 *  - ops protected - protected by the lock held around the NDOs
+	 *    and other callbacks, that is the instance lock on devices for
+	 *    which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
+	 *
+	 *  - double ops protected - always protected by rtnl_lock but for
+	 *    devices for which netdev_need_ops_lock() returns true - also
+	 *    the instance lock.
+	 *
+	 * Simply protects:
 	 *	@gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
 	 *	@net_shaper_hierarchy, @reg_state, @threaded
 	 *
-	 * Partially protects (writers must hold both @lock and rtnl_lock):
+	 * Double protects:
 	 *	@up
 	 *
-	 * Also protects some fields in struct napi_struct.
+	 * Double ops protects:
+	 *	@real_num_rx_queues, @real_num_tx_queues
 	 *
-	 * For the drivers that implement shaper or queue API, the scope
-	 * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
-	 * operations.
+	 * Also protects some fields in:
+	 *	struct napi_struct, struct netdev_queue, struct netdev_rx_queue
 	 *
 	 * Ordering: take after rtnl_lock.
 	 */
@@ -4062,17 +4082,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
 }
 
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
-
-#ifdef CONFIG_SYSFS
 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
-#else
-static inline int netif_set_real_num_rx_queues(struct net_device *dev,
-						unsigned int rxqs)
-{
-	dev->real_num_rx_queues = rxqs;
-	return 0;
-}
-#endif
 int netif_set_real_num_queues(struct net_device *dev,
 			      unsigned int txq, unsigned int rxq);
 
diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h
index 99631fbd7f54..1c0c9a94cc22 100644
--- a/include/net/netdev_lock.h
+++ b/include/net/netdev_lock.h
@@ -5,25 +5,27 @@
 
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
 
 static inline bool netdev_trylock(struct net_device *dev)
 {
 	return mutex_trylock(&dev->lock);
 }
 
-static inline void netdev_assert_locked(struct net_device *dev)
+static inline void netdev_assert_locked(const struct net_device *dev)
 {
 	lockdep_assert_held(&dev->lock);
 }
 
-static inline void netdev_assert_locked_or_invisible(struct net_device *dev)
+static inline void
+netdev_assert_locked_or_invisible(const struct net_device *dev)
 {
 	if (dev->reg_state == NETREG_REGISTERED ||
 	    dev->reg_state == NETREG_UNREGISTERING)
 		netdev_assert_locked(dev);
 }
 
-static inline bool netdev_need_ops_lock(struct net_device *dev)
+static inline bool netdev_need_ops_lock(const struct net_device *dev)
 {
 	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;
 
@@ -46,10 +48,20 @@ static inline void netdev_unlock_ops(struct net_device *dev)
 		netdev_unlock(dev);
 }
 
-static inline void netdev_ops_assert_locked(struct net_device *dev)
+static inline void netdev_ops_assert_locked(const struct net_device *dev)
 {
 	if (netdev_need_ops_lock(dev))
 		lockdep_assert_held(&dev->lock);
+	else
+		ASSERT_RTNL();
+}
+
+static inline void
+netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
+{
+	if (dev->reg_state == NETREG_REGISTERED ||
+	    dev->reg_state == NETREG_UNREGISTERING)
+		netdev_ops_assert_locked(dev);
 }
 
 static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index af40842f229d..b2238b551dce 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -24,7 +24,7 @@ struct netdev_rx_queue {
 	struct xsk_buff_pool		*pool;
 #endif
 	/* NAPI instance for the queue
-	 * Readers and writers must hold RTNL
+	 * "ops protected", see comment about net_device::lock
 	 */
 	struct napi_struct		*napi;
 	struct pp_memory_provider_params mp_params;
```
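
A sketch of how the protection classes documented in the net_device::lock comment translate to callers. Illustrative only: `example_set_real_num_rx()` and `example_rxq_napi()` are hypothetical, and `netdev_lock_ops()` is assumed to be the counterpart of the `netdev_unlock_ops()` helper visible in the hunk context above.

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>

/* "Double ops protected" write: rtnl_lock is always held, and the
 * instance lock is also taken on devices that opted into ops locking.
 */
static int example_set_real_num_rx(struct net_device *dev, unsigned int rxq)
{
	int err;

	rtnl_lock();
	netdev_lock_ops(dev);	/* no-op unless netdev_need_ops_lock(dev) */
	err = netif_set_real_num_rx_queues(dev, rxq);
	netdev_unlock_ops(dev);
	rtnl_unlock();
	return err;
}

/* "Ops protected" read: the callee only asserts that whichever lock
 * applies (instance lock or rtnl_lock) is already held by the caller.
 */
static struct napi_struct *example_rxq_napi(struct netdev_rx_queue *rxq)
{
	netdev_ops_assert_locked(rxq->dev);
	return rxq->napi;
}
```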
