From a435163d3100b044d620990772a5ce1684ff02ca Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 2 Mar 2026 18:14:32 +0000 Subject: net-sysfs: use rps_tag_ptr and remove metadata from rps_dev_flow_table Instead of storing the @log at the beginning of rps_dev_flow_table, use 5 low order bits of the rps_tag_ptr to store the log of the size. This removes a potential cache line miss (for light traffic). This allows us to switch to one high-order allocation instead of vmalloc() when CONFIG_RFS_ACCEL is not set. Signed-off-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20260302181432.1836150-8-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/net/netdev_rx_queue.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/net/netdev_rx_queue.h') diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index cfa72c485387..08f81329fc11 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -8,13 +8,14 @@ #include #include #include +#include /* This structure contains an instance of an RX queue. */ struct netdev_rx_queue { struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_RPS struct rps_map __rcu *rps_map; - struct rps_dev_flow_table __rcu *rps_flow_table; + rps_tag_ptr rps_flow_table; #endif struct kobject kobj; const struct attribute_group **groups; -- cgit v1.2.3 From d04686d9bc86432ea3008d5f358373d8466d1943 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 3 Apr 2026 01:10:19 +0200 Subject: net: Implement netdev_nl_queue_create_doit Implement netdev_nl_queue_create_doit, which creates a new rx queue in a virtual netdev and then leases it to an rx queue in a physical netdev. Example with ynl client: # ynl --family netdev --output-json --do queue-create \ --json '{"ifindex": 8, "type": "rx", "lease": {"ifindex": 4, "queue": {"type": "rx", "id": 15}}}' {'id': 1} Note that the netdevice locking order is always from the virtual to the physical device. Signed-off-by: Daniel Borkmann Co-developed-by: David Wei Signed-off-by: David Wei Reviewed-by: Nikolay Aleksandrov Link: https://patch.msgid.link/20260402231031.447597-3-daniel@iogearbox.net Signed-off-by: Jakub Kicinski --- Documentation/networking/netdevices.rst | 6 ++ include/linux/netdevice.h | 9 +- include/net/netdev_queues.h | 19 +++- include/net/netdev_rx_queue.h | 15 ++- net/core/dev.c | 8 ++ net/core/dev.h | 5 + net/core/netdev-genl.c | 164 +++++++++++++++++++++++++++++++- net/core/netdev_queues.c | 62 ++++++++++++ net/core/netdev_rx_queue.c | 46 ++++++++- 9 files changed, 323 insertions(+), 11 deletions(-) (limited to 'include/net/netdev_rx_queue.h') diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst index 35704d115312..83e28b96884f 100644 --- a/Documentation/networking/netdevices.rst +++ b/Documentation/networking/netdevices.rst @@ -329,6 +329,12 @@ by setting ``request_ops_lock`` to true. Code comments and docs refer to drivers which have ops called under the instance lock as "ops locked". See also the documentation of the ``lock`` member of struct net_device. +There is also a case of taking two per-netdev locks in sequence when netdev +queues are leased, that is, the netdev-scope lock is taken for both the +virtual and the physical device. To prevent deadlocks, the virtual device's +lock must always be acquired before the physical device's (see +``netdev_nl_queue_create_doit``).
+ In the future, there will be an option for individual drivers to opt out of using ``rtnl_lock`` and instead perform their control operations directly under the netdev instance lock. diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e15367373f7c..e8aa9cc4075d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2561,7 +2561,14 @@ struct net_device { * Also protects some fields in: * struct napi_struct, struct netdev_queue, struct netdev_rx_queue * - * Ordering: take after rtnl_lock. + * Ordering: + * + * - take after rtnl_lock + * + * - for the case of netdev queue leasing, the netdev-scope lock is + * taken for both the virtual and the physical device; to prevent + * deadlocks, the virtual device's lock must always be acquired + * before the physical device's (see netdev_nl_queue_create_doit) */ struct mutex lock; diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index 95ed28212f4e..748b70552ed1 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -150,6 +150,11 @@ enum { * When NIC-wide config is changed the callback will * be invoked for all queues. * + * @ndo_queue_create: Create a new RX queue on a virtual device that will + * be paired with a physical device's queue via leasing. + * Return the new queue id on success, negative error + * on failure. + * * @supported_params: Bitmask of supported parameters, see QCFG_*. * * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while @@ -178,6 +183,8 @@ struct netdev_queue_mgmt_ops { struct netlink_ext_ack *extack); struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev, int idx); + int (*ndo_queue_create)(struct net_device *dev, + struct netlink_ext_ack *extack); unsigned int supported_params; }; @@ -185,7 +192,7 @@ struct netdev_queue_mgmt_ops { void netdev_queue_config(struct net_device *dev, int rxq, struct netdev_queue_config *qcfg); -bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx); +bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx); /** * DOC: Lockless queue stopping / waking helpers. @@ -374,5 +381,11 @@ static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq) }) struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx); - -#endif +bool netdev_can_create_queue(const struct net_device *dev, + struct netlink_ext_ack *extack); +bool netdev_can_lease_queue(const struct net_device *dev, + struct netlink_ext_ack *extack); +bool netdev_queue_busy(struct net_device *dev, unsigned int idx, + enum netdev_queue_type type, + struct netlink_ext_ack *extack); +#endif /* _LINUX_NET_QUEUES_H */ diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index 08f81329fc11..1d41c253f0a3 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -31,6 +31,14 @@ struct netdev_rx_queue { struct napi_struct *napi; struct netdev_queue_config qcfg; struct pp_memory_provider_params mp_params; + + /* If a queue is leased, then the lease pointer is always + * valid. From the physical device it points to the virtual + * queue, and from the virtual device it points to the + * physical queue. 
+ */ + struct netdev_rx_queue *lease; + netdevice_tracker lease_tracker; } ____cacheline_aligned_in_smp; /* @@ -60,5 +68,8 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue) } int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq); - -#endif +void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst, + struct netdev_rx_queue *rxq_src); +void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst, + struct netdev_rx_queue *rxq_src); +#endif /* _LINUX_NETDEV_RX_QUEUE_H */ diff --git a/net/core/dev.c b/net/core/dev.c index 5a31f9d2128c..cc7bcac892af 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1121,6 +1121,14 @@ netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex) return __netdev_put_lock_ops_compat(dev, net); } +struct net_device * +netdev_put_lock(struct net_device *dev, struct net *net, + netdevice_tracker *tracker) +{ + netdev_tracker_free(dev, tracker); + return __netdev_put_lock(dev, net); +} + struct net_device * netdev_xa_find_lock(struct net *net, struct net_device *dev, unsigned long *index) diff --git a/net/core/dev.h b/net/core/dev.h index 781619e76b3e..6516ce2b5517 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -31,6 +31,8 @@ netdev_napi_by_id_lock(struct net *net, unsigned int napi_id); struct net_device *dev_get_by_napi_id(unsigned int napi_id); struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net); +struct net_device *netdev_put_lock(struct net_device *dev, struct net *net, + netdevice_tracker *tracker); struct net_device * netdev_xa_find_lock(struct net *net, struct net_device *dev, unsigned long *index); @@ -96,6 +98,9 @@ int netdev_queue_config_validate(struct net_device *dev, int rxq_idx, struct netdev_queue_config *qcfg, struct netlink_ext_ack *extack); +bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx); +bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx); + /* netdev management, shared between various uAPI entry points */ struct netdev_name_node { struct hlist_node hlist; diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index aae75431858d..5d5e5b9a8af0 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -1122,7 +1122,169 @@ err_genlmsg_free: int netdev_nl_queue_create_doit(struct sk_buff *skb, struct genl_info *info) { - return -EOPNOTSUPP; + const int qmaxtype = ARRAY_SIZE(netdev_queue_id_nl_policy) - 1; + const int lmaxtype = ARRAY_SIZE(netdev_lease_nl_policy) - 1; + int err, ifindex, ifindex_lease, queue_id, queue_id_lease; + struct nlattr *qtb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; + struct nlattr *ltb[ARRAY_SIZE(netdev_lease_nl_policy)]; + struct netdev_rx_queue *rxq, *rxq_lease; + struct net_device *dev, *dev_lease; + netdevice_tracker dev_tracker; + s32 netns_lease = -1; + struct nlattr *nest; + struct sk_buff *rsp; + struct net *net; + void *hdr; + + if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX) || + GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) || + GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_LEASE)) + return -EINVAL; + if (nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]) != + NETDEV_QUEUE_TYPE_RX) { + NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QUEUE_TYPE]); + return -EINVAL; + } + + ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); + + nest = info->attrs[NETDEV_A_QUEUE_LEASE]; + err = nla_parse_nested(ltb, lmaxtype, nest, + netdev_lease_nl_policy, info->extack); + if (err < 0) + return err; + if (NL_REQ_ATTR_CHECK(info->extack, nest, ltb, NETDEV_A_LEASE_IFINDEX) || + NL_REQ_ATTR_CHECK(info->extack, 
nest, ltb, NETDEV_A_LEASE_QUEUE)) + return -EINVAL; + if (ltb[NETDEV_A_LEASE_NETNS_ID]) { + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + netns_lease = nla_get_s32(ltb[NETDEV_A_LEASE_NETNS_ID]); + } + + ifindex_lease = nla_get_u32(ltb[NETDEV_A_LEASE_IFINDEX]); + + nest = ltb[NETDEV_A_LEASE_QUEUE]; + err = nla_parse_nested(qtb, qmaxtype, nest, + netdev_queue_id_nl_policy, info->extack); + if (err < 0) + return err; + if (NL_REQ_ATTR_CHECK(info->extack, nest, qtb, NETDEV_A_QUEUE_ID) || + NL_REQ_ATTR_CHECK(info->extack, nest, qtb, NETDEV_A_QUEUE_TYPE)) + return -EINVAL; + if (nla_get_u32(qtb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) { + NL_SET_BAD_ATTR(info->extack, qtb[NETDEV_A_QUEUE_TYPE]); + return -EINVAL; + } + + queue_id_lease = nla_get_u32(qtb[NETDEV_A_QUEUE_ID]); + + rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!rsp) + return -ENOMEM; + + hdr = genlmsg_iput(rsp, info); + if (!hdr) { + err = -EMSGSIZE; + goto err_genlmsg_free; + } + + /* Locking order is always from the virtual to the physical device + * since this is also the same order when applications open the + * memory provider later on. + */ + dev = netdev_get_by_index_lock(genl_info_net(info), ifindex); + if (!dev) { + err = -ENODEV; + goto err_genlmsg_free; + } + if (!netdev_can_create_queue(dev, info->extack)) { + err = -EINVAL; + goto err_unlock_dev; + } + + net = genl_info_net(info); + if (netns_lease >= 0) { + net = get_net_ns_by_id(net, netns_lease); + if (!net) { + err = -ENONET; + goto err_unlock_dev; + } + } + + dev_lease = netdev_get_by_index(net, ifindex_lease, &dev_tracker, + GFP_KERNEL); + if (!dev_lease) { + err = -ENODEV; + goto err_put_netns; + } + if (!netdev_can_lease_queue(dev_lease, info->extack)) { + netdev_put(dev_lease, &dev_tracker); + err = -EINVAL; + goto err_put_netns; + } + + dev_lease = netdev_put_lock(dev_lease, net, &dev_tracker); + if (!dev_lease) { + err = -ENODEV; + goto err_put_netns; + } + if (queue_id_lease >= dev_lease->real_num_rx_queues) { + err = -ERANGE; + NL_SET_BAD_ATTR(info->extack, qtb[NETDEV_A_QUEUE_ID]); + goto err_unlock_dev_lease; + } + if (netdev_queue_busy(dev_lease, queue_id_lease, NETDEV_QUEUE_TYPE_RX, + info->extack)) { + err = -EBUSY; + goto err_unlock_dev_lease; + } + + rxq_lease = __netif_get_rx_queue(dev_lease, queue_id_lease); + rxq = __netif_get_rx_queue(dev, dev->real_num_rx_queues - 1); + + /* Leasing queues from different physical devices is currently + * not supported. Capabilities such as XDP features and DMA + * device may differ between physical devices, and computing + * a correct intersection for the virtual device is not yet + * implemented. 
+ */ + if (rxq->lease && rxq->lease->dev != dev_lease) { + err = -EOPNOTSUPP; + NL_SET_ERR_MSG(info->extack, + "Leasing queues from different devices not supported"); + goto err_unlock_dev_lease; + } + + queue_id = dev->queue_mgmt_ops->ndo_queue_create(dev, info->extack); + if (queue_id < 0) { + err = queue_id; + goto err_unlock_dev_lease; + } + rxq = __netif_get_rx_queue(dev, queue_id); + + netdev_rx_queue_lease(rxq, rxq_lease); + + nla_put_u32(rsp, NETDEV_A_QUEUE_ID, queue_id); + genlmsg_end(rsp, hdr); + + netdev_unlock(dev_lease); + netdev_unlock(dev); + if (netns_lease >= 0) + put_net(net); + + return genlmsg_reply(rsp, info); + +err_unlock_dev_lease: + netdev_unlock(dev_lease); +err_put_netns: + if (netns_lease >= 0) + put_net(net); +err_unlock_dev: + netdev_unlock(dev); +err_genlmsg_free: + nlmsg_free(rsp); + return err; } void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv) diff --git a/net/core/netdev_queues.c b/net/core/netdev_queues.c index 251f27a8307f..177401828e79 100644 --- a/net/core/netdev_queues.c +++ b/net/core/netdev_queues.c @@ -1,6 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include +#include +#include + +#include "dev.h" /** * netdev_queue_get_dma_dev() - get dma device for zero-copy operations @@ -25,3 +29,61 @@ struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx) return dma_dev && dma_dev->dma_mask ? dma_dev : NULL; } +bool netdev_can_create_queue(const struct net_device *dev, + struct netlink_ext_ack *extack) +{ + if (dev->dev.parent) { + NL_SET_ERR_MSG(extack, "Device is not a virtual device"); + return false; + } + if (!dev->queue_mgmt_ops || + !dev->queue_mgmt_ops->ndo_queue_create) { + NL_SET_ERR_MSG(extack, "Device does not support queue creation"); + return false; + } + if (dev->real_num_rx_queues < 1 || + dev->real_num_tx_queues < 1) { + NL_SET_ERR_MSG(extack, "Device must have at least one real queue"); + return false; + } + return true; +} + +bool netdev_can_lease_queue(const struct net_device *dev, + struct netlink_ext_ack *extack) +{ + if (!dev->dev.parent) { + NL_SET_ERR_MSG(extack, "Lease device is a virtual device"); + return false; + } + if (!netif_device_present(dev)) { + NL_SET_ERR_MSG(extack, "Lease device has been removed from the system"); + return false; + } + if (!dev->queue_mgmt_ops) { + NL_SET_ERR_MSG(extack, "Lease device does not support queue management operations"); + return false; + } + return true; +} + +bool netdev_queue_busy(struct net_device *dev, unsigned int idx, + enum netdev_queue_type type, + struct netlink_ext_ack *extack) +{ + if (xsk_get_pool_from_qid(dev, idx)) { + NL_SET_ERR_MSG(extack, "Device queue in use by AF_XDP"); + return true; + } + if (type == NETDEV_QUEUE_TYPE_TX) + return false; + if (netif_rxq_is_leased(dev, idx)) { + NL_SET_ERR_MSG(extack, "Device queue in use due to queue leasing"); + return true; + } + if (netif_rxq_has_mp(dev, idx)) { + NL_SET_ERR_MSG(extack, "Device queue in use by memory provider"); + return true; + } + return false; +} diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c index 668a90658f25..a1f23c2c96d4 100644 --- a/net/core/netdev_rx_queue.c +++ b/net/core/netdev_rx_queue.c @@ -10,15 +10,53 @@ #include "dev.h" #include "page_pool_priv.h" -/* See also page_pool_is_unreadable() */ -bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx) +void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst, + struct netdev_rx_queue *rxq_src) { - struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx); + 
netdev_assert_locked(rxq_src->dev); + netdev_assert_locked(rxq_dst->dev); + + netdev_hold(rxq_src->dev, &rxq_src->lease_tracker, GFP_KERNEL); - return !!rxq->mp_params.mp_ops; + WRITE_ONCE(rxq_src->lease, rxq_dst); + WRITE_ONCE(rxq_dst->lease, rxq_src); +} + +void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst, + struct netdev_rx_queue *rxq_src) +{ + netdev_assert_locked(rxq_dst->dev); + netdev_assert_locked(rxq_src->dev); + + WRITE_ONCE(rxq_src->lease, NULL); + WRITE_ONCE(rxq_dst->lease, NULL); + + netdev_put(rxq_src->dev, &rxq_src->lease_tracker); +} + +bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx) +{ + if (rxq_idx < dev->real_num_rx_queues) + return READ_ONCE(__netif_get_rx_queue(dev, rxq_idx)->lease); + return false; +} + +/* See also page_pool_is_unreadable() */ +bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx) +{ + if (rxq_idx < dev->real_num_rx_queues) + return __netif_get_rx_queue(dev, rxq_idx)->mp_params.mp_ops; + return false; } EXPORT_SYMBOL(netif_rxq_has_unreadable_mp); +bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx) +{ + if (rxq_idx < dev->real_num_rx_queues) + return __netif_get_rx_queue(dev, rxq_idx)->mp_params.mp_priv; + return false; +} + static int netdev_rx_queue_reconfig(struct net_device *dev, unsigned int rxq_idx, struct netdev_queue_config *qcfg_old, -- cgit v1.2.3 From 21d58b35e500ae099188c1be8398442733bc0d89 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 3 Apr 2026 01:10:20 +0200 Subject: net: Add lease info to queue-get response Populate nested lease info into the queue-get response, which returns the ifindex, the queue id with its type, and optionally the netns id if the device resides in a different netns. Example with ynl client when using AF_XDP via queue leasing: # ip a [...] 4: enp10s0f0np0: mtu 1500 xdp/id:24 qdisc mq state UP group default qlen 1000 link/ether e8:eb:d3:a3:43:f6 brd ff:ff:ff:ff:ff:ff inet 10.0.0.2/24 scope global enp10s0f0np0 valid_lft forever preferred_lft forever inet6 fe80::eaeb:d3ff:fea3:43f6/64 scope link proto kernel_ll valid_lft forever preferred_lft forever [...] # ethtool -i enp10s0f0np0 driver: mlx5_core [...] # ynl --family netdev --output-json --do queue-get \ --json '{"ifindex": 4, "id": 15, "type": "rx"}' {'id': 15, 'ifindex': 4, 'lease': {'ifindex': 8, 'netns-id': 0, 'queue': {'id': 1, 'type': 'rx'}}, 'napi-id': 8227, 'type': 'rx', 'xsk': {}} # ip netns list foo (id: 0) # ip netns exec foo ip a [...] 8: nk@NONE: mtu 1500 qdisc noqueue state UP group default qlen 1000 link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff inet6 fe80::200:ff:fe00:0/64 scope link proto kernel_ll valid_lft forever preferred_lft forever [...] # ip netns exec foo ethtool -i nk driver: netkit [...] # ip netns exec foo ls /sys/class/net/nk/queues/ rx-0 rx-1 tx-0 # ip netns exec foo ynl --family netdev --output-json --do queue-get \ --json '{"ifindex": 8, "id": 1, "type": "rx"}' {"id": 1, "type": "rx", "ifindex": 8, "xsk": {}} Note that the caller of netdev_nl_queue_fill_one() holds the netdevice lock. For the queue-get we do not lock both devices. When queues get {un,}leased, both devices are locked, thus if __netif_get_rx_queue_lease() returns a lease pointer, it points to a valid device. The netns-id is fetched via peernet2id_alloc(), similar to what is done in OVS.
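
To make the locking argument above concrete, here is a minimal reader-side sketch (illustrative only, not code from this patch; reader_peek_lease() is a made-up name). Because netdev_rx_queue_lease() and netdev_rx_queue_unlease() hold both device locks while flipping the pointers, a reader holding just one of the two locks can never observe a half-finished {un,}lease:

	static struct netdev_rx_queue *
	reader_peek_lease(struct net_device *dev, unsigned int rxq_idx)
	{
		/* Holding only this device's lock is enough: writers take
		 * both locks, so the pointer is either NULL or points to a
		 * peer queue whose device is pinned via lease_tracker.
		 */
		netdev_assert_locked(dev);

		return READ_ONCE(__netif_get_rx_queue(dev, rxq_idx)->lease);
	}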
Signed-off-by: Daniel Borkmann Co-developed-by: David Wei Signed-off-by: David Wei Reviewed-by: Nikolay Aleksandrov Link: https://patch.msgid.link/20260402231031.447597-4-daniel@iogearbox.net Signed-off-by: Jakub Kicinski --- include/net/netdev_rx_queue.h | 14 +++++++++ net/core/netdev-genl.c | 66 ++++++++++++++++++++++++++++++++++++++++--- net/core/netdev_rx_queue.c | 54 +++++++++++++++++++++++++++++++++++ 3 files changed, 130 insertions(+), 4 deletions(-) (limited to 'include/net/netdev_rx_queue.h') diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index 1d41c253f0a3..7e98c679ea84 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -67,6 +67,20 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue) return index; } +enum netif_lease_dir { + NETIF_VIRT_TO_PHYS, + NETIF_PHYS_TO_VIRT, +}; + +struct netdev_rx_queue * +__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq, + enum netif_lease_dir dir); + +struct netdev_rx_queue * +netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq); +void netif_put_rx_queue_lease_locked(struct net_device *orig_dev, + struct net_device *dev); + int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq); void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst, struct netdev_rx_queue *rxq_src); diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index 5d5e5b9a8af0..515832854251 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -386,12 +386,63 @@ static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi) return 0; } +static int +netdev_nl_queue_fill_lease(struct sk_buff *rsp, struct net_device *netdev, + u32 q_idx, u32 q_type) +{ + struct net_device *orig_netdev = netdev; + struct nlattr *nest_lease, *nest_queue; + struct netdev_rx_queue *rxq; + struct net *net, *peer_net; + + rxq = __netif_get_rx_queue_lease(&netdev, &q_idx, + NETIF_PHYS_TO_VIRT); + if (!rxq || orig_netdev == netdev) + return 0; + + nest_lease = nla_nest_start(rsp, NETDEV_A_QUEUE_LEASE); + if (!nest_lease) + goto nla_put_failure; + + nest_queue = nla_nest_start(rsp, NETDEV_A_LEASE_QUEUE); + if (!nest_queue) + goto nla_put_failure; + if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx)) + goto nla_put_failure; + if (nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type)) + goto nla_put_failure; + nla_nest_end(rsp, nest_queue); + + if (nla_put_u32(rsp, NETDEV_A_LEASE_IFINDEX, + READ_ONCE(netdev->ifindex))) + goto nla_put_failure; + + rcu_read_lock(); + peer_net = dev_net_rcu(netdev); + net = dev_net_rcu(orig_netdev); + if (!net_eq(net, peer_net)) { + s32 id = peernet2id_alloc(net, peer_net, GFP_ATOMIC); + + if (nla_put_s32(rsp, NETDEV_A_LEASE_NETNS_ID, id)) + goto nla_put_failure_unlock; + } + rcu_read_unlock(); + nla_nest_end(rsp, nest_lease); + return 0; + +nla_put_failure_unlock: + rcu_read_unlock(); +nla_put_failure: + return -ENOMEM; +} + static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { struct pp_memory_provider_params *params; - struct netdev_rx_queue *rxq; + struct net_device *orig_netdev = netdev; + struct netdev_rx_queue *rxq, *rxq_lease; struct netdev_queue *txq; void *hdr; @@ -409,17 +460,22 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, rxq = __netif_get_rx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, rxq->napi)) goto nla_put_failure; + if (netdev_nl_queue_fill_lease(rsp, netdev, q_idx, q_type)) + goto nla_put_failure; + 
rxq_lease = netif_get_rx_queue_lease_locked(&netdev, &q_idx); + if (rxq_lease) + rxq = rxq_lease; params = &rxq->mp_params; if (params->mp_ops && params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) - goto nla_put_failure; + goto nla_put_failure_lease; #ifdef CONFIG_XDP_SOCKETS if (rxq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) - goto nla_put_failure; + goto nla_put_failure_lease; #endif - + netif_put_rx_queue_lease_locked(orig_netdev, netdev); break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); @@ -437,6 +493,8 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, return 0; +nla_put_failure_lease: + netif_put_rx_queue_lease_locked(orig_netdev, netdev); nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c index a1f23c2c96d4..a4d8cad6db74 100644 --- a/net/core/netdev_rx_queue.c +++ b/net/core/netdev_rx_queue.c @@ -41,6 +41,60 @@ bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx) return false; } +/* Virtual devices eligible for leasing have no dev->dev.parent, while + * physical devices always have one. Use this to enforce the correct + * lease traversal direction. + */ +static bool netif_lease_dir_ok(const struct net_device *dev, + enum netif_lease_dir dir) +{ + if (dir == NETIF_VIRT_TO_PHYS && !dev->dev.parent) + return true; + if (dir == NETIF_PHYS_TO_VIRT && dev->dev.parent) + return true; + return false; +} + +struct netdev_rx_queue * +__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx, + enum netif_lease_dir dir) +{ + struct net_device *orig_dev = *dev; + struct netdev_rx_queue *rxq = __netif_get_rx_queue(orig_dev, *rxq_idx); + + if (rxq->lease) { + if (!netif_lease_dir_ok(orig_dev, dir)) + return NULL; + rxq = rxq->lease; + *rxq_idx = get_netdev_rx_queue_index(rxq); + *dev = rxq->dev; + } + return rxq; +} + +struct netdev_rx_queue * +netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq_idx) +{ + struct net_device *orig_dev = *dev; + struct netdev_rx_queue *rxq; + + /* Locking order is always from the virtual to the physical device + * see netdev_nl_queue_create_doit(). + */ + netdev_ops_assert_locked(orig_dev); + rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_VIRT_TO_PHYS); + if (rxq && orig_dev != *dev) + netdev_lock(*dev); + return rxq; +} + +void netif_put_rx_queue_lease_locked(struct net_device *orig_dev, + struct net_device *dev) +{ + if (orig_dev != dev) + netdev_unlock(dev); +} + /* See also page_pool_is_unreadable() */ bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx) { -- cgit v1.2.3 From 581d28606cdd51c5da06330e8fb97476503cd74d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 8 Apr 2026 15:12:51 -0700 Subject: net: remove the netif_get_rx_queue_lease_locked() helpers The netif_get_rx_queue_lease_locked() API hides the locking and the descent onto the leased queue, making the code harder to follow (at least to me). Remove the API and open-code the descent a bit. Most of the code now looks like: if (!leased) return __helper(x); hw_rxq = .. netdev_lock(hw_rxq->dev); ret = __helper(x); netdev_unlock(hw_rxq->dev); return ret; Of course, if we have more code paths that need the wrapping, we may need to revisit. For now, IMHO, having to know what netif_get_rx_queue_lease_locked() does is not worth the 20LoC it saves.
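
Spelled out with the checks the call sites now carry, the open-coded pattern reads roughly as below — a sketch only, with __helper() standing in for a per-queue operation such as __netdev_nl_queue_fill_mp() or __netdev_queue_get_dma_dev(), not code from this patch:

	static int helper(struct net_device *dev, unsigned int rxq_idx)
	{
		struct netdev_rx_queue *hw_rxq;
		int ret;

		/* Fast path: queue is not leased, operate locally. */
		if (!netif_rxq_is_leased(dev, rxq_idx))
			return __helper(dev, rxq_idx);
		/* Only the virtual (leasee) side may descend. */
		if (!netif_is_queue_leasee(dev))
			return -EBUSY;

		hw_rxq = __netif_get_rx_queue(dev, rxq_idx)->lease;

		/* Lock order is always virtual -> physical, and the
		 * caller already holds the virtual device's lock.
		 */
		netdev_lock(hw_rxq->dev);
		ret = __helper(hw_rxq->dev, get_netdev_rx_queue_index(hw_rxq));
		netdev_unlock(hw_rxq->dev);
		return ret;
	}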
Link: https://patch.msgid.link/20260408151251.72bd2482@kernel.org Signed-off-by: Jakub Kicinski --- include/net/netdev_rx_queue.h | 5 --- net/core/dev.h | 1 + net/core/netdev-genl.c | 59 +++++++++++++++++++++------------ net/core/netdev_queues.c | 16 ++++++--- net/core/netdev_rx_queue.c | 48 ++++++++------------------- net/xdp/xsk.c | 77 +++++++++++++++++++++++++++---------------- 6 files changed, 113 insertions(+), 93 deletions(-) (limited to 'include/net/netdev_rx_queue.h') diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index 7e98c679ea84..9415a94d333d 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -76,11 +76,6 @@ struct netdev_rx_queue * __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq, enum netif_lease_dir dir); -struct netdev_rx_queue * -netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq); -void netif_put_rx_queue_lease_locked(struct net_device *orig_dev, - struct net_device *dev); - int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq); void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst, struct netdev_rx_queue *rxq_src); diff --git a/net/core/dev.h b/net/core/dev.h index 95edb2d4eff8..376bac4a82da 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -101,6 +101,7 @@ int netdev_queue_config_validate(struct net_device *dev, int rxq_idx, bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx); bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx); +bool netif_is_queue_leasee(const struct net_device *dev); void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq, const struct pp_memory_provider_params *p); diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index 056460d01940..b8f6076d8007 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -395,8 +395,7 @@ netdev_nl_queue_fill_lease(struct sk_buff *rsp, struct net_device *netdev, struct netdev_rx_queue *rxq; struct net *net, *peer_net; - rxq = __netif_get_rx_queue_lease(&netdev, &q_idx, - NETIF_PHYS_TO_VIRT); + rxq = __netif_get_rx_queue_lease(&netdev, &q_idx, NETIF_PHYS_TO_VIRT); if (!rxq || orig_netdev == netdev) return 0; @@ -436,13 +435,45 @@ nla_put_failure: return -ENOMEM; } +static int +__netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct netdev_rx_queue *rxq) +{ + struct pp_memory_provider_params *params = &rxq->mp_params; + + if (params->mp_ops && + params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) + return -EMSGSIZE; + +#ifdef CONFIG_XDP_SOCKETS + if (rxq->pool) + if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) + return -EMSGSIZE; +#endif + return 0; +} + +static int +netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct net_device *netdev, + struct netdev_rx_queue *rxq) +{ + struct netdev_rx_queue *hw_rxq; + int ret; + + hw_rxq = rxq->lease; + if (!hw_rxq || !netif_is_queue_leasee(netdev)) + return __netdev_nl_queue_fill_mp(rsp, rxq); + + netdev_lock(hw_rxq->dev); + ret = __netdev_nl_queue_fill_mp(rsp, hw_rxq); + netdev_unlock(hw_rxq->dev); + return ret; +} + static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { - struct pp_memory_provider_params *params; - struct net_device *orig_netdev = netdev; - struct netdev_rx_queue *rxq, *rxq_lease; + struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; @@ -462,20 +493,8 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, goto nla_put_failure; if (netdev_nl_queue_fill_lease(rsp, 
netdev, q_idx, q_type)) goto nla_put_failure; - - rxq_lease = netif_get_rx_queue_lease_locked(&netdev, &q_idx); - if (rxq_lease) - rxq = rxq_lease; - params = &rxq->mp_params; - if (params->mp_ops && - params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) - goto nla_put_failure_lease; -#ifdef CONFIG_XDP_SOCKETS - if (rxq->pool) - if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) - goto nla_put_failure_lease; -#endif - netif_put_rx_queue_lease_locked(orig_netdev, netdev); + if (netdev_nl_queue_fill_mp(rsp, netdev, rxq)) + goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); @@ -493,8 +512,6 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, return 0; -nla_put_failure_lease: - netif_put_rx_queue_lease_locked(orig_netdev, netdev); nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; diff --git a/net/core/netdev_queues.c b/net/core/netdev_queues.c index 265161e12a9c..73fb28087a93 100644 --- a/net/core/netdev_queues.c +++ b/net/core/netdev_queues.c @@ -37,18 +37,24 @@ struct device *netdev_queue_get_dma_dev(struct net_device *dev, unsigned int idx, enum netdev_queue_type type) { - struct net_device *orig_dev = dev; + struct netdev_rx_queue *hw_rxq; struct device *dma_dev; + netdev_ops_assert_locked(dev); + /* Only RX side supports queue leasing today. */ if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx)) return __netdev_queue_get_dma_dev(dev, idx); - - if (!netif_get_rx_queue_lease_locked(&dev, &idx)) + if (!netif_is_queue_leasee(dev)) return NULL; - dma_dev = __netdev_queue_get_dma_dev(dev, idx); - netif_put_rx_queue_lease_locked(orig_dev, dev); + hw_rxq = __netif_get_rx_queue(dev, idx)->lease; + + netdev_lock(hw_rxq->dev); + idx = get_netdev_rx_queue_index(hw_rxq); + dma_dev = __netdev_queue_get_dma_dev(hw_rxq->dev, idx); + netdev_unlock(hw_rxq->dev); + return dma_dev; } diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c index 469319451ba2..8771e06a0afe 100644 --- a/net/core/netdev_rx_queue.c +++ b/net/core/netdev_rx_queue.c @@ -57,6 +57,11 @@ static bool netif_lease_dir_ok(const struct net_device *dev, return false; } +bool netif_is_queue_leasee(const struct net_device *dev) +{ + return netif_lease_dir_ok(dev, NETIF_VIRT_TO_PHYS); +} + struct netdev_rx_queue * __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx, enum netif_lease_dir dir) @@ -74,29 +79,6 @@ __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx, return rxq; } -struct netdev_rx_queue * -netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq_idx) -{ - struct net_device *orig_dev = *dev; - struct netdev_rx_queue *rxq; - - /* Locking order is always from the virtual to the physical device - * see netdev_nl_queue_create_doit(). 
- */ - netdev_ops_assert_locked(orig_dev); - rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_VIRT_TO_PHYS); - if (rxq && orig_dev != *dev) - netdev_lock(*dev); - return rxq; -} - -void netif_put_rx_queue_lease_locked(struct net_device *orig_dev, - struct net_device *dev) -{ - if (orig_dev != dev) - netdev_unlock(dev); -} - /* See also page_pool_is_unreadable() */ bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx) { @@ -264,7 +246,6 @@ int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx, const struct pp_memory_provider_params *p, struct netlink_ext_ack *extack) { - struct net_device *orig_dev = dev; int ret; if (!netdev_need_ops_lock(dev)) @@ -279,19 +260,18 @@ int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx, if (!netif_rxq_is_leased(dev, rxq_idx)) return __netif_mp_open_rxq(dev, rxq_idx, p, extack); - if (!netif_get_rx_queue_lease_locked(&dev, &rxq_idx)) { + if (!__netif_get_rx_queue_lease(&dev, &rxq_idx, NETIF_VIRT_TO_PHYS)) { NL_SET_ERR_MSG(extack, "rx queue leased to a virtual netdev"); return -EBUSY; } if (!dev->dev.parent) { NL_SET_ERR_MSG(extack, "rx queue belongs to a virtual netdev"); - ret = -EOPNOTSUPP; - goto out; + return -EOPNOTSUPP; } + netdev_lock(dev); ret = __netif_mp_open_rxq(dev, rxq_idx, p, extack); -out: - netif_put_rx_queue_lease_locked(orig_dev, dev); + netdev_unlock(dev); return ret; } @@ -326,18 +306,18 @@ static void __netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx, void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx, const struct pp_memory_provider_params *old_p) { - struct net_device *orig_dev = dev; - if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues)) return; if (!netif_rxq_is_leased(dev, ifq_idx)) return __netif_mp_close_rxq(dev, ifq_idx, old_p); - if (WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &ifq_idx))) + if (!__netif_get_rx_queue_lease(&dev, &ifq_idx, NETIF_VIRT_TO_PHYS)) { + WARN_ON_ONCE(1); return; - + } + netdev_lock(dev); __netif_mp_close_rxq(dev, ifq_idx, old_p); - netif_put_rx_queue_lease_locked(orig_dev, dev); + netdev_unlock(dev); } void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq, diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 60be6561f486..145876089b4b 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -31,6 +31,8 @@ #include #include +#include "../core/dev.h" + #include "xsk_queue.h" #include "xdp_umem.h" #include "xsk.h" @@ -117,20 +119,42 @@ struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev, } EXPORT_SYMBOL(xsk_get_pool_from_qid); +static void __xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id) +{ + if (queue_id < dev->num_rx_queues) + dev->_rx[queue_id].pool = NULL; + if (queue_id < dev->num_tx_queues) + dev->_tx[queue_id].pool = NULL; +} + void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id) { - struct net_device *orig_dev = dev; - unsigned int id = queue_id; + struct netdev_rx_queue *hw_rxq; + + if (!netif_rxq_is_leased(dev, queue_id)) + return __xsk_clear_pool_at_qid(dev, queue_id); + WARN_ON_ONCE(!netif_is_queue_leasee(dev)); - if (id < dev->real_num_rx_queues) - WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id)); + hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease; - if (id < dev->num_rx_queues) - dev->_rx[id].pool = NULL; - if (id < dev->num_tx_queues) - dev->_tx[id].pool = NULL; + netdev_lock(hw_rxq->dev); + queue_id = get_netdev_rx_queue_index(hw_rxq); + __xsk_clear_pool_at_qid(hw_rxq->dev, queue_id); + netdev_unlock(hw_rxq->dev); +} + +static int 
__xsk_reg_pool_at_qid(struct net_device *dev, + struct xsk_buff_pool *pool, u16 queue_id) +{ + if (xsk_get_pool_from_qid(dev, queue_id)) + return -EBUSY; + + if (queue_id < dev->real_num_rx_queues) + dev->_rx[queue_id].pool = pool; + if (queue_id < dev->real_num_tx_queues) + dev->_tx[queue_id].pool = pool; - netif_put_rx_queue_lease_locked(orig_dev, dev); + return 0; } /* The buffer pool is stored both in the _rx struct and the _tx struct as we do @@ -140,29 +164,26 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id) int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool, u16 queue_id) { - struct net_device *orig_dev = dev; - unsigned int id = queue_id; - int ret = 0; + struct netdev_rx_queue *hw_rxq; + int ret; - if (id >= max(dev->real_num_rx_queues, - dev->real_num_tx_queues)) + if (queue_id >= max(dev->real_num_rx_queues, + dev->real_num_tx_queues)) return -EINVAL; - if (id < dev->real_num_rx_queues) { - if (!netif_get_rx_queue_lease_locked(&dev, &id)) - return -EBUSY; - if (xsk_get_pool_from_qid(dev, id)) { - ret = -EBUSY; - goto out; - } - } + if (queue_id >= dev->real_num_rx_queues || + !netif_rxq_is_leased(dev, queue_id)) + return __xsk_reg_pool_at_qid(dev, pool, queue_id); + if (!netif_is_queue_leasee(dev)) + return -EBUSY; + + hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease; + + netdev_lock(hw_rxq->dev); + queue_id = get_netdev_rx_queue_index(hw_rxq); + ret = __xsk_reg_pool_at_qid(hw_rxq->dev, pool, queue_id); + netdev_unlock(hw_rxq->dev); - if (id < dev->real_num_rx_queues) - dev->_rx[id].pool = pool; - if (id < dev->real_num_tx_queues) - dev->_tx[id].pool = pool; -out: - netif_put_rx_queue_lease_locked(orig_dev, dev); return ret; } -- cgit v1.2.3
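
The series leaves the virtual driver's half of queue creation to the individual drivers (netkit in the examples above), and none of that side is shown in these patches. As rough orientation, a virtual driver's ndo_queue_create() might look like the sketch below — entirely hypothetical, not taken from netkit or any other driver:

	static int vdev_queue_create(struct net_device *dev,
				     struct netlink_ext_ack *extack)
	{
		unsigned int new_id = dev->real_num_rx_queues;
		int err;

		netdev_assert_locked(dev);

		if (new_id >= dev->num_rx_queues) {
			NL_SET_ERR_MSG(extack, "no spare rx queue slots");
			return -ENOSPC;
		}

		/* Driver-private setup of the new queue would go here,
		 * before the queue becomes visible.
		 */

		err = netif_set_real_num_rx_queues(dev, new_id + 1);
		if (err)
			return err;

		/* The caller, netdev_nl_queue_create_doit(), pairs the
		 * new queue with the physical one via
		 * netdev_rx_queue_lease().
		 */
		return new_id;
	}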