summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaolo Abeni <pabeni@redhat.com>2026-01-20 13:58:52 +0300
committerPaolo Abeni <pabeni@redhat.com>2026-01-20 14:25:29 +0300
commit77b9c4a438fc66e2ab004c411056b3fb71a54f2c (patch)
tree1a19c67570e38e8c1754b06bbc75bf3a8d0ebbf4
parent4515ec4ad58a37e70a9e1256c0b993958c9b7497 (diff)
parent931420a2fc363817c92990fa14eb1bdec024ce04 (diff)
downloadlinux-77b9c4a438fc66e2ab004c411056b3fb71a54f2c.tar.xz
Merge branch 'netkit-support-for-io_uring-zero-copy-and-af_xdp'
Daniel Borkmann says: ==================== netkit: Support for io_uring zero-copy and AF_XDP Containers use virtual netdevs to route traffic from a physical netdev in the host namespace. They do not have access to the physical netdev in the host and thus can't use memory providers or AF_XDP that require reconfiguring/restarting queues in the physical netdev. This patchset adds the concept of queue leasing to virtual netdevs that allow containers to use memory providers and AF_XDP at native speed. Leased queues are bound to a real queue in a physical netdev and act as a proxy. Memory providers and AF_XDP operations take an ifindex and queue id, so containers would pass in an ifindex for a virtual netdev and a queue id of a leased queue, which then gets proxied to the underlying real queue. We have implemented support for this concept in netkit and tested the latter against Nvidia ConnectX-6 (mlx5) as well as Broadcom BCM957504 (bnxt_en) 100G NICs. For more details see the individual patches. ==================== Link: https://patch.msgid.link/20260115082603.219152-1-daniel@iogearbox.net Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-rw-r--r--Documentation/netlink/specs/netdev.yaml44
-rw-r--r--drivers/net/netkit.c360
-rw-r--r--include/linux/netdevice.h6
-rw-r--r--include/net/netdev_queues.h19
-rw-r--r--include/net/netdev_rx_queue.h21
-rw-r--r--include/net/page_pool/memory_provider.h4
-rw-r--r--include/net/xdp_sock_drv.h2
-rw-r--r--include/uapi/linux/if_link.h6
-rw-r--r--include/uapi/linux/netdev.h11
-rw-r--r--net/core/dev.c7
-rw-r--r--net/core/dev.h2
-rw-r--r--net/core/netdev-genl-gen.c20
-rw-r--r--net/core/netdev-genl-gen.h2
-rw-r--r--net/core/netdev-genl.c185
-rw-r--r--net/core/netdev_queues.c74
-rw-r--r--net/core/netdev_rx_queue.c169
-rw-r--r--net/ethtool/channels.c12
-rw-r--r--net/ethtool/ioctl.c9
-rw-r--r--net/xdp/xsk.c79
-rw-r--r--tools/include/uapi/linux/netdev.h11
-rw-r--r--tools/testing/selftests/drivers/net/README.rst7
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile2
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/__init__.py7
-rw-r--r--tools/testing/selftests/drivers/net/hw/nk_forward.bpf.c49
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/nk_netns.py23
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/nk_qlease.py55
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/__init__.py7
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py157
28 files changed, 1233 insertions, 117 deletions
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 596c306ce52b..b86db8656eac 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -339,6 +339,15 @@ attribute-sets:
doc: XSK information for this queue, if any.
type: nest
nested-attributes: xsk-info
+ -
+ name: lease
+ doc: |
+ A queue from a virtual device can have a lease which refers to
+ another queue from a physical device. This is useful for memory
+ providers and AF_XDP operations which take an ifindex and queue id
+ to allow applications to bind against virtual devices in containers.
+ type: nest
+ nested-attributes: lease
-
name: qstats
doc: |
@@ -538,6 +547,24 @@ attribute-sets:
-
name: type
-
+ name: lease
+ attributes:
+ -
+ name: ifindex
+ doc: The netdev ifindex to lease the queue from.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: queue
+ doc: The netdev queue to lease from.
+ type: nest
+ nested-attributes: queue-id
+ -
+ name: netns-id
+ doc: The network namespace id of the netdev.
+ type: s32
+ -
name: dmabuf
attributes:
-
@@ -686,6 +713,7 @@ operations:
- dmabuf
- io-uring
- xsk
+ - lease
dump:
request:
attributes:
@@ -797,6 +825,22 @@ operations:
reply:
attributes:
- id
+ -
+ name: queue-create
+ doc: |
+ Create a new queue for the given netdevice. Whether this operation
+ is supported depends on the device and the driver.
+ attribute-set: queue
+ flags: [admin-perm]
+ do:
+ request:
+ attributes:
+ - ifindex
+ - type
+ - lease
+ reply: &queue-create-op
+ attributes:
+ - id
kernel-family:
headers: ["net/netdev_netlink.h"]
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 0a2fef7caccb..0519f855d062 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -9,11 +9,21 @@
#include <linux/bpf_mprog.h>
#include <linux/indirect_call_wrapper.h>
+#include <net/netdev_lock.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/xdp_sock_drv.h>
#include <net/netkit.h>
#include <net/dst.h>
#include <net/tcx.h>
-#define DRV_NAME "netkit"
+#define NETKIT_DRV_NAME "netkit"
+
+#define NETKIT_NUM_RX_QUEUES_MAX 1024
+#define NETKIT_NUM_TX_QUEUES_MAX 1
+
+#define NETKIT_NUM_RX_QUEUES_REAL 1
+#define NETKIT_NUM_TX_QUEUES_REAL 1
struct netkit {
__cacheline_group_begin(netkit_fastpath);
@@ -26,6 +36,7 @@ struct netkit {
__cacheline_group_begin(netkit_slowpath);
enum netkit_mode mode;
+ enum netkit_pairing pair;
bool primary;
u32 headroom;
__cacheline_group_end(netkit_slowpath);
@@ -36,6 +47,8 @@ struct netkit_link {
struct net_device *dev;
};
+static struct rtnl_link_ops netkit_link_ops;
+
static __always_inline int
netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
enum netkit_action ret)
@@ -135,6 +148,10 @@ static int netkit_open(struct net_device *dev)
struct netkit *nk = netkit_priv(dev);
struct net_device *peer = rtnl_dereference(nk->peer);
+ if (nk->pair == NETKIT_DEVICE_SINGLE) {
+ netif_carrier_on(dev);
+ return 0;
+ }
if (!peer)
return -ENOTCONN;
if (peer->flags & IFF_UP) {
@@ -219,9 +236,86 @@ static void netkit_get_stats(struct net_device *dev,
stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}
+static bool netkit_xsk_supported_at_phys(const struct net_device *dev)
+{
+ if (!dev->netdev_ops->ndo_bpf ||
+ !dev->netdev_ops->ndo_xdp_xmit ||
+ !dev->netdev_ops->ndo_xsk_wakeup)
+ return false;
+ if ((dev->xdp_features & NETDEV_XDP_ACT_XSK) != NETDEV_XDP_ACT_XSK)
+ return false;
+ return true;
+}
+
+static int netkit_xsk(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct netkit *nk = netkit_priv(dev);
+ struct netdev_bpf xdp_lower;
+ struct netdev_rx_queue *rxq;
+ struct net_device *phys;
+ int ret = -EBUSY;
+
+ switch (xdp->command) {
+ case XDP_SETUP_XSK_POOL:
+ if (nk->pair == NETKIT_DEVICE_PAIR)
+ return -EOPNOTSUPP;
+ if (xdp->xsk.queue_id >= dev->real_num_rx_queues)
+ return -EINVAL;
+
+ rxq = __netif_get_rx_queue(dev, xdp->xsk.queue_id);
+ if (!rxq->lease)
+ return -EOPNOTSUPP;
+
+ phys = rxq->lease->dev;
+ if (!netkit_xsk_supported_at_phys(phys))
+ return -EOPNOTSUPP;
+
+ memcpy(&xdp_lower, xdp, sizeof(xdp_lower));
+ xdp_lower.xsk.queue_id = get_netdev_rx_queue_index(rxq->lease);
+ break;
+ case XDP_SETUP_PROG:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+
+ netdev_lock(phys);
+ if (!dev_get_min_mp_channel_count(phys))
+ ret = phys->netdev_ops->ndo_bpf(phys, &xdp_lower);
+ netdev_unlock(phys);
+ return ret;
+}
+
+static int netkit_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct netdev_rx_queue *rxq;
+ struct net_device *phys;
+
+ if (queue_id >= dev->real_num_rx_queues)
+ return -EINVAL;
+
+ rxq = __netif_get_rx_queue(dev, queue_id);
+ if (!rxq->lease)
+ return -EOPNOTSUPP;
+
+ phys = rxq->lease->dev;
+ if (!netkit_xsk_supported_at_phys(phys))
+ return -EOPNOTSUPP;
+
+ return phys->netdev_ops->ndo_xsk_wakeup(phys,
+ get_netdev_rx_queue_index(rxq->lease), flags);
+}
+
+static int netkit_init(struct net_device *dev)
+{
+ netdev_lockdep_set_classes(dev);
+ return 0;
+}
+
static void netkit_uninit(struct net_device *dev);
static const struct net_device_ops netkit_netdev_ops = {
+ .ndo_init = netkit_init,
.ndo_open = netkit_open,
.ndo_stop = netkit_close,
.ndo_start_xmit = netkit_xmit,
@@ -232,19 +326,95 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_get_peer_dev = netkit_peer_dev,
.ndo_get_stats64 = netkit_get_stats,
.ndo_uninit = netkit_uninit,
+ .ndo_bpf = netkit_xsk,
+ .ndo_xsk_wakeup = netkit_xsk_wakeup,
.ndo_features_check = passthru_features_check,
};
static void netkit_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, NETKIT_DRV_NAME, sizeof(info->driver));
}
static const struct ethtool_ops netkit_ethtool_ops = {
.get_drvinfo = netkit_get_drvinfo,
};
+static int netkit_queue_create(struct net_device *dev)
+{
+ struct netkit *nk = netkit_priv(dev);
+ u32 rxq_count_old, rxq_count_new;
+ int err;
+
+ rxq_count_old = dev->real_num_rx_queues;
+ rxq_count_new = rxq_count_old + 1;
+
+ /* Only allow to lease a queue in single device mode or to
+ * lease against the peer device which then ends up in the
+ * target netns.
+ */
+ if (nk->pair == NETKIT_DEVICE_PAIR && nk->primary)
+ return -EOPNOTSUPP;
+
+ if (netif_running(dev))
+ netif_carrier_off(dev);
+ err = netif_set_real_num_rx_queues(dev, rxq_count_new);
+ if (netif_running(dev))
+ netif_carrier_on(dev);
+
+ return err ? : rxq_count_old;
+}
+
+static const struct netdev_queue_mgmt_ops netkit_queue_mgmt_ops = {
+ .ndo_queue_create = netkit_queue_create,
+};
+
+static struct net_device *netkit_alloc(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues)
+{
+ const struct rtnl_link_ops *ops = &netkit_link_ops;
+ struct net_device *dev;
+
+ if (num_tx_queues > NETKIT_NUM_TX_QUEUES_MAX ||
+ num_rx_queues > NETKIT_NUM_RX_QUEUES_MAX)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dev = alloc_netdev_mqs(ops->priv_size, ifname,
+ name_assign_type, ops->setup,
+ num_tx_queues, num_rx_queues);
+ if (dev) {
+ dev->real_num_tx_queues = NETKIT_NUM_TX_QUEUES_REAL;
+ dev->real_num_rx_queues = NETKIT_NUM_RX_QUEUES_REAL;
+ }
+ return dev;
+}
+
+static void netkit_queue_unlease(struct net_device *dev)
+{
+ struct netdev_rx_queue *rxq, *rxq_lease;
+ struct net_device *dev_lease;
+ int i;
+
+ if (dev->real_num_rx_queues == 1)
+ return;
+
+ netdev_lock(dev);
+ for (i = 1; i < dev->real_num_rx_queues; i++) {
+ rxq = __netif_get_rx_queue(dev, i);
+ rxq_lease = rxq->lease;
+ dev_lease = rxq_lease->dev;
+
+ netdev_lock(dev_lease);
+ netdev_rx_queue_unlease(rxq, rxq_lease);
+ netdev_unlock(dev_lease);
+ }
+ netdev_unlock(dev);
+}
+
static void netkit_setup(struct net_device *dev)
{
static const netdev_features_t netkit_features_hw_vlan =
@@ -275,18 +445,20 @@ static void netkit_setup(struct net_device *dev)
dev->priv_flags |= IFF_DISABLE_NETPOLL;
dev->lltx = true;
- dev->ethtool_ops = &netkit_ethtool_ops;
- dev->netdev_ops = &netkit_netdev_ops;
+ dev->netdev_ops = &netkit_netdev_ops;
+ dev->ethtool_ops = &netkit_ethtool_ops;
+ dev->queue_mgmt_ops = &netkit_queue_mgmt_ops;
dev->features |= netkit_features;
dev->hw_features = netkit_features;
dev->hw_enc_features = netkit_features;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
dev->vlan_features = dev->features & ~netkit_features_hw_vlan;
-
dev->needs_free_netdev = true;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
+ xdp_set_features_flag(dev, NETDEV_XDP_ACT_XSK);
}
static struct net *netkit_get_link_net(const struct net_device *dev)
@@ -325,8 +497,6 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static struct rtnl_link_ops netkit_link_ops;
-
static int netkit_new_link(struct net_device *dev,
struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
@@ -335,6 +505,7 @@ static int netkit_new_link(struct net_device *dev,
enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp, *attr;
+ enum netkit_pairing pair = NETKIT_DEVICE_PAIR;
enum netkit_action policy_prim = NETKIT_PASS;
enum netkit_action policy_peer = NETKIT_PASS;
struct nlattr **data = params->data;
@@ -343,7 +514,8 @@ static int netkit_new_link(struct net_device *dev,
struct nlattr **tb = params->tb;
u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
- struct net_device *peer;
+ struct net_device *peer = NULL;
+ bool seen_peer = false;
char ifname[IFNAMSIZ];
struct netkit *nk;
int err;
@@ -380,6 +552,12 @@ static int netkit_new_link(struct net_device *dev,
headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
if (data[IFLA_NETKIT_TAILROOM])
tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
+ if (data[IFLA_NETKIT_PAIRING])
+ pair = nla_get_u32(data[IFLA_NETKIT_PAIRING]);
+
+ seen_peer = data[IFLA_NETKIT_PEER_INFO] ||
+ data[IFLA_NETKIT_PEER_SCRUB] ||
+ data[IFLA_NETKIT_PEER_POLICY];
}
if (ifmp && tbp[IFLA_IFNAME]) {
@@ -392,45 +570,46 @@ static int netkit_new_link(struct net_device *dev,
if (mode != NETKIT_L2 &&
(tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
return -EOPNOTSUPP;
+ if (pair == NETKIT_DEVICE_SINGLE &&
+ (tb != tbp || seen_peer || policy_prim != NETKIT_PASS))
+ return -EOPNOTSUPP;
- peer = rtnl_create_link(peer_net, ifname, ifname_assign_type,
- &netkit_link_ops, tbp, extack);
- if (IS_ERR(peer))
- return PTR_ERR(peer);
-
- netif_inherit_tso_max(peer, dev);
- if (headroom) {
- peer->needed_headroom = headroom;
- dev->needed_headroom = headroom;
- }
- if (tailroom) {
- peer->needed_tailroom = tailroom;
- dev->needed_tailroom = tailroom;
- }
-
- if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
- eth_hw_addr_random(peer);
- if (ifmp && dev->ifindex)
- peer->ifindex = ifmp->ifi_index;
-
- nk = netkit_priv(peer);
- nk->primary = false;
- nk->policy = policy_peer;
- nk->scrub = scrub_peer;
- nk->mode = mode;
- nk->headroom = headroom;
- bpf_mprog_bundle_init(&nk->bundle);
+ if (pair == NETKIT_DEVICE_PAIR) {
+ peer = rtnl_create_link(peer_net, ifname, ifname_assign_type,
+ &netkit_link_ops, tbp, extack);
+ if (IS_ERR(peer))
+ return PTR_ERR(peer);
+
+ netif_inherit_tso_max(peer, dev);
+ if (headroom)
+ peer->needed_headroom = headroom;
+ if (tailroom)
+ peer->needed_tailroom = tailroom;
+ if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
+ eth_hw_addr_random(peer);
+ if (ifmp && dev->ifindex)
+ peer->ifindex = ifmp->ifi_index;
- err = register_netdevice(peer);
- if (err < 0)
- goto err_register_peer;
- netif_carrier_off(peer);
- if (mode == NETKIT_L2)
- dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL);
+ nk = netkit_priv(peer);
+ nk->primary = false;
+ nk->policy = policy_peer;
+ nk->scrub = scrub_peer;
+ nk->mode = mode;
+ nk->pair = pair;
+ nk->headroom = headroom;
+ bpf_mprog_bundle_init(&nk->bundle);
+
+ err = register_netdevice(peer);
+ if (err < 0)
+ goto err_register_peer;
+ netif_carrier_off(peer);
+ if (mode == NETKIT_L2)
+ dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL);
- err = rtnl_configure_link(peer, NULL, 0, NULL);
- if (err < 0)
- goto err_configure_peer;
+ err = rtnl_configure_link(peer, NULL, 0, NULL);
+ if (err < 0)
+ goto err_configure_peer;
+ }
if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
@@ -438,12 +617,17 @@ static int netkit_new_link(struct net_device *dev,
nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
else
strscpy(dev->name, "nk%d", IFNAMSIZ);
+ if (headroom)
+ dev->needed_headroom = headroom;
+ if (tailroom)
+ dev->needed_tailroom = tailroom;
nk = netkit_priv(dev);
nk->primary = true;
nk->policy = policy_prim;
nk->scrub = scrub_prim;
nk->mode = mode;
+ nk->pair = pair;
nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
@@ -455,10 +639,12 @@ static int netkit_new_link(struct net_device *dev,
dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL);
rcu_assign_pointer(netkit_priv(dev)->peer, peer);
- rcu_assign_pointer(netkit_priv(peer)->peer, dev);
+ if (peer)
+ rcu_assign_pointer(netkit_priv(peer)->peer, dev);
return 0;
err_configure_peer:
- unregister_netdevice(peer);
+ if (peer)
+ unregister_netdevice(peer);
return err;
err_register_peer:
free_netdev(peer);
@@ -518,6 +704,8 @@ static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 whi
nk = netkit_priv(dev);
if (!nk->primary)
return ERR_PTR(-EACCES);
+ if (nk->pair == NETKIT_DEVICE_SINGLE)
+ return ERR_PTR(-EOPNOTSUPP);
if (which == BPF_NETKIT_PEER) {
dev = rcu_dereference_rtnl(nk->peer);
if (!dev)
@@ -844,6 +1032,7 @@ static void netkit_release_all(struct net_device *dev)
static void netkit_uninit(struct net_device *dev)
{
netkit_release_all(dev);
+ netkit_queue_unlease(dev);
}
static void netkit_del_link(struct net_device *dev, struct list_head *head)
@@ -879,6 +1068,7 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
{ IFLA_NETKIT_PEER_INFO, "peer info" },
{ IFLA_NETKIT_HEADROOM, "headroom" },
{ IFLA_NETKIT_TAILROOM, "tailroom" },
+ { IFLA_NETKIT_PAIRING, "pairing" },
};
if (!nk->primary) {
@@ -898,9 +1088,11 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_NETKIT_POLICY]) {
+ err = -EOPNOTSUPP;
attr = data[IFLA_NETKIT_POLICY];
policy = nla_get_u32(attr);
- err = netkit_check_policy(policy, attr, extack);
+ if (nk->pair == NETKIT_DEVICE_PAIR)
+ err = netkit_check_policy(policy, attr, extack);
if (err)
return err;
WRITE_ONCE(nk->policy, policy);
@@ -921,6 +1113,48 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return 0;
}
+static void netkit_check_lease_unregister(struct net_device *dev)
+{
+ LIST_HEAD(list_kill);
+ u32 q_idx;
+
+ if (READ_ONCE(dev->reg_state) != NETREG_UNREGISTERING ||
+ !dev->dev.parent)
+ return;
+
+ netdev_lock_ops(dev);
+ for (q_idx = 0; q_idx < dev->real_num_rx_queues; q_idx++) {
+ struct net_device *tmp = dev;
+ u32 tmp_q_idx = q_idx;
+
+ if (netif_rx_queue_lease_get_owner(&tmp, &tmp_q_idx)) {
+ if (tmp->netdev_ops != &netkit_netdev_ops)
+ continue;
+ /* A single phys device can have multiple queues leased
+ * to one netkit device. We can only queue that netkit
+ * device once to the list_kill. Queues of that phys
+ * device can be leased with different individual netkit
+ * devices, hence we batch via list_kill.
+ */
+ if (unregister_netdevice_queued(tmp))
+ continue;
+ netkit_del_link(tmp, &list_kill);
+ }
+ }
+ netdev_unlock_ops(dev);
+ unregister_netdevice_many(&list_kill);
+}
+
+static int netkit_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event == NETDEV_UNREGISTER)
+ netkit_check_lease_unregister(dev);
+ return NOTIFY_DONE;
+}
+
static size_t netkit_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */
@@ -931,6 +1165,7 @@ static size_t netkit_get_size(const struct net_device *dev)
nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */
nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
+ nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PAIRING */
0;
}
@@ -951,6 +1186,8 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
return -EMSGSIZE;
+ if (nla_put_u32(skb, IFLA_NETKIT_PAIRING, nk->pair))
+ return -EMSGSIZE;
if (peer) {
nk = netkit_priv(peer);
@@ -972,13 +1209,15 @@ static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
[IFLA_NETKIT_TAILROOM] = { .type = NLA_U16 },
[IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
+ [IFLA_NETKIT_PAIRING] = NLA_POLICY_MAX(NLA_U32, NETKIT_DEVICE_SINGLE),
[IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT,
.reject_message = "Primary attribute is read-only" },
};
static struct rtnl_link_ops netkit_link_ops = {
- .kind = DRV_NAME,
+ .kind = NETKIT_DRV_NAME,
.priv_size = sizeof(struct netkit),
+ .alloc = netkit_alloc,
.setup = netkit_setup,
.newlink = netkit_new_link,
.dellink = netkit_del_link,
@@ -992,26 +1231,39 @@ static struct rtnl_link_ops netkit_link_ops = {
.maxtype = IFLA_NETKIT_MAX,
};
-static __init int netkit_init(void)
+static struct notifier_block netkit_netdev_notifier = {
+ .notifier_call = netkit_notifier,
+};
+
+static __init int netkit_mod_init(void)
{
+ int ret;
+
BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT ||
(int)NETKIT_PASS != (int)TCX_PASS ||
(int)NETKIT_DROP != (int)TCX_DROP ||
(int)NETKIT_REDIRECT != (int)TCX_REDIRECT);
- return rtnl_link_register(&netkit_link_ops);
+ ret = rtnl_link_register(&netkit_link_ops);
+ if (ret)
+ return ret;
+ ret = register_netdevice_notifier(&netkit_netdev_notifier);
+ if (ret)
+ rtnl_link_unregister(&netkit_link_ops);
+ return ret;
}
-static __exit void netkit_exit(void)
+static __exit void netkit_mod_exit(void)
{
+ unregister_netdevice_notifier(&netkit_netdev_notifier);
rtnl_link_unregister(&netkit_link_ops);
}
-module_init(netkit_init);
-module_exit(netkit_exit);
+module_init(netkit_mod_init);
+module_exit(netkit_mod_exit);
MODULE_DESCRIPTION("BPF-programmable network device");
MODULE_AUTHOR("Daniel Borkmann <daniel@iogearbox.net>");
MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_ALIAS_RTNL_LINK(NETKIT_DRV_NAME);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d99b0fbc1942..4d146c000e21 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3400,11 +3400,17 @@ static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
+
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
}
+static inline bool unregister_netdevice_queued(const struct net_device *dev)
+{
+ return !list_empty(&dev->unreg_list);
+}
+
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index b55d3b9cb9c2..81dc7cb2360c 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -130,6 +130,11 @@ void netdev_stat_queue_sum(struct net_device *netdev,
* @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used
* for this queue. Return NULL on error.
*
+ * @ndo_queue_create: Create a new RX queue which can be leased to another queue.
+ * Ops on this queue are redirected to the leased queue e.g.
+ * when opening a memory provider. Return the new queue id on
+ * success. Return negative error code on failure.
+ *
* Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
* the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
* be called for an interface which is open.
@@ -149,9 +154,12 @@ struct netdev_queue_mgmt_ops {
int idx);
struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
int idx);
+ int (*ndo_queue_create)(struct net_device *dev);
};
-bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx);
+bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
+bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
/**
* DOC: Lockless queue stopping / waking helpers.
@@ -340,5 +348,10 @@ static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq)
})
struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
-
-#endif
+bool netdev_can_create_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+bool netdev_can_lease_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+bool netdev_queue_busy(struct net_device *dev, int idx,
+ struct netlink_ext_ack *extack);
+#endif /* _LINUX_NET_QUEUES_H */
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index 8cdcd138b33f..508d11afaecb 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -28,6 +28,8 @@ struct netdev_rx_queue {
#endif
struct napi_struct *napi;
struct pp_memory_provider_params mp_params;
+ struct netdev_rx_queue *lease;
+ netdevice_tracker lease_tracker;
} ____cacheline_aligned_in_smp;
/*
@@ -57,5 +59,22 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
}
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src);
+void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src);
+bool netif_rx_queue_lease_get_owner(struct net_device **dev, unsigned int *rxq);
-#endif
+enum netif_lease_dir {
+ NETIF_VIRT_TO_PHYS,
+ NETIF_PHYS_TO_VIRT,
+};
+
+struct netdev_rx_queue *
+__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq,
+ enum netif_lease_dir dir);
+struct netdev_rx_queue *
+netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq);
+void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
+ struct net_device *dev);
+#endif /* _LINUX_NETDEV_RX_QUEUE_H */
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
index ada4f968960a..b6f811c3416b 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -23,12 +23,12 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *p);
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack);
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+void net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *old_p);
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *old_p);
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 242e34f771cc..c07cfb431eac 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -28,7 +28,7 @@ void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+struct xsk_buff_pool *xsk_get_pool_from_qid(const struct net_device *dev,
u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 3b491d96e52e..bbd565757298 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1296,6 +1296,11 @@ enum netkit_mode {
NETKIT_L3,
};
+enum netkit_pairing {
+ NETKIT_DEVICE_PAIR,
+ NETKIT_DEVICE_SINGLE,
+};
+
/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to
* the BPF program if attached. This also means the latter can
* consume the two fields if they were populated earlier.
@@ -1320,6 +1325,7 @@ enum {
IFLA_NETKIT_PEER_SCRUB,
IFLA_NETKIT_HEADROOM,
IFLA_NETKIT_TAILROOM,
+ IFLA_NETKIT_PAIRING,
__IFLA_NETKIT_MAX,
};
#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index e0b579a1df4f..7df1056a35fd 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -160,6 +160,7 @@ enum {
NETDEV_A_QUEUE_DMABUF,
NETDEV_A_QUEUE_IO_URING,
NETDEV_A_QUEUE_XSK,
+ NETDEV_A_QUEUE_LEASE,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -203,6 +204,15 @@ enum {
};
enum {
+ NETDEV_A_LEASE_IFINDEX = 1,
+ NETDEV_A_LEASE_QUEUE,
+ NETDEV_A_LEASE_NETNS_ID,
+
+ __NETDEV_A_LEASE_MAX,
+ NETDEV_A_LEASE_MAX = (__NETDEV_A_LEASE_MAX - 1)
+};
+
+enum {
NETDEV_A_DMABUF_IFINDEX = 1,
NETDEV_A_DMABUF_QUEUES,
NETDEV_A_DMABUF_FD,
@@ -228,6 +238,7 @@ enum {
NETDEV_CMD_BIND_RX,
NETDEV_CMD_NAPI_SET,
NETDEV_CMD_BIND_TX,
+ NETDEV_CMD_QUEUE_CREATE,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2661b68f5be3..13a3de63a825 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1115,6 +1115,13 @@ netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex)
}
struct net_device *
+netdev_put_lock(struct net_device *dev, netdevice_tracker *tracker)
+{
+ netdev_tracker_free(dev, tracker);
+ return __netdev_put_lock(dev, dev_net(dev));
+}
+
+struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
unsigned long *index)
{
diff --git a/net/core/dev.h b/net/core/dev.h
index da18536cbd35..9bcb76b325d0 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -30,6 +30,8 @@ netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net);
+struct net_device *netdev_put_lock(struct net_device *dev,
+ netdevice_tracker *tracker);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
unsigned long *index);
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index ba673e81716f..52ba99c019e7 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -28,6 +28,12 @@ static const struct netlink_range_validation netdev_a_napi_defer_hard_irqs_range
};
/* Common nested types */
+const struct nla_policy netdev_lease_nl_policy[NETDEV_A_LEASE_NETNS_ID + 1] = {
+ [NETDEV_A_LEASE_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_LEASE_QUEUE] = NLA_POLICY_NESTED(netdev_queue_id_nl_policy),
+ [NETDEV_A_LEASE_NETNS_ID] = { .type = NLA_S32, },
+};
+
const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1] = {
[NETDEV_A_PAGE_POOL_ID] = NLA_POLICY_FULL_RANGE(NLA_UINT, &netdev_a_page_pool_id_range),
[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
@@ -107,6 +113,13 @@ static const struct nla_policy netdev_bind_tx_nl_policy[NETDEV_A_DMABUF_FD + 1]
[NETDEV_A_DMABUF_FD] = { .type = NLA_U32, },
};
+/* NETDEV_CMD_QUEUE_CREATE - do */
+static const struct nla_policy netdev_queue_create_nl_policy[NETDEV_A_QUEUE_LEASE + 1] = {
+ [NETDEV_A_QUEUE_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_QUEUE_TYPE] = NLA_POLICY_MAX(NLA_U32, 1),
+ [NETDEV_A_QUEUE_LEASE] = NLA_POLICY_NESTED(netdev_lease_nl_policy),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -205,6 +218,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_DMABUF_FD,
.flags = GENL_CMD_CAP_DO,
},
+ {
+ .cmd = NETDEV_CMD_QUEUE_CREATE,
+ .doit = netdev_nl_queue_create_doit,
+ .policy = netdev_queue_create_nl_policy,
+ .maxattr = NETDEV_A_QUEUE_LEASE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index cffc08517a41..d71b435d72c1 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -14,6 +14,7 @@
#include <net/netdev_netlink.h>
/* Common nested types */
+extern const struct nla_policy netdev_lease_nl_policy[NETDEV_A_LEASE_NETNS_ID + 1];
extern const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1];
extern const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1];
@@ -36,6 +37,7 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_queue_create_doit(struct sk_buff *skb, struct genl_info *info);
enum {
NETDEV_NLGRP_MGMT,
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 470fabbeacd9..51c830f88f10 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -391,8 +391,11 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
u32 q_idx, u32 q_type, const struct genl_info *info)
{
struct pp_memory_provider_params *params;
+ struct net_device *orig_netdev = netdev;
+ struct nlattr *nest_lease, *nest_queue;
struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
+ u32 lease_q_idx = q_idx;
void *hdr;
hdr = genlmsg_iput(rsp, info);
@@ -410,6 +413,37 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
if (nla_put_napi_id(rsp, rxq->napi))
goto nla_put_failure;
+ if (netif_rx_queue_lease_get_owner(&netdev, &lease_q_idx)) {
+ struct net *net, *peer_net;
+
+ nest_lease = nla_nest_start(rsp, NETDEV_A_QUEUE_LEASE);
+ if (!nest_lease)
+ goto nla_put_failure;
+ nest_queue = nla_nest_start(rsp, NETDEV_A_LEASE_QUEUE);
+ if (!nest_queue)
+ goto nla_put_failure;
+ if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, lease_q_idx))
+ goto nla_put_failure;
+ if (nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type))
+ goto nla_put_failure;
+ nla_nest_end(rsp, nest_queue);
+ if (nla_put_u32(rsp, NETDEV_A_LEASE_IFINDEX,
+ READ_ONCE(netdev->ifindex)))
+ goto nla_put_failure;
+ rcu_read_lock();
+ peer_net = dev_net_rcu(netdev);
+ net = dev_net_rcu(orig_netdev);
+ if (!net_eq(net, peer_net)) {
+ s32 id = peernet2id_alloc(net, peer_net, GFP_ATOMIC);
+
+ if (nla_put_s32(rsp, NETDEV_A_LEASE_NETNS_ID, id))
+ goto nla_put_failure_unlock;
+ }
+ rcu_read_unlock();
+ nla_nest_end(rsp, nest_lease);
+ netdev = orig_netdev;
+ }
+
params = &rxq->mp_params;
if (params->mp_ops &&
params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
@@ -437,6 +471,8 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
return 0;
+nla_put_failure_unlock:
+ rcu_read_unlock();
nla_put_failure:
genlmsg_cancel(rsp, hdr);
return -EMSGSIZE;
@@ -1120,6 +1156,155 @@ err_genlmsg_free:
return err;
}
+int netdev_nl_queue_create_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ const int qmaxtype = ARRAY_SIZE(netdev_queue_id_nl_policy) - 1;
+ const int lmaxtype = ARRAY_SIZE(netdev_lease_nl_policy) - 1;
+ int err, ifindex, ifindex_lease, queue_id, queue_id_lease;
+ struct nlattr *qtb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct nlattr *ltb[ARRAY_SIZE(netdev_lease_nl_policy)];
+ struct netdev_rx_queue *rxq, *rxq_lease;
+ struct net_device *dev, *dev_lease;
+ netdevice_tracker dev_tracker;
+ struct nlattr *nest;
+ struct sk_buff *rsp;
+ void *hdr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_LEASE))
+ return -EINVAL;
+ if (nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]) !=
+ NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QUEUE_TYPE]);
+ return -EINVAL;
+ }
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);
+
+ nest = info->attrs[NETDEV_A_QUEUE_LEASE];
+ err = nla_parse_nested(ltb, lmaxtype, nest,
+ netdev_lease_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+ if (NL_REQ_ATTR_CHECK(info->extack, nest, ltb, NETDEV_A_LEASE_IFINDEX) ||
+ NL_REQ_ATTR_CHECK(info->extack, nest, ltb, NETDEV_A_LEASE_QUEUE))
+ return -EINVAL;
+ if (ltb[NETDEV_A_LEASE_NETNS_ID]) {
+ NL_SET_BAD_ATTR(info->extack, ltb[NETDEV_A_LEASE_NETNS_ID]);
+ return -EINVAL;
+ }
+
+ ifindex_lease = nla_get_u32(ltb[NETDEV_A_LEASE_IFINDEX]);
+
+ nest = ltb[NETDEV_A_LEASE_QUEUE];
+ err = nla_parse_nested(qtb, qmaxtype, nest,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+ if (NL_REQ_ATTR_CHECK(info->extack, nest, qtb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, nest, qtb, NETDEV_A_QUEUE_TYPE))
+ return -EINVAL;
+ if (nla_get_u32(qtb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, qtb[NETDEV_A_QUEUE_TYPE]);
+ return -EINVAL;
+ }
+ if (ifindex == ifindex_lease) {
+ NL_SET_ERR_MSG(info->extack,
+ "Lease ifindex cannot be the same as queue creation ifindex");
+ return -EINVAL;
+ }
+
+ queue_id_lease = nla_get_u32(qtb[NETDEV_A_QUEUE_ID]);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_genlmsg_free;
+ }
+
+ /* Locking order is always from the virtual to the physical device
+ * since this is also the same order when applications open the
+ * memory provider later on.
+ */
+ dev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto err_genlmsg_free;
+ }
+ if (!netdev_can_create_queue(dev, info->extack)) {
+ err = -EINVAL;
+ goto err_unlock_dev;
+ }
+
+ dev_lease = netdev_get_by_index(genl_info_net(info), ifindex_lease,
+ &dev_tracker, GFP_KERNEL);
+ if (!dev_lease) {
+ err = -ENODEV;
+ goto err_unlock_dev;
+ }
+ if (!netdev_can_lease_queue(dev_lease, info->extack)) {
+ netdev_put(dev_lease, &dev_tracker);
+ err = -EINVAL;
+ goto err_unlock_dev;
+ }
+
+ dev_lease = netdev_put_lock(dev_lease, &dev_tracker);
+ if (!dev_lease) {
+ err = -ENODEV;
+ goto err_unlock_dev;
+ }
+ if (queue_id_lease >= dev_lease->real_num_rx_queues) {
+ err = -ERANGE;
+ NL_SET_BAD_ATTR(info->extack, qtb[NETDEV_A_QUEUE_ID]);
+ goto err_unlock_dev_lease;
+ }
+ if (netdev_queue_busy(dev_lease, queue_id_lease, info->extack)) {
+ err = -EBUSY;
+ goto err_unlock_dev_lease;
+ }
+
+ rxq_lease = __netif_get_rx_queue(dev_lease, queue_id_lease);
+ rxq = __netif_get_rx_queue(dev, dev->real_num_rx_queues - 1);
+
+ if (rxq->lease && rxq->lease->dev != dev_lease) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG(info->extack,
+ "Leasing multiple queues from different devices not supported");
+ goto err_unlock_dev_lease;
+ }
+
+ err = queue_id = dev->queue_mgmt_ops->ndo_queue_create(dev);
+ if (err < 0) {
+ NL_SET_ERR_MSG(info->extack,
+ "Device is unable to create a new queue");
+ goto err_unlock_dev_lease;
+ }
+
+ rxq = __netif_get_rx_queue(dev, queue_id);
+ netdev_rx_queue_lease(rxq, rxq_lease);
+
+ nla_put_u32(rsp, NETDEV_A_QUEUE_ID, queue_id);
+ genlmsg_end(rsp, hdr);
+
+ netdev_unlock(dev_lease);
+ netdev_unlock(dev);
+
+ return genlmsg_reply(rsp, info);
+
+err_unlock_dev_lease:
+ netdev_unlock(dev_lease);
+err_unlock_dev:
+ netdev_unlock(dev);
+err_genlmsg_free:
+ nlmsg_free(rsp);
+ return err;
+}
+
void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv)
{
INIT_LIST_HEAD(&priv->bindings);
diff --git a/net/core/netdev_queues.c b/net/core/netdev_queues.c
index 251f27a8307f..97acf6440829 100644
--- a/net/core/netdev_queues.c
+++ b/net/core/netdev_queues.c
@@ -1,22 +1,37 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/xdp_sock_drv.h>
/**
* netdev_queue_get_dma_dev() - get dma device for zero-copy operations
* @dev: net_device
* @idx: queue index
*
- * Get dma device for zero-copy operations to be used for this queue.
+ * Get dma device for zero-copy operations to be used for this queue. If the
+ * queue is leased to a physical queue, we retrieve the latter's dma device.
* When such device is not available or valid, the function will return NULL.
*
* Return: Device or NULL on error
*/
struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx)
{
- const struct netdev_queue_mgmt_ops *queue_ops = dev->queue_mgmt_ops;
+ const struct netdev_queue_mgmt_ops *queue_ops;
struct device *dma_dev;
+ if (idx < dev->real_num_rx_queues) {
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);
+
+ if (rxq->lease) {
+ rxq = rxq->lease;
+ dev = rxq->dev;
+ idx = get_netdev_rx_queue_index(rxq);
+ }
+ }
+
+ queue_ops = dev->queue_mgmt_ops;
+
if (queue_ops && queue_ops->ndo_queue_get_dma_dev)
dma_dev = queue_ops->ndo_queue_get_dma_dev(dev, idx);
else
@@ -25,3 +40,58 @@ struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx)
return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
}
+bool netdev_can_create_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ if (dev->dev.parent) {
+ NL_SET_ERR_MSG(extack, "Device is not a virtual device");
+ return false;
+ }
+ if (!dev->queue_mgmt_ops ||
+ !dev->queue_mgmt_ops->ndo_queue_create) {
+ NL_SET_ERR_MSG(extack, "Device does not support queue creation");
+ return false;
+ }
+ if (dev->real_num_rx_queues < 1 ||
+ dev->real_num_tx_queues < 1) {
+ NL_SET_ERR_MSG(extack, "Device must have at least one real queue");
+ return false;
+ }
+ return true;
+}
+
+bool netdev_can_lease_queue(const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ if (!dev->dev.parent) {
+ NL_SET_ERR_MSG(extack, "Lease device is a virtual device");
+ return false;
+ }
+ if (!netif_device_present(dev)) {
+ NL_SET_ERR_MSG(extack, "Lease device has been removed from the system");
+ return false;
+ }
+ if (!dev->queue_mgmt_ops) {
+ NL_SET_ERR_MSG(extack, "Lease device does not support queue management operations");
+ return false;
+ }
+ return true;
+}
+
+bool netdev_queue_busy(struct net_device *dev, int idx,
+ struct netlink_ext_ack *extack)
+{
+ if (netif_rxq_is_leased(dev, idx)) {
+ NL_SET_ERR_MSG(extack, "Lease device queue is already leased");
+ return true;
+ }
+ if (xsk_get_pool_from_qid(dev, idx)) {
+ NL_SET_ERR_MSG(extack, "Lease device queue in use by AF_XDP");
+ return true;
+ }
+ if (netif_rxq_has_mp(dev, idx)) {
+ NL_SET_ERR_MSG(extack, "Lease device queue in use by memory provider");
+ return true;
+ }
+ return false;
+}
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index c7d9341b7630..75c7a68cb90d 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -9,15 +9,121 @@
#include "page_pool_priv.h"
-/* See also page_pool_is_unreadable() */
-bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
+void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src)
+{
+ netdev_assert_locked(rxq_src->dev);
+ netdev_assert_locked(rxq_dst->dev);
+
+ netdev_hold(rxq_src->dev, &rxq_src->lease_tracker, GFP_KERNEL);
+
+ WRITE_ONCE(rxq_src->lease, rxq_dst);
+ WRITE_ONCE(rxq_dst->lease, rxq_src);
+}
+
+void netdev_rx_queue_unlease(struct netdev_rx_queue *rxq_dst,
+ struct netdev_rx_queue *rxq_src)
+{
+ netdev_assert_locked(rxq_dst->dev);
+ netdev_assert_locked(rxq_src->dev);
+
+ WRITE_ONCE(rxq_src->lease, NULL);
+ WRITE_ONCE(rxq_dst->lease, NULL);
+
+ netdev_put(rxq_src->dev, &rxq_src->lease_tracker);
+}
+
+bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx)
+{
+ if (rxq_idx < dev->real_num_rx_queues)
+ return READ_ONCE(__netif_get_rx_queue(dev, rxq_idx)->lease);
+ return false;
+}
+
+static bool netif_lease_dir_ok(const struct net_device *dev,
+ enum netif_lease_dir dir)
{
- struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);
+ if (dir == NETIF_VIRT_TO_PHYS && !dev->dev.parent)
+ return true;
+ if (dir == NETIF_PHYS_TO_VIRT && dev->dev.parent)
+ return true;
+ return false;
+}
- return !!rxq->mp_params.mp_ops;
+struct netdev_rx_queue *
+__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx,
+ enum netif_lease_dir dir)
+{
+ struct net_device *orig_dev = *dev;
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(orig_dev, *rxq_idx);
+
+ if (rxq->lease) {
+ if (!netif_lease_dir_ok(orig_dev, dir))
+ return NULL;
+ rxq = rxq->lease;
+ *rxq_idx = get_netdev_rx_queue_index(rxq);
+ *dev = rxq->dev;
+ }
+ return rxq;
+}
+
+struct netdev_rx_queue *
+netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq_idx)
+{
+ struct net_device *orig_dev = *dev;
+ struct netdev_rx_queue *rxq;
+
+ /* Locking order is always from the virtual to the physical device
+ * see netdev_nl_queue_create_doit().
+ */
+ netdev_ops_assert_locked(orig_dev);
+ rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_VIRT_TO_PHYS);
+ if (rxq && orig_dev != *dev)
+ netdev_lock(*dev);
+ return rxq;
+}
+
+void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
+ struct net_device *dev)
+{
+ if (orig_dev != dev)
+ netdev_unlock(dev);
+}
+
+bool netif_rx_queue_lease_get_owner(struct net_device **dev,
+ unsigned int *rxq_idx)
+{
+ struct net_device *orig_dev = *dev;
+ struct netdev_rx_queue *rxq;
+
+ /* The physical device needs to be locked. If there is indeed a lease,
+ * then the virtual device holds a reference on the physical device
+ * and the lease stays active until the virtual device is torn down.
+ * When queues get {un,}leased both devices are always locked.
+ */
+ netdev_ops_assert_locked(orig_dev);
+ rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_PHYS_TO_VIRT);
+ if (rxq && orig_dev != *dev)
+ return true;
+ return false;
+}
+
+/* See also page_pool_is_unreadable() */
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx)
+{
+ if (rxq_idx < dev->real_num_rx_queues)
+ return __netif_get_rx_queue(dev, rxq_idx)->mp_params.mp_ops;
+ return false;
}
EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
+bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx)
+{
+ if (rxq_idx < dev->real_num_rx_queues)
+ return __netif_get_rx_queue(dev, rxq_idx)->mp_params.mp_priv;
+ return false;
+}
+
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
@@ -100,49 +206,63 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack)
{
+ struct net_device *orig_dev = dev;
struct netdev_rx_queue *rxq;
int ret;
if (!netdev_need_ops_lock(dev))
return -EOPNOTSUPP;
-
if (rxq_idx >= dev->real_num_rx_queues) {
NL_SET_ERR_MSG(extack, "rx queue index out of range");
return -ERANGE;
}
- rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+ rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+ rxq = netif_get_rx_queue_lease_locked(&dev, &rxq_idx);
+ if (!rxq) {
+ NL_SET_ERR_MSG(extack, "rx queue peered to a virtual netdev");
+ return -EBUSY;
+ }
+ if (!dev->dev.parent) {
+ NL_SET_ERR_MSG(extack, "rx queue is mapped to a virtual netdev");
+ ret = -EBUSY;
+ goto out;
+ }
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (dev->cfg->hds_thresh) {
NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (dev_xdp_prog_count(dev)) {
NL_SET_ERR_MSG(extack, "unable to custom memory provider to device with XDP program attached");
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
-
- rxq = __netif_get_rx_queue(dev, rxq_idx);
if (rxq->mp_params.mp_ops) {
NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
#ifdef CONFIG_XDP_SOCKETS
if (rxq->pool) {
NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
#endif
-
rxq->mp_params = *p;
ret = netdev_rx_queue_restart(dev, rxq_idx);
if (ret) {
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
}
+out:
+ netif_put_rx_queue_lease_locked(orig_dev, dev);
return ret;
}
@@ -157,38 +277,43 @@ int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
return ret;
}
-void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *old_p)
{
+ struct net_device *orig_dev = dev;
struct netdev_rx_queue *rxq;
int err;
- if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
+ if (WARN_ON_ONCE(rxq_idx >= dev->real_num_rx_queues))
return;
- rxq = __netif_get_rx_queue(dev, ifq_idx);
+ rxq = netif_get_rx_queue_lease_locked(&dev, &rxq_idx);
+ if (WARN_ON_ONCE(!rxq))
+ return;
/* Callers holding a netdev ref may get here after we already
* went thru shutdown via dev_memory_provider_uninstall().
*/
if (dev->reg_state > NETREG_REGISTERED &&
!rxq->mp_params.mp_ops)
- return;
+ goto out;
if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
rxq->mp_params.mp_priv != old_p->mp_priv))
- return;
+ goto out;
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
- err = netdev_rx_queue_restart(dev, ifq_idx);
+ err = netdev_rx_queue_restart(dev, rxq_idx);
WARN_ON(err && err != -ENETDOWN);
+out:
+ netif_put_rx_queue_lease_locked(orig_dev, dev);
}
-void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+void net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *old_p)
{
netdev_lock(dev);
- __net_mp_close_rxq(dev, ifq_idx, old_p);
+ __net_mp_close_rxq(dev, rxq_idx, old_p);
netdev_unlock(dev);
}
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index ca4f80282448..797d2a08c515 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <net/xdp_sock_drv.h>
+#include <net/netdev_queues.h>
#include "netlink.h"
#include "common.h"
@@ -169,14 +169,16 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret)
return ret;
- /* Disabling channels, query zero-copy AF_XDP sockets */
+ /* ensure channels are not busy at the moment */
from_channel = channels.combined_count +
min(channels.rx_count, channels.tx_count);
- for (i = from_channel; i < old_total; i++)
- if (xsk_get_pool_from_qid(dev, i)) {
- GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
+ for (i = from_channel; i < old_total; i++) {
+ if (netdev_queue_busy(dev, i, NULL)) {
+ GENL_SET_ERR_MSG(info,
+ "requested channel counts are too low due to busy queues (AF_XDP or queue leasing)");
return -EINVAL;
}
+ }
ret = dev->ethtool_ops->set_channels(dev, &channels);
return ret < 0 ? ret : 1;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 9431e305b233..02a3454234d6 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -27,12 +27,13 @@
#include <linux/net.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
+#include <linux/ethtool_netlink.h>
#include <net/devlink.h>
#include <net/ipv6.h>
-#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <net/netdev_lock.h>
-#include <linux/ethtool_netlink.h>
+#include <net/netdev_queues.h>
+
#include "common.h"
/* State held across locks and calls for commands which have devlink fallback */
@@ -2282,12 +2283,12 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
if (ret)
return ret;
- /* Disabling channels, query zero-copy AF_XDP sockets */
+ /* Disabling channels, query busy queues (AF_XDP, queue leasing) */
from_channel = channels.combined_count +
min(channels.rx_count, channels.tx_count);
to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
for (i = from_channel; i < to_channel; i++)
- if (xsk_get_pool_from_qid(dev, i))
+ if (netdev_queue_busy(dev, i, NULL))
return -EINVAL;
ret = dev->ethtool_ops->set_channels(dev, &channels);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3b46bc635c43..92f791433725 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -23,6 +23,8 @@
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
+
+#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_lock.h>
@@ -103,7 +105,7 @@ bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
-struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+struct xsk_buff_pool *xsk_get_pool_from_qid(const struct net_device *dev,
u16 queue_id)
{
if (queue_id < dev->real_num_rx_queues)
@@ -117,10 +119,18 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
- if (queue_id < dev->num_rx_queues)
- dev->_rx[queue_id].pool = NULL;
- if (queue_id < dev->num_tx_queues)
- dev->_tx[queue_id].pool = NULL;
+ struct net_device *orig_dev = dev;
+ unsigned int id = queue_id;
+
+ if (id < dev->real_num_rx_queues)
+ WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id));
+
+ if (id < dev->real_num_rx_queues)
+ dev->_rx[id].pool = NULL;
+ if (id < dev->real_num_tx_queues)
+ dev->_tx[id].pool = NULL;
+
+ netif_put_rx_queue_lease_locked(orig_dev, dev);
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
@@ -130,17 +140,29 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id)
{
- if (queue_id >= max_t(unsigned int,
- dev->real_num_rx_queues,
- dev->real_num_tx_queues))
- return -EINVAL;
+ struct net_device *orig_dev = dev;
+ unsigned int id = queue_id;
+ int ret = 0;
- if (queue_id < dev->real_num_rx_queues)
- dev->_rx[queue_id].pool = pool;
- if (queue_id < dev->real_num_tx_queues)
- dev->_tx[queue_id].pool = pool;
+ if (id >= max(dev->real_num_rx_queues,
+ dev->real_num_tx_queues))
+ return -EINVAL;
+ if (id < dev->real_num_rx_queues) {
+ if (!netif_get_rx_queue_lease_locked(&dev, &id))
+ return -EBUSY;
+ if (xsk_get_pool_from_qid(dev, id)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
- return 0;
+ if (id < dev->real_num_rx_queues)
+ dev->_rx[id].pool = pool;
+ if (id < dev->real_num_tx_queues)
+ dev->_tx[id].pool = pool;
+out:
+ netif_put_rx_queue_lease_locked(orig_dev, dev);
+ return ret;
}
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
@@ -324,14 +346,37 @@ static bool xsk_is_bound(struct xdp_sock *xs)
return false;
}
+static bool xsk_dev_queue_valid(const struct xdp_sock *xs,
+ const struct xdp_rxq_info *info)
+{
+ struct net_device *dev = xs->dev;
+ u32 queue_index = xs->queue_id;
+ struct netdev_rx_queue *rxq;
+
+ if (info->dev == dev &&
+ info->queue_index == queue_index)
+ return true;
+
+ if (queue_index < dev->real_num_rx_queues) {
+ rxq = READ_ONCE(__netif_get_rx_queue(dev, queue_index)->lease);
+ if (!rxq)
+ return false;
+
+ dev = rxq->dev;
+ queue_index = get_netdev_rx_queue_index(rxq);
+
+ return info->dev == dev &&
+ info->queue_index == queue_index;
+ }
+ return false;
+}
+
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
if (!xsk_is_bound(xs))
return -ENXIO;
-
- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ if (!xsk_dev_queue_valid(xs, xdp->rxq))
return -EINVAL;
-
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
xs->rx_dropped++;
return -ENOSPC;
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index e0b579a1df4f..7df1056a35fd 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -160,6 +160,7 @@ enum {
NETDEV_A_QUEUE_DMABUF,
NETDEV_A_QUEUE_IO_URING,
NETDEV_A_QUEUE_XSK,
+ NETDEV_A_QUEUE_LEASE,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -203,6 +204,15 @@ enum {
};
enum {
+ NETDEV_A_LEASE_IFINDEX = 1,
+ NETDEV_A_LEASE_QUEUE,
+ NETDEV_A_LEASE_NETNS_ID,
+
+ __NETDEV_A_LEASE_MAX,
+ NETDEV_A_LEASE_MAX = (__NETDEV_A_LEASE_MAX - 1)
+};
+
+enum {
NETDEV_A_DMABUF_IFINDEX = 1,
NETDEV_A_DMABUF_QUEUES,
NETDEV_A_DMABUF_FD,
@@ -228,6 +238,7 @@ enum {
NETDEV_CMD_BIND_RX,
NETDEV_CMD_NAPI_SET,
NETDEV_CMD_BIND_TX,
+ NETDEV_CMD_QUEUE_CREATE,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/tools/testing/selftests/drivers/net/README.rst b/tools/testing/selftests/drivers/net/README.rst
index eb838ae94844..b94e81c2e030 100644
--- a/tools/testing/selftests/drivers/net/README.rst
+++ b/tools/testing/selftests/drivers/net/README.rst
@@ -62,6 +62,13 @@ LOCAL_V4, LOCAL_V6, REMOTE_V4, REMOTE_V6
Local and remote endpoint IP addresses.
+LOCAL_PREFIX_V4, LOCAL_PREFIX_V6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Local IP prefix/subnet which can be used to allocate extra IP addresses (for
+network namespaces behind macvlan, veth, netkit devices). DUT must be
+reachable using these addresses from the endpoint.
+
REMOTE_TYPE
~~~~~~~~~~~
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index 9c163ba6feee..39ad86d693b3 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -32,6 +32,8 @@ TEST_PROGS = \
irq.py \
loopback.sh \
nic_timestamp.py \
+ nk_netns.py \
+ nk_qlease.py \
pp_alloc_fail.py \
rss_api.py \
rss_ctx.py \
diff --git a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
index d5d247eca6b7..022008249313 100644
--- a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
@@ -3,6 +3,7 @@
"""
Driver test environment (hardware-only tests).
NetDrvEnv and NetDrvEpEnv are the main environment classes.
+NetDrvContEnv extends NetDrvEpEnv with netkit container support.
Former is for local host only tests, latter creates / connects
to a remote endpoint. See NIPA wiki for more information about
running and writing driver tests.
@@ -29,7 +30,7 @@ try:
from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
ksft_ne, ksft_not_in, ksft_raises, ksft_true, ksft_gt, ksft_not_none
from drivers.net.lib.py import GenerateTraffic, Remote, Iperf3Runner
- from drivers.net.lib.py import NetDrvEnv, NetDrvEpEnv
+ from drivers.net.lib.py import NetDrvEnv, NetDrvEpEnv, NetDrvContEnv
__all__ = ["NetNS", "NetNSEnter", "NetdevSimDev",
"EthtoolFamily", "NetdevFamily", "NetshaperFamily",
@@ -44,8 +45,8 @@ try:
"ksft_eq", "ksft_ge", "ksft_in", "ksft_is", "ksft_lt",
"ksft_ne", "ksft_not_in", "ksft_raises", "ksft_true", "ksft_gt",
"ksft_not_none", "ksft_not_none",
- "NetDrvEnv", "NetDrvEpEnv", "GenerateTraffic", "Remote",
- "Iperf3Runner"]
+ "NetDrvEnv", "NetDrvEpEnv", "NetDrvContEnv", "GenerateTraffic",
+ "Remote", "Iperf3Runner"]
except ModuleNotFoundError as e:
print("Failed importing `net` library from kernel sources")
print(str(e))
diff --git a/tools/testing/selftests/drivers/net/hw/nk_forward.bpf.c b/tools/testing/selftests/drivers/net/hw/nk_forward.bpf.c
new file mode 100644
index 000000000000..86ebfc1445b6
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/nk_forward.bpf.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#define TC_ACT_OK 0
+#define ETH_P_IPV6 0x86DD
+
+#define ctx_ptr(field) ((void *)(long)(field))
+
+#define v6_p64_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \
+ a.s6_addr32[1] == b.s6_addr32[1])
+
+volatile __u32 netkit_ifindex;
+volatile __u8 ipv6_prefix[16];
+
+SEC("tc/ingress")
+int tc_redirect_peer(struct __sk_buff *skb)
+{
+ void *data_end = ctx_ptr(skb->data_end);
+ void *data = ctx_ptr(skb->data);
+ struct in6_addr *peer_addr;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *eth;
+
+ peer_addr = (struct in6_addr *)ipv6_prefix;
+
+ if (skb->protocol != bpf_htons(ETH_P_IPV6))
+ return TC_ACT_OK;
+
+ eth = data;
+ if ((void *)(eth + 1) > data_end)
+ return TC_ACT_OK;
+
+ ip6h = data + sizeof(struct ethhdr);
+ if ((void *)(ip6h + 1) > data_end)
+ return TC_ACT_OK;
+
+ if (!v6_p64_equal(ip6h->daddr, (*peer_addr)))
+ return TC_ACT_OK;
+
+ return bpf_redirect_peer(netkit_ifindex, 0);
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/drivers/net/hw/nk_netns.py b/tools/testing/selftests/drivers/net/hw/nk_netns.py
new file mode 100755
index 000000000000..afa8638195d8
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/nk_netns.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+from lib.py import ksft_run, ksft_exit
+from lib.py import NetDrvContEnv
+from lib.py import cmd
+
+
+def test_ping(cfg) -> None:
+ cfg.require_ipver("6")
+
+ cmd(f"ping -c 1 -W5 {cfg.nk_guest_ipv6}", host=cfg.remote)
+ cmd(f"ping -c 1 -W5 {cfg.remote_addr_v['6']}", ns=cfg.netns)
+
+
+def main() -> None:
+ with NetDrvContEnv(__file__) as cfg:
+ ksft_run([test_ping], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/hw/nk_qlease.py b/tools/testing/selftests/drivers/net/hw/nk_qlease.py
new file mode 100755
index 000000000000..738a46d2d20c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/nk_qlease.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import re
+from os import path
+from lib.py import ksft_run, ksft_exit
+from lib.py import NetDrvContEnv
+from lib.py import bkg, cmd, defer, ethtool, rand_port, wait_port_listen
+
+
+def create_rss_ctx(cfg):
+ output = ethtool(f"-X {cfg.ifname} context new start {cfg.src_queue} equal 1").stdout
+ values = re.search(r'New RSS context is (\d+)', output).group(1)
+ return int(values)
+
+
+def set_flow_rule(cfg):
+ output = ethtool(f"-N {cfg.ifname} flow-type tcp6 dst-port {cfg.port} action {cfg.src_queue}").stdout
+ values = re.search(r'ID (\d+)', output).group(1)
+ return int(values)
+
+
+def set_flow_rule_rss(cfg, rss_ctx_id):
+ output = ethtool(f"-N {cfg.ifname} flow-type tcp6 dst-port {cfg.port} context {rss_ctx_id}").stdout
+ values = re.search(r'ID (\d+)', output).group(1)
+ return int(values)
+
+
+def test_iou_zcrx(cfg) -> None:
+ cfg.require_ipver('6')
+
+ ethtool(f"-X {cfg.ifname} equal {cfg.src_queue}")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ flow_rule_id = set_flow_rule(cfg)
+ defer(ethtool, f"-N {cfg.ifname} delete {flow_rule_id}")
+
+ rx_cmd = f"ip netns exec {cfg.netns.name} {cfg.bin_local} -s -p {cfg.port} -i {cfg._nk_guest_ifname} -q {cfg.nk_queue}"
+ tx_cmd = f"{cfg.bin_remote} -c -h {cfg.nk_guest_ipv6} -p {cfg.port} -l 12840"
+ with bkg(rx_cmd, exit_wait=True):
+ wait_port_listen(cfg.port, proto="tcp", ns=cfg.netns)
+ cmd(tx_cmd, host=cfg.remote)
+
+
+def main() -> None:
+ with NetDrvContEnv(__file__, lease=True) as cfg:
+ cfg.bin_local = path.abspath(path.dirname(__file__) + "/../../../drivers/net/hw/iou-zcrx")
+ cfg.bin_remote = cfg.remote.deploy(cfg.bin_local)
+ cfg.port = rand_port()
+ ksft_run([test_iou_zcrx], args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/lib/py/__init__.py b/tools/testing/selftests/drivers/net/lib/py/__init__.py
index 8b75faa9af6d..be3a8a936882 100644
--- a/tools/testing/selftests/drivers/net/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/lib/py/__init__.py
@@ -3,6 +3,7 @@
"""
Driver test environment.
NetDrvEnv and NetDrvEpEnv are the main environment classes.
+NetDrvContEnv extends NetDrvEpEnv with netkit container support.
Former is for local host only tests, latter creates / connects
to a remote endpoint. See NIPA wiki for more information about
running and writing driver tests.
@@ -43,12 +44,12 @@ try:
"ksft_ne", "ksft_not_in", "ksft_raises", "ksft_true", "ksft_gt",
"ksft_not_none", "ksft_not_none"]
- from .env import NetDrvEnv, NetDrvEpEnv
+ from .env import NetDrvEnv, NetDrvEpEnv, NetDrvContEnv
from .load import GenerateTraffic, Iperf3Runner
from .remote import Remote
- __all__ += ["NetDrvEnv", "NetDrvEpEnv", "GenerateTraffic", "Remote",
- "Iperf3Runner"]
+ __all__ += ["NetDrvEnv", "NetDrvEpEnv", "NetDrvContEnv", "GenerateTraffic",
+ "Remote", "Iperf3Runner"]
except ModuleNotFoundError as e:
print("Failed importing `net` library from kernel sources")
print(str(e))
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index 41cc248ac848..7066d78395c6 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -1,13 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
+import ipaddress
import os
+import re
import time
from pathlib import Path
from lib.py import KsftSkipEx, KsftXfailEx
from lib.py import ksft_setup, wait_file
from lib.py import cmd, ethtool, ip, CmdExitFailure
from lib.py import NetNS, NetdevSimDev
+from lib.py import NetdevFamily, EthtoolFamily
from .remote import Remote
+from . import bpftool
class NetDrvEnvBase:
@@ -289,3 +293,156 @@ class NetDrvEpEnv(NetDrvEnvBase):
data.get('stats-block-usecs', 0) / 1000 / 1000
time.sleep(self._stats_settle_time)
+
+
+class NetDrvContEnv(NetDrvEpEnv):
+ """
+ Class for an environment with a netkit pair setup for forwarding traffic
+ between the physical interface and a network namespace.
+ """
+
+ def __init__(self, src_path, lease=False, **kwargs):
+ super().__init__(src_path, **kwargs)
+
+ self.require_ipver("6")
+ local_prefix = self.env.get("LOCAL_PREFIX_V6")
+ if not local_prefix:
+ raise KsftSkipEx("LOCAL_PREFIX_V6 required")
+
+ self.netdevnl = NetdevFamily()
+ self.ethnl = EthtoolFamily()
+
+ local_prefix = re.sub(r"(/\d+)?$", "", local_prefix).rstrip(":")
+ self.ipv6_prefix = f"{local_prefix}::"
+ self.nk_host_ipv6 = f"{local_prefix}::2:1"
+ self.nk_guest_ipv6 = f"{local_prefix}::2:2"
+
+ self.netns = None
+ self._nk_host_ifname = None
+ self._nk_guest_ifname = None
+ self._tc_attached = False
+ self._bpf_prog_pref = None
+ self._bpf_prog_id = None
+ self._leased = False
+
+ nk_rxqueues = 1
+ if lease:
+ nk_rxqueues = 2
+ ip(f"link add type netkit mode l2 forward peer forward numrxqueues {nk_rxqueues}")
+
+ all_links = ip("-d link show", json=True)
+ netkit_links = [link for link in all_links
+ if link.get('linkinfo', {}).get('info_kind') == 'netkit'
+ and 'UP' not in link.get('flags', [])]
+
+ if len(netkit_links) != 2:
+ raise KsftSkipEx("Failed to create netkit pair")
+
+ netkit_links.sort(key=lambda x: x['ifindex'])
+ self._nk_host_ifname = netkit_links[1]['ifname']
+ self._nk_guest_ifname = netkit_links[0]['ifname']
+ self.nk_host_ifindex = netkit_links[1]['ifindex']
+ self.nk_guest_ifindex = netkit_links[0]['ifindex']
+
+ if lease:
+ self._lease_queues()
+
+ self._setup_ns()
+ self._attach_bpf()
+
+ def __del__(self):
+ if self._tc_attached:
+ cmd(f"tc filter del dev {self.ifname} ingress pref {self._bpf_prog_pref}")
+ self._tc_attached = False
+
+ if self._nk_host_ifname:
+ cmd(f"ip link del dev {self._nk_host_ifname}")
+ self._nk_host_ifname = None
+ self._nk_guest_ifname = None
+
+ if self.netns:
+ del self.netns
+ self.netns = None
+
+ if self._leased:
+ self.ethnl.rings_set({'header': {'dev-index': self.ifindex},
+ 'tcp-data-split': 'unknown',
+ 'hds-thresh': self._hds_thresh,
+ 'rx': self._rx_rings})
+ self._leased = False
+
+ super().__del__()
+
+ def _lease_queues(self):
+ channels = self.ethnl.channels_get({'header': {'dev-index': self.ifindex}})
+ channels = channels['combined-count']
+ if channels < 2:
+ raise KsftSkipEx('Test requires NETIF with at least 2 combined channels')
+
+ rings = self.ethnl.rings_get({'header': {'dev-index': self.ifindex}})
+ self._rx_rings = rings['rx']
+ self._hds_thresh = rings.get('hds-thresh', 0)
+ self.ethnl.rings_set({'header': {'dev-index': self.ifindex},
+ 'tcp-data-split': 'enabled',
+ 'hds-thresh': 0,
+ 'rx': 64})
+ self.src_queue = channels - 1
+ bind_result = self.netdevnl.queue_create(
+ {
+ "ifindex": self.nk_guest_ifindex,
+ "type": "rx",
+ "lease": {
+ "ifindex": self.ifindex,
+ "queue": {"id": self.src_queue, "type": "rx"},
+ },
+ }
+ )
+ self.nk_queue = bind_result['id']
+ self._leased = True
+
+ def _setup_ns(self):
+ self.netns = NetNS()
+ ip(f"link set dev {self._nk_guest_ifname} netns {self.netns.name}")
+ ip(f"link set dev {self._nk_host_ifname} up")
+ ip(f"-6 addr add fe80::1/64 dev {self._nk_host_ifname} nodad")
+ ip(f"-6 route add {self.nk_guest_ipv6}/128 via fe80::2 dev {self._nk_host_ifname}")
+
+ ip("link set lo up", ns=self.netns)
+ ip(f"link set dev {self._nk_guest_ifname} up", ns=self.netns)
+ ip(f"-6 addr add fe80::2/64 dev {self._nk_guest_ifname}", ns=self.netns)
+ ip(f"-6 addr add {self.nk_guest_ipv6}/64 dev {self._nk_guest_ifname} nodad", ns=self.netns)
+ ip(f"-6 route add default via fe80::1 dev {self._nk_guest_ifname}", ns=self.netns)
+
+ def _attach_bpf(self):
+ bpf_obj = self.test_dir / "nk_forward.bpf.o"
+ if not bpf_obj.exists():
+ raise KsftSkipEx("BPF prog not found")
+
+ cmd(f"tc filter add dev {self.ifname} ingress bpf obj {bpf_obj} sec tc/ingress direct-action")
+ self._tc_attached = True
+
+ tc_info = cmd(f"tc filter show dev {self.ifname} ingress").stdout
+ match = re.search(r'pref (\d+).*nk_forward\.bpf.*id (\d+)', tc_info)
+ if not match:
+ raise Exception("Failed to get BPF prog ID")
+ self._bpf_prog_pref = int(match.group(1))
+ self._bpf_prog_id = int(match.group(2))
+
+ prog_info = bpftool(f"prog show id {self._bpf_prog_id}", json=True)
+ map_ids = prog_info.get("map_ids", [])
+
+ bss_map_id = None
+ for map_id in map_ids:
+ map_info = bpftool(f"map show id {map_id}", json=True)
+ if map_info.get("name", "").endswith("bss"):
+ bss_map_id = map_id
+
+ if bss_map_id is None:
+ raise Exception("Failed to find .bss map")
+
+ ipv6_addr = ipaddress.IPv6Address(self.ipv6_prefix)
+ ipv6_bytes = ipv6_addr.packed
+ ifindex_bytes = self.nk_host_ifindex.to_bytes(4, byteorder='little')
+ value = ipv6_bytes + ifindex_bytes
+ value_hex = ' '.join(f'{b:02x}' for b in value)
+ bpftool(f"map update id {bss_map_id} key hex 00 00 00 00 value hex {value_hex}")