path: root/net/core/devmem.c
author	Jakub Kicinski <kuba@kernel.org>	2026-04-10 04:24:34 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2026-04-10 04:24:35 +0300
commit	15089225889ba4b29f0263757cd66932fa676cb0 (patch)
tree	73b8cc252fcebbafad57f5b100c2f774eb7a42c1 /net/core/devmem.c
parent	b6e39e48469e37057fce27a1b87cf6d3e456aa42 (diff)
parent	65d657d806848add1e1f0632562d7f47d5d5c188 (diff)
download	linux-15089225889ba4b29f0263757cd66932fa676cb0.tar.xz
Merge branch 'netkit-support-for-io_uring-zero-copy-and-af_xdp'
Daniel Borkmann says:

====================
netkit: Support for io_uring zero-copy and AF_XDP

Containers use virtual netdevs to route traffic from a physical netdev
in the host namespace. They do not have access to the physical netdev
in the host and thus can't use memory providers or AF_XDP, which
require reconfiguring/restarting queues in the physical netdev.

This patchset adds the concept of queue leasing to virtual netdevs,
which allows containers to use memory providers and AF_XDP at native
speed. Leased queues are bound to a real queue in a physical netdev
and act as a proxy. Memory provider and AF_XDP operations take an
ifindex and queue id, so containers pass in the ifindex of a virtual
netdev and the queue id of a leased queue, which then gets proxied to
the underlying real queue.

We have implemented support for this concept in netkit and tested it
against Nvidia ConnectX-6 (mlx5) as well as Broadcom BCM957504
(bnxt_en) 100G NICs. For more details see the individual patches.

====================

Link: https://patch.msgid.link/20260402231031.447597-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
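[Editor's note: from a userspace point of view, the appeal of queue leasing is
that the AF_XDP binding interface does not change: a container simply passes
the ifindex of its virtual netdev and the id of a leased queue to the usual
bind() call. Below is a minimal sketch of that flow; the device name "nk0",
queue id 0, and the UMEM/ring sizes are illustrative assumptions, not values
taken from the patchset.]

/* Sketch: bind an AF_XDP socket through a leased queue on a virtual
 * netdev. "nk0" and queue id 0 are assumptions; real setups would get
 * them from whatever orchestrator created the lease. */
#include <linux/if_xdp.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

#define NUM_FRAMES 64
#define FRAME_SIZE 2048

int main(void)
{
	struct xdp_umem_reg reg = { 0 };
	struct sockaddr_xdp sxdp = { 0 };
	int ring = NUM_FRAMES;
	void *umem;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);
	if (fd < 0) {
		perror("socket(AF_XDP)");
		return 1;
	}

	/* AF_XDP requires a registered UMEM plus fill/completion/RX rings
	 * before bind(); sizes here are arbitrary. */
	if (posix_memalign(&umem, getpagesize(), NUM_FRAMES * FRAME_SIZE))
		return 1;
	reg.addr = (__u64)(uintptr_t)umem;
	reg.len = NUM_FRAMES * FRAME_SIZE;
	reg.chunk_size = FRAME_SIZE;
	if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg)) ||
	    setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring, sizeof(ring)) ||
	    setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring, sizeof(ring)) ||
	    setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring, sizeof(ring))) {
		perror("setsockopt");
		return 1;
	}

	/* The container binds against its *virtual* netdev and the id of
	 * a leased queue; per the patchset, the kernel proxies both to the
	 * real queue in the physical netdev. */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex("nk0");
	sxdp.sxdp_queue_id = 0;
	if (!sxdp.sxdp_ifindex || bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
		perror("bind");
		return 1;
	}

	/* mmap() of the rings and the actual RX loop are omitted. */
	close(fd);
	return 0;
}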
Diffstat (limited to 'net/core/devmem.c')
-rw-r--r--	net/core/devmem.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 69d79aee07ef..cde4c89bc146 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -145,7 +145,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
 		rxq_idx = get_netdev_rx_queue_index(rxq);
-		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
+		netif_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
 	}
 
 	percpu_ref_kill(&binding->ref);
@@ -163,7 +163,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 	u32 xa_idx;
 	int err;
 
-	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
+	err = netif_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
 	if (err)
 		return err;
@@ -176,7 +176,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 	return 0;
 
 err_close_rxq:
-	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
+	netif_mp_close_rxq(dev, rxq_idx, &mp_params);
 	return err;
 }
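
[Editor's note: for intuition about the rename above, here is a toy userspace
model of the dispatch a netdev-level wrapper like netif_mp_open_rxq() could
perform: resolve a leased virtual queue to its backing physical queue, then
invoke the memory provider operation on the real device. Every name in this
sketch (lease table, types, helpers) is hypothetical; it is not the kernel's
implementation, which lives in the patchset itself.]

#include <stdio.h>

/* Toy model only -- all types and names here are hypothetical and do
 * not reflect the kernel's actual data structures. */
struct toy_netdev;

struct toy_lease {
	struct toy_netdev *real_dev; /* backing physical netdev */
	int real_qid;                /* backing real queue id */
};

struct toy_netdev {
	const char *name;
	struct toy_lease *leases;    /* per-queue leases, or NULL */
	int nqueues;
};

/* Stand-in for the pre-existing low-level open on a real queue. */
static int toy_mp_open_rxq(struct toy_netdev *dev, int qid)
{
	printf("opening memory provider on %s queue %d\n", dev->name, qid);
	return 0;
}

/* Wrapper in the spirit of netif_mp_open_rxq(): if (dev, qid) names a
 * leased queue on a virtual netdev, proxy to the real queue underneath;
 * otherwise operate on the device directly. */
static int toy_netif_mp_open_rxq(struct toy_netdev *dev, int qid)
{
	if (dev->leases && qid < dev->nqueues && dev->leases[qid].real_dev) {
		struct toy_lease *l = &dev->leases[qid];

		return toy_mp_open_rxq(l->real_dev, l->real_qid);
	}
	return toy_mp_open_rxq(dev, qid);
}

int main(void)
{
	struct toy_netdev phys = { .name = "eth0", .nqueues = 8 };
	struct toy_lease leases[1] = { { .real_dev = &phys, .real_qid = 5 } };
	struct toy_netdev virt = { .name = "nk0", .leases = leases, .nqueues = 1 };

	/* The caller only ever sees the virtual netdev and queue 0 ... */
	toy_netif_mp_open_rxq(&virt, 0);
	/* ... but the operation lands on eth0 queue 5. */
	return 0;
}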