author     Björn Töpel <bjorn.topel@intel.com>   2019-12-19 09:10:02 +0300
committer  Alexei Starovoitov <ast@kernel.org>   2019-12-20 08:09:43 +0300
commit     e312b9e706ed6d94f6cc9088fcd9fbd81de4525c (patch)
tree       9e7eeaba755c57e74ed2c8707ddedfad909e91af /net
parent     fb5aacdf3603ccbafe1da74eecd132eb4a31e53f (diff)
xsk: Make xskmap flush_list common for all map instances
The xskmap flush list is used to track entries that need to be flushed
via the xdp_do_flush_map() function. This list used to be per-map, but
there is really no reason for that. Instead, make the flush list global
for all xskmaps, which simplifies __xsk_map_flush() and xsk_map_alloc().

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-5-bjorn.topel@gmail.com
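The core idea is to hoist the flush list out of each struct xsk_map instance and into a single per-CPU list shared by all xskmaps, so the redirect and flush paths no longer need a map pointer at all. A minimal kernel-style sketch of that pattern, using hypothetical example_* names rather than the patch's actual symbols:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* One flush list per CPU, shared by every map instance. All example_*
 * names are illustrative, not the patch's identifiers. */
static DEFINE_PER_CPU(struct list_head, example_flush_list);

struct example_sock {
	struct list_head flush_node;
};

/* Enqueue on the current CPU's list. The XDP redirect path runs with
 * preemption disabled (inside NAPI), so this_cpu_ptr() is safe here. */
static void example_queue_for_flush(struct example_sock *xs)
{
	struct list_head *flush_list = this_cpu_ptr(&example_flush_list);

	list_add(&xs->flush_node, flush_list);
}

/* Drain the current CPU's list; note that no map pointer is needed. */
static void example_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&example_flush_list);
	struct example_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node)
		list_del_init(&xs->flush_node);
}

/* Init-time setup: every possible CPU gets an empty list head, so a
 * later CPU hotplug cannot observe an uninitialized list. */
static int __init example_list_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(example_flush_list, cpu));
	return 0;
}

Because both enqueueing and draining always happen on the local CPU at the end of a NAPI poll, entries originating from different maps can safely share one list.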
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c |  9 ++++-----
-rw-r--r--  net/xdp/xsk.c     | 17 +++++++++--------
2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index a411f7835dee..c51678c473c5 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3511,8 +3511,7 @@ err:
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
                             struct bpf_map *map,
-                            struct xdp_buff *xdp,
-                            u32 index)
+                            struct xdp_buff *xdp)
 {
         int err;
@@ -3537,7 +3536,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
         case BPF_MAP_TYPE_XSKMAP: {
                 struct xdp_sock *xs = fwd;
-                err = __xsk_map_redirect(map, xdp, xs);
+                err = __xsk_map_redirect(xs, xdp);
                 return err;
         }
         default:
@@ -3562,7 +3561,7 @@ void xdp_do_flush_map(void)
                 __cpu_map_flush(map);
                 break;
         case BPF_MAP_TYPE_XSKMAP:
-                __xsk_map_flush(map);
+                __xsk_map_flush();
                 break;
         default:
                 break;
@@ -3619,7 +3618,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
         if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
                 xdp_do_flush_map();
-        err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+        err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
         if (unlikely(err))
                 goto err;
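On the caller side in net/core/filter.c, the map argument drops out of the xskmap paths, and __bpf_tx_xdp_map() loses its now-unused index parameter as well. The resulting prototypes, as implied by the diff (at the time these declarations lived in include/net/xdp_sock.h; that location is an assumption here, not shown in the patch):

/* Post-patch signatures: no struct bpf_map argument, because the flush
 * list is now global per-CPU state instead of part of struct xsk_map. */
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);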
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 956793893c9d..e45c27f5cfca 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -31,6 +31,8 @@
 #define TX_BATCH_SIZE 16
+static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
         return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -264,11 +266,9 @@ out_unlock:
         return err;
 }
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-                       struct xdp_sock *xs)
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-        struct xsk_map *m = container_of(map, struct xsk_map, map);
-        struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
         int err;
         err = xsk_rcv(xs, xdp);
@@ -281,10 +281,9 @@ int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
         return 0;
 }
-void __xsk_map_flush(struct bpf_map *map)
+void __xsk_map_flush(void)
 {
-        struct xsk_map *m = container_of(map, struct xsk_map, map);
-        struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
         struct xdp_sock *xs, *tmp;
         list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -1177,7 +1176,7 @@ static struct pernet_operations xsk_net_ops = {
 static int __init xsk_init(void)
 {
-        int err;
+        int err, cpu;
         err = proto_register(&xsk_proto, 0 /* no slab */);
         if (err)
@@ -1195,6 +1194,8 @@ static int __init xsk_init(void)
         if (err)
                 goto out_pernet;
+        for_each_possible_cpu(cpu)
+                INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
         return 0;
 out_pernet:
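For context on how these entry points are exercised, a hedged sketch of a typical driver receive path follows; only xdp_do_redirect() and xdp_do_flush_map() are real kernel functions here, while struct example_rxq and the example_* helpers are invented for illustration:

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

struct example_rxq {
        struct net_device *netdev;      /* device the frames arrived on */
        struct bpf_prog *xdp_prog;      /* attached XDP program */
};

/* Hypothetical driver helpers, assumed to exist for this sketch. */
bool example_frame_ready(struct example_rxq *rxq);
void example_fill_xdp_buff(struct example_rxq *rxq, struct xdp_buff *xdp);

/* Illustrative NAPI poll: redirect frames as they arrive, then flush
 * once per poll so sockets queued on this CPU's flush list are
 * actually delivered. */
static int example_napi_poll(struct example_rxq *rxq, int budget)
{
        int done = 0;

        while (done < budget && example_frame_ready(rxq)) {
                struct xdp_buff xdp;

                example_fill_xdp_buff(rxq, &xdp);
                if (xdp_do_redirect(rxq->netdev, &xdp, rxq->xdp_prog) == 0)
                        done++;
        }

        /* One flush per poll drains this CPU's shared xskmap list. */
        xdp_do_flush_map();
        return done;
}

With the per-map list gone, this single flush is correct no matter how many different xskmaps the redirects above touched.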