author    Magnus Karlsson <magnus.karlsson@intel.com>  2018-05-02 14:01:24 +0300
committer Alexei Starovoitov <ast@kernel.org>          2018-05-04 01:55:23 +0300
commit    423f38329d267969130fb6f2c685f73d72687558 (patch)
tree      ab7c0df0819c420846ac47c18ddc0fd614eaf035 /net/xdp/xsk_queue.c
parent    c0c77d8fb787cfe0c3fca689c2a30d1dad4eaba7 (diff)
download  linux-423f38329d267969130fb6f2c685f73d72687558.tar.xz
xsk: add umem fill queue support and mmap
Here, we add another setsockopt for registered user memory (umem)
called XDP_UMEM_FILL_QUEUE. Using this socket option, the process can
ask the kernel to allocate a queue (ring buffer) and also mmap it
(XDP_UMEM_PGOFF_FILL_QUEUE) into the process.

The queue is used to explicitly pass ownership of umem frames from
the user process to the kernel. These frames will in a later patch be
filled in with Rx packet data by the kernel.

v2: Fixed potential crash in xsk_mmap.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
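For orientation, a rough sketch of how a user-space process might exercise the
new option is shown below. This is an assumption-laden illustration, not code
from the patch: the socket family, option names and mmap page offset are taken
from the commit message and the surrounding AF_XDP series, and the mapping
length is only a guess at the ring size.

    /* Hypothetical user-space sketch, not part of this patch.  The names
     * AF_XDP, SOL_XDP, XDP_UMEM_FILL_QUEUE and XDP_UMEM_PGOFF_FILL_QUEUE
     * are taken from the commit message and the surrounding AF_XDP series;
     * the mmap length is a guess mirroring xskq_umem_get_ring_size().
     * Umem registration (XDP_UMEM_REG) is assumed to have happened already.
     */
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/if_xdp.h>

    int setup_fill_queue(void)
    {
    	int entries = 1024;	/* ring size, assumed to be a power of two */
    	void *fq;
    	int fd;

    	fd = socket(AF_XDP, SOCK_RAW, 0);
    	if (fd < 0)
    		return -1;

    	/* Ask the kernel to allocate the fill queue for this socket. */
    	if (setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_QUEUE,
    		       &entries, sizeof(entries)))
    		goto err;

    	/* Map the ring into this process.  The dedicated page offset is
    	 * larger than 32 bits, so a 64-bit off_t (_FILE_OFFSET_BITS=64)
    	 * is assumed.  The length leaves one page of headroom for the
    	 * ring header in front of the u32 descriptor array.
    	 */
    	fq = mmap(NULL, sysconf(_SC_PAGESIZE) + entries * sizeof(uint32_t),
    		  PROT_READ | PROT_WRITE, MAP_SHARED, fd,
    		  XDP_UMEM_PGOFF_FILL_QUEUE);
    	if (fq == MAP_FAILED)
    		goto err;

    	/* Frame indices written into this ring are handed over to the
    	 * kernel and come back once filled with Rx data (later patch).
    	 */
    	return fd;

    err:
    	close(fd);
    	return -1;
    }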
Diffstat (limited to 'net/xdp/xsk_queue.c')
-rw-r--r--  net/xdp/xsk_queue.c  58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
new file mode 100644
index 000000000000..23da4f29d3fb
--- /dev/null
+++ b/net/xdp/xsk_queue.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* XDP user-space ring structure
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+
+#include "xsk_queue.h"
+
+static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
+{
+ return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u32);
+}
+
+struct xsk_queue *xskq_create(u32 nentries)
+{
+ struct xsk_queue *q;
+ gfp_t gfp_flags;
+ size_t size;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return NULL;
+
+ q->nentries = nentries;
+ q->ring_mask = nentries - 1;
+
+ gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_COMP | __GFP_NORETRY;
+ size = xskq_umem_get_ring_size(q);
+
+ q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
+ get_order(size));
+ if (!q->ring) {
+ kfree(q);
+ return NULL;
+ }
+
+ return q;
+}
+
+void xskq_destroy(struct xsk_queue *q)
+{
+ if (!q)
+ return;
+
+ page_frag_free(q->ring);
+ kfree(q);
+}
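One detail worth noting about xskq_create() above: storing nentries - 1 as
ring_mask only yields a correct wrap-around mask when nentries is a power of
two, which the setsockopt path elsewhere in the series is expected to enforce.
A minimal sketch of how such a mask is meant to be used (the helper below is
hypothetical, not part of this file):

    /* Hypothetical helper illustrating the ring_mask convention: with
     * nentries a power of two, a free-running index wraps into the
     * descriptor array with a single AND instead of a division.
     */
    static inline u32 xskq_ring_slot(struct xsk_queue *q, u32 idx)
    {
    	return idx & q->ring_mask;	/* same as idx % q->nentries */
    }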