author		David S. Miller <davem@davemloft.net>	2019-04-23 07:35:55 +0300
committer	David S. Miller <davem@davemloft.net>	2019-04-23 07:35:55 +0300
commit		2843ba2ec75948e274d2c4f0a9390980e68a6461 (patch)
tree		b2fde70246956609ac43ad62026a4fdb48e33fbc /tools/lib/bpf/xsk.h
parent		be659b8d3c79afc54e087ebf8d849685d7b0d395 (diff)
parent		f79b464fd6b56b6256de43bc4c7d5968c0e52968 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-04-22

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) allow stack/queue helpers from more bpf program types, from Alban.

2) allow parallel verification of root bpf programs, from Alexei.

3) introduce bpf sysctl hook for trusted root cases, from Andrey.

4) recognize var/datasec in btf deduplication, from Andrii.

5) cpumap performance optimizations, from Jesper.

6) verifier prep for alu32 optimization, from Jiong.

7) libbpf xsk cleanup, from Magnus.

8) other various fixes and cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'tools/lib/bpf/xsk.h')
-rw-r--r--	tools/lib/bpf/xsk.h | 22 ++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index a497f00e2962..82ea71a0f3ec 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -16,6 +16,7 @@
#include <linux/if_xdp.h>
#include "libbpf.h"
+#include "libbpf_util.h"
#ifdef __cplusplus
extern "C" {
@@ -36,6 +37,10 @@ struct name { \
DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);
+/* For a detailed explanation on the memory barriers associated with the
+ * ring, please take a look at net/xdp/xsk_queue.h.
+ */
+
struct xsk_umem;
struct xsk_socket;
@@ -105,7 +110,7 @@ static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
size_t nb, __u32 *idx)
{
- if (unlikely(xsk_prod_nb_free(prod, nb) < nb))
+ if (xsk_prod_nb_free(prod, nb) < nb)
return 0;
*idx = prod->cached_prod;
@@ -116,10 +121,10 @@ static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
{
- /* Make sure everything has been written to the ring before signalling
- * this to the kernel.
+ /* Make sure everything has been written to the ring before indicating
+ * this to the kernel by writing the producer pointer.
*/
- smp_wmb();
+ libbpf_smp_wmb();
*prod->producer += nb;
}
@@ -129,11 +134,11 @@ static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
{
size_t entries = xsk_cons_nb_avail(cons, nb);
- if (likely(entries > 0)) {
+ if (entries > 0) {
/* Make sure we do not speculatively read the data before
* we have received the packet buffers from the ring.
*/
- smp_rmb();
+ libbpf_smp_rmb();
*idx = cons->cached_cons;
cons->cached_cons += entries;
@@ -144,6 +149,11 @@ static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
{
+ /* Make sure data has been read before indicating we are done
+ * with the entries by updating the consumer pointer.
+ */
+ libbpf_smp_rwmb();
+
*cons->consumer += nb;
}
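
To make the barrier placement above concrete, here is a minimal consumer-side
sketch built on the xsk.h functions this patch touches. It is not part of the
commit: process_frame() is a hypothetical application hook, and the handling of
a short fill-ring reservation is deliberately simplified.

#include <stddef.h>
#include "xsk.h"

/* Hypothetical application hook; not part of libbpf. */
static void process_frame(__u64 addr, __u32 len)
{
	(void)addr;
	(void)len;
}

/* Drain up to `batch` descriptors from the RX ring and recycle their
 * UMEM addresses onto the fill ring.
 */
static void rx_and_refill(struct xsk_ring_cons *rx, struct xsk_ring_prod *fill,
			  size_t batch)
{
	__u32 idx_rx = 0, idx_fill = 0;
	size_t rcvd, i;

	/* peek() issues libbpf_smp_rmb() before handing out entries, so the
	 * descriptor reads below cannot be speculated ahead of it.
	 */
	rcvd = xsk_ring_cons__peek(rx, batch, &idx_rx);
	if (!rcvd)
		return;

	/* A real application would retry or queue on a short reservation. */
	if (xsk_ring_prod__reserve(fill, rcvd, &idx_fill) != rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx_rx++);

		process_frame(desc->addr, desc->len);
		*xsk_ring_prod__fill_addr(fill, idx_fill++) = desc->addr;
	}

	/* submit() issues libbpf_smp_wmb() so the addresses written above are
	 * visible before the kernel sees the new producer pointer ...
	 */
	xsk_ring_prod__submit(fill, rcvd);

	/* ... and release() issues libbpf_smp_rwmb() so our descriptor reads
	 * complete before the kernel is told it may recycle the entries.
	 */
	xsk_ring_cons__release(rx, rcvd);
}

Per the comments added by the patch itself, this pairs with the kernel side in
net/xdp/xsk_queue.h: the write barrier in submit() orders the descriptor
writes before the producer-pointer update, and the full barrier in release()
keeps the descriptor reads from being reordered past the consumer-pointer
update.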