author      Jesper Dangaard Brouer <brouer@redhat.com>    2019-06-18 16:05:58 +0300
committer   David S. Miller <davem@davemloft.net>         2019-06-19 18:23:13 +0300
commit      f033b688c1ede5ec78c9a718fa9f0b374049bc31 (patch)
tree        541546c9e967a1d4c6be385231980637149599be /include
parent      d956a048cd3fc1ba154101a1a50fb37950081ff6 (diff)
download    linux-f033b688c1ede5ec78c9a718fa9f0b374049bc31.tar.xz
xdp: add tracepoints for XDP mem
These tracepoints make it easier to troubleshoot XDP mem id disconnect.

The xdp:mem_disconnect tracepoint cannot be replaced via kprobe. It is placed at the last stable place for the pointer to struct xdp_mem_allocator, just before it is scheduled for RCU removal. It also extracts info on 'safe_to_remove' and 'force'.

Detailed info about in-flight pages is not available at this layer. The next patch will add the tracepoints needed at the page_pool layer for this.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
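For orientation, here is a minimal sketch (assumed, not part of this patch) of the kind of call site in net/core/xdp.c that the xdp:mem_disconnect event is meant for: it fires on the xdp_mem_allocator at the last point where the pointer is stable, right before the object is handed to call_rcu(). The names mem_id_ht, mem_id_rht_params and __xdp_mem_allocator_rcu_free are illustrative assumptions, not taken from this diff.

#include <net/xdp_priv.h>
#include <trace/events/xdp.h>

/* Hypothetical sketch, not part of this patch. */
static bool mem_disconnect_sketch(struct xdp_mem_allocator *xa,
				  bool safe_to_remove, bool force)
{
	/* Last stable point for the xa pointer: record mem.id, mem.type,
	 * the allocator pointer and the safe_to_remove/force inputs.
	 */
	trace_mem_disconnect(xa, safe_to_remove, force);

	/* Only unpublish and schedule RCU removal when allowed or forced;
	 * mem_id_ht, mem_id_rht_params and __xdp_mem_allocator_rcu_free
	 * are assumed names for the rhashtable and the RCU callback.
	 */
	if ((safe_to_remove || force) &&
	    !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	return safe_to_remove || force;
}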
Diffstat (limited to 'include')
-rw-r--r--   include/net/xdp_priv.h       |  23
-rw-r--r--   include/trace/events/xdp.h   | 115
2 files changed, 138 insertions, 0 deletions
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
new file mode 100644
index 000000000000..6a8cba6ea79a
--- /dev/null
+++ b/include/net/xdp_priv.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NET_XDP_PRIV_H__
+#define __LINUX_NET_XDP_PRIV_H__
+
+#include <linux/rhashtable.h>
+
+/* Private to net/core/xdp.c, but used by trace/events/xdp.h */
+struct xdp_mem_allocator {
+	struct xdp_mem_info mem;
+	union {
+		void *allocator;
+		struct page_pool *page_pool;
+		struct zero_copy_allocator *zc_alloc;
+	};
+	int disconnect_cnt;
+	unsigned long defer_start;
+	struct rhash_head node;
+	struct rcu_head rcu;
+	struct delayed_work defer_wq;
+	unsigned long defer_warn;
+};
+
+#endif /* __LINUX_NET_XDP_PRIV_H__ */
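The union above overlays one allocator pointer per memory model; which member is meaningful follows from mem.type. A hypothetical helper, purely for illustration (not part of the patch), could dispatch on it like this:

#include <net/xdp.h>		/* xdp_mem_info, MEM_TYPE_* */
#include <net/xdp_priv.h>

/* Hypothetical helper, not in the patch: pick the union member that
 * matches mem.type.  Only the page_pool and zero-copy models carry a
 * typed allocator; the order-0/shared page models have none.
 */
static const void *xdp_mem_allocator_ptr(const struct xdp_mem_allocator *xa)
{
	switch (xa->mem.type) {
	case MEM_TYPE_PAGE_POOL:
		return xa->page_pool;
	case MEM_TYPE_ZERO_COPY:
		return xa->zc_alloc;
	default:
		return xa->allocator;	/* generic view; may be NULL */
	}
}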
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index e95cb86b65cf..bb5e380e2ef3 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -269,6 +269,121 @@ TRACE_EVENT(xdp_devmap_xmit,
__entry->from_ifindex, __entry->to_ifindex, __entry->err)
);
+/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
+#include <net/xdp_priv.h>
+
+#define __MEM_TYPE_MAP(FN) \
+ FN(PAGE_SHARED) \
+ FN(PAGE_ORDER0) \
+ FN(PAGE_POOL) \
+ FN(ZERO_COPY)
+
+#define __MEM_TYPE_TP_FN(x) \
+ TRACE_DEFINE_ENUM(MEM_TYPE_##x);
+#define __MEM_TYPE_SYM_FN(x) \
+ { MEM_TYPE_##x, #x },
+#define __MEM_TYPE_SYM_TAB \
+ __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
+__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
+
+TRACE_EVENT(mem_disconnect,
+
+ TP_PROTO(const struct xdp_mem_allocator *xa,
+ bool safe_to_remove, bool force),
+
+ TP_ARGS(xa, safe_to_remove, force),
+
+ TP_STRUCT__entry(
+ __field(const struct xdp_mem_allocator *, xa)
+ __field(u32, mem_id)
+ __field(u32, mem_type)
+ __field(const void *, allocator)
+ __field(bool, safe_to_remove)
+ __field(bool, force)
+ __field(int, disconnect_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->xa = xa;
+ __entry->mem_id = xa->mem.id;
+ __entry->mem_type = xa->mem.type;
+ __entry->allocator = xa->allocator;
+ __entry->safe_to_remove = safe_to_remove;
+ __entry->force = force;
+ __entry->disconnect_cnt = xa->disconnect_cnt;
+ ),
+
+ TP_printk("mem_id=%d mem_type=%s allocator=%p"
+ " safe_to_remove=%s force=%s disconnect_cnt=%d",
+ __entry->mem_id,
+ __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
+ __entry->allocator,
+ __entry->safe_to_remove ? "true" : "false",
+ __entry->force ? "true" : "false",
+ __entry->disconnect_cnt
+ )
+);
+
+TRACE_EVENT(mem_connect,
+
+ TP_PROTO(const struct xdp_mem_allocator *xa,
+ const struct xdp_rxq_info *rxq),
+
+ TP_ARGS(xa, rxq),
+
+ TP_STRUCT__entry(
+ __field(const struct xdp_mem_allocator *, xa)
+ __field(u32, mem_id)
+ __field(u32, mem_type)
+ __field(const void *, allocator)
+ __field(const struct xdp_rxq_info *, rxq)
+ __field(int, ifindex)
+ ),
+
+ TP_fast_assign(
+ __entry->xa = xa;
+ __entry->mem_id = xa->mem.id;
+ __entry->mem_type = xa->mem.type;
+ __entry->allocator = xa->allocator;
+ __entry->rxq = rxq;
+ __entry->ifindex = rxq->dev->ifindex;
+ ),
+
+ TP_printk("mem_id=%d mem_type=%s allocator=%p"
+ " ifindex=%d",
+ __entry->mem_id,
+ __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
+ __entry->allocator,
+ __entry->ifindex
+ )
+);
+
+TRACE_EVENT(mem_return_failed,
+
+ TP_PROTO(const struct xdp_mem_info *mem,
+ const struct page *page),
+
+ TP_ARGS(mem, page),
+
+ TP_STRUCT__entry(
+ __field(const struct page *, page)
+ __field(u32, mem_id)
+ __field(u32, mem_type)
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->mem_id = mem->id;
+ __entry->mem_type = mem->type;
+ ),
+
+ TP_printk("mem_id=%d mem_type=%s page=%p",
+ __entry->mem_id,
+ __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
+ __entry->page
+ )
+);
+
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>
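A related hedged sketch (again not in this diff; mem_id_ht and mem_id_rht_params are assumed names) of the situation xdp:mem_return_failed is meant to capture, i.e. a frame coming back after its allocator has already been disconnected:

/* Hypothetical sketch: a late page return whose mem.id no longer
 * resolves to a registered allocator.
 */
static void xdp_return_failed_sketch(const struct xdp_mem_info *mem,
				     struct page *page)
{
	struct xdp_mem_allocator *xa;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	if (!xa) {
		/* Allocator already gone: record the failed return */
		trace_mem_return_failed(mem, page);
		put_page(page);
	} else {
		/* normal path: hand the page back to xa->page_pool (elided) */
	}
	rcu_read_unlock();
}

Since this header's trace group is "xdp" (the commit message refers to xdp:mem_disconnect), the new events should appear as xdp:mem_connect, xdp:mem_disconnect and xdp:mem_return_failed, so they can be enabled from tracefs (events/xdp/mem_disconnect/enable) or recorded with perf record -e xdp:mem_disconnect; the mem_id field ties the connect, disconnect and failed-return records for one allocator together.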