Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/bridge.h    |  12
-rw-r--r--  include/trace/events/btrfs.h     | 134
-rw-r--r--  include/trace/events/cgroup.h    |   6
-rw-r--r--  include/trace/events/io_uring.h  | 358
-rw-r--r--  include/trace/events/page_pool.h |  44
-rw-r--r--  include/trace/events/rxrpc.h     |  20
-rw-r--r--  include/trace/events/sock.h      |   4
-rw-r--r--  include/trace/events/tcp.h       |   2
-rw-r--r--  include/trace/events/wbt.h       |  12
-rw-r--r--  include/trace/events/writeback.h | 140
-rw-r--r--  include/trace/events/xdp.h       |  21
11 files changed, 566 insertions, 187 deletions
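Most of the hunks below edit the same four sections of a TRACE_EVENT() definition: TP_PROTO()/TP_ARGS() declare the tracepoint's call signature, TP_STRUCT__entry() declares the fields recorded into the trace ring buffer, TP_fast_assign() fills those fields in when the event fires, and TP_printk() formats them when the record is read back. For reference, a minimal definition looks roughly like this (a hypothetical sample_event, not part of this patch):

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,

	/* Signature of the generated trace_sample_event() call. */
	TP_PROTO(int fd, u64 bytes),
	TP_ARGS(fd, bytes),

	/* Fields copied into the per-event ring buffer record. */
	TP_STRUCT__entry(
		__field(int, fd)
		__field(u64, bytes)
	),

	/* Runs in the caller's context when the event fires; keep it cheap. */
	TP_fast_assign(
		__entry->fd = fd;
		__entry->bytes = bytes;
	),

	/* Format string used when the record is printed from the buffer. */
	TP_printk("fd %d, bytes %llu", __entry->fd, __entry->bytes)
);

The declared field type in TP_STRUCT__entry() is exported through the event's format file, which is why several hunks below (blkcnt_t, pgoff_t, ino_t) prefer well-defined types there and explicit casts in TP_printk().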
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h index 8ea966448b58..6b200059c2c5 100644 --- a/include/trace/events/bridge.h +++ b/include/trace/events/bridge.h @@ -95,16 +95,16 @@ TRACE_EVENT(fdb_delete, TRACE_EVENT(br_fdb_update, TP_PROTO(struct net_bridge *br, struct net_bridge_port *source, - const unsigned char *addr, u16 vid, bool added_by_user), + const unsigned char *addr, u16 vid, unsigned long flags), - TP_ARGS(br, source, addr, vid, added_by_user), + TP_ARGS(br, source, addr, vid, flags), TP_STRUCT__entry( __string(br_dev, br->dev->name) __string(dev, source->dev->name) __array(unsigned char, addr, ETH_ALEN) __field(u16, vid) - __field(bool, added_by_user) + __field(unsigned long, flags) ), TP_fast_assign( @@ -112,14 +112,14 @@ TRACE_EVENT(br_fdb_update, __assign_str(dev, source->dev->name); memcpy(__entry->addr, addr, ETH_ALEN); __entry->vid = vid; - __entry->added_by_user = added_by_user; + __entry->flags = flags; ), - TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d", + TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u flags 0x%lx", __get_str(br_dev), __get_str(dev), __entry->addr[0], __entry->addr[1], __entry->addr[2], __entry->addr[3], __entry->addr[4], __entry->addr[5], __entry->vid, - __entry->added_by_user) + __entry->flags) ); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 5df604de4f11..620bf1b38fba 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -19,7 +19,7 @@ struct btrfs_delayed_ref_node; struct btrfs_delayed_tree_ref; struct btrfs_delayed_data_ref; struct btrfs_delayed_ref_head; -struct btrfs_block_group_cache; +struct btrfs_block_group; struct btrfs_free_cluster; struct map_lookup; struct extent_buffer; @@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(btrfs__inode, TP_STRUCT__entry_btrfs( __field( u64, ino ) - __field( blkcnt_t, blocks ) + __field( u64, blocks ) __field( u64, disk_i_size ) __field( u64, generation ) __field( u64, last_trans ) @@ -194,7 +194,7 @@ DECLARE_EVENT_CLASS(btrfs__inode, show_root_type(__entry->root_objectid), __entry->generation, __entry->ino, - (unsigned long long)__entry->blocks, + __entry->blocks, __entry->disk_i_size, __entry->last_trans, __entry->logged_trans) @@ -292,7 +292,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, TRACE_EVENT(btrfs_handle_em_exist, - TP_PROTO(struct btrfs_fs_info *fs_info, + TP_PROTO(const struct btrfs_fs_info *fs_info, const struct extent_map *existing, const struct extent_map *map, u64 start, u64 len), @@ -330,8 +330,8 @@ TRACE_EVENT(btrfs_handle_em_exist, /* file extent item */ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, u64 start), TP_ARGS(bi, l, fi, start), @@ -385,8 +385,8 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, DECLARE_EVENT_CLASS( btrfs__file_extent_item_inline, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, int slot, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, int slot, u64 start), TP_ARGS(bi, l, fi, slot, start), @@ -426,8 +426,8 @@ DECLARE_EVENT_CLASS( DEFINE_EVENT( btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular, - TP_PROTO(struct btrfs_inode *bi, struct 
extent_buffer *l, - struct btrfs_file_extent_item *fi, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, u64 start), TP_ARGS(bi, l, fi, start) ); @@ -435,8 +435,8 @@ DEFINE_EVENT( DEFINE_EVENT( btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, u64 start), TP_ARGS(bi, l, fi, start) ); @@ -444,8 +444,8 @@ DEFINE_EVENT( DEFINE_EVENT( btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, int slot, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, int slot, u64 start), TP_ARGS(bi, l, fi, slot, start) ); @@ -453,8 +453,8 @@ DEFINE_EVENT( DEFINE_EVENT( btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline, - TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, - struct btrfs_file_extent_item *fi, int slot, u64 start), + TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l, + const struct btrfs_file_extent_item *fi, int slot, u64 start), TP_ARGS(bi, l, fi, slot, start) ); @@ -574,7 +574,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __field( char, for_kupdate ) __field( char, for_reclaim ) __field( char, range_cyclic ) - __field( pgoff_t, writeback_index ) + __field( unsigned long, writeback_index ) __field( u64, root_objectid ) ), @@ -603,7 +603,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __entry->range_start, __entry->range_end, __entry->for_kupdate, __entry->for_reclaim, __entry->range_cyclic, - (unsigned long)__entry->writeback_index) + __entry->writeback_index) ); DEFINE_EVENT(btrfs__writepage, __extent_writepage, @@ -622,7 +622,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook, TP_STRUCT__entry_btrfs( __field( u64, ino ) - __field( pgoff_t, index ) + __field( unsigned long, index ) __field( u64, start ) __field( u64, end ) __field( int, uptodate ) @@ -642,7 +642,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook, TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu " "end=%llu uptodate=%d", show_root_type(__entry->root_objectid), - __entry->ino, (unsigned long)__entry->index, + __entry->ino, __entry->index, __entry->start, __entry->end, __entry->uptodate) ); @@ -699,7 +699,7 @@ TRACE_EVENT(btrfs_sync_fs, TRACE_EVENT(btrfs_add_block_group, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_block_group_cache *block_group, int create), + const struct btrfs_block_group *block_group, int create), TP_ARGS(fs_info, block_group, create), @@ -713,11 +713,10 @@ TRACE_EVENT(btrfs_add_block_group, ), TP_fast_assign_btrfs(fs_info, - __entry->offset = block_group->key.objectid; - __entry->size = block_group->key.offset; + __entry->offset = block_group->start; + __entry->size = block_group->length; __entry->flags = block_group->flags; - __entry->bytes_used = - btrfs_block_group_used(&block_group->item); + __entry->bytes_used = block_group->used; __entry->bytes_super = block_group->bytes_super; __entry->create = create; ), @@ -1018,7 +1017,7 @@ TRACE_EVENT(btrfs_cow_block, TRACE_EVENT(btrfs_space_reservation, - TP_PROTO(const struct btrfs_fs_info *fs_info, char *type, u64 val, + TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val, u64 bytes, int reserve), 
TP_ARGS(fs_info, type, val, bytes, reserve), @@ -1051,7 +1050,7 @@ TRACE_EVENT(btrfs_space_reservation, TRACE_EVENT(btrfs_trigger_flush, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes, - int flush, char *reason), + int flush, const char *reason), TP_ARGS(fs_info, flags, bytes, flush, reason), @@ -1185,7 +1184,7 @@ TRACE_EVENT(find_free_extent, DECLARE_EVENT_CLASS(btrfs__reserve_extent, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 len), TP_ARGS(block_group, start, len), @@ -1198,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; __entry->flags = block_group->flags; __entry->start = start; __entry->len = len; @@ -1215,7 +1214,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent, DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 len), TP_ARGS(block_group, start, len) @@ -1223,7 +1222,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent, DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 len), TP_ARGS(block_group, start, len) @@ -1231,7 +1230,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster, TRACE_EVENT(btrfs_find_cluster, - TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start, + TP_PROTO(const struct btrfs_block_group *block_group, u64 start, u64 bytes, u64 empty_size, u64 min_bytes), TP_ARGS(block_group, start, bytes, empty_size, min_bytes), @@ -1246,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; __entry->flags = block_group->flags; __entry->start = start; __entry->bytes = bytes; @@ -1264,7 +1263,7 @@ TRACE_EVENT(btrfs_find_cluster, TRACE_EVENT(btrfs_failed_cluster_setup, - TP_PROTO(const struct btrfs_block_group_cache *block_group), + TP_PROTO(const struct btrfs_block_group *block_group), TP_ARGS(block_group), @@ -1273,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; ), TP_printk_btrfs("block_group=%llu", __entry->bg_objectid) @@ -1281,7 +1280,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup, TRACE_EVENT(btrfs_setup_cluster, - TP_PROTO(const struct btrfs_block_group_cache *block_group, + TP_PROTO(const struct btrfs_block_group *block_group, const struct btrfs_free_cluster *cluster, u64 size, int bitmap), @@ -1297,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster, ), TP_fast_assign_btrfs(block_group->fs_info, - __entry->bg_objectid = block_group->key.objectid; + __entry->bg_objectid = block_group->start; __entry->flags = block_group->flags; __entry->start = cluster->window_start; __entry->max_size = cluster->max_size; @@ -1325,17 +1324,17 @@ TRACE_EVENT(alloc_extent_state, TP_STRUCT__entry( __field(const struct extent_state *, state) __field(gfp_t, mask) - __field(unsigned long, ip) + __field(const void*, ip) ), TP_fast_assign( __entry->state = state, __entry->mask = mask, - __entry->ip = IP + __entry->ip = 
(const void *)IP ), TP_printk("state=%p mask=%s caller=%pS", __entry->state, - show_gfp_flags(__entry->mask), (const void *)__entry->ip) + show_gfp_flags(__entry->mask), __entry->ip) ); TRACE_EVENT(free_extent_state, @@ -1346,16 +1345,15 @@ TRACE_EVENT(free_extent_state, TP_STRUCT__entry( __field(const struct extent_state *, state) - __field(unsigned long, ip) + __field(const void*, ip) ), TP_fast_assign( __entry->state = state, - __entry->ip = IP + __entry->ip = (const void *)IP ), - TP_printk("state=%p caller=%pS", __entry->state, - (const void *)__entry->ip) + TP_printk("state=%p caller=%pS", __entry->state, __entry->ip) ); DECLARE_EVENT_CLASS(btrfs__work, @@ -1389,9 +1387,9 @@ DECLARE_EVENT_CLASS(btrfs__work, ); /* - * For situiations when the work is freed, we pass fs_info and a tag that that - * matches address of the work structure so it can be paired with the - * scheduling event. + * For situations when the work is freed, we pass fs_info and a tag that matches + * the address of the work structure so it can be paired with the scheduling + * event. DO NOT add anything here that dereferences wtag. */ DECLARE_EVENT_CLASS(btrfs__work__done, @@ -1567,8 +1565,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent, ), TP_printk_btrfs("bytenr=%llu num_bytes=%llu", - (unsigned long long)__entry->bytenr, - (unsigned long long)__entry->num_bytes) + __entry->bytenr, __entry->num_bytes) ); DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents, @@ -1644,7 +1641,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent, TRACE_EVENT(qgroup_update_counters, TP_PROTO(const struct btrfs_fs_info *fs_info, - struct btrfs_qgroup *qgroup, + const struct btrfs_qgroup *qgroup, u64 cur_old_count, u64 cur_new_count), TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count), @@ -1688,6 +1685,7 @@ TRACE_EVENT(qgroup_update_reserve, __entry->qgid = qgroup->qgroupid; __entry->cur_reserved = qgroup->rsv.values[type]; __entry->diff = diff; + __entry->type = type; ), TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld", @@ -1710,6 +1708,7 @@ TRACE_EVENT(qgroup_meta_reserve, TP_fast_assign_btrfs(root->fs_info, __entry->refroot = root->root_key.objectid; __entry->diff = diff; + __entry->type = type; ), TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld", @@ -1726,7 +1725,6 @@ TRACE_EVENT(qgroup_meta_convert, TP_STRUCT__entry_btrfs( __field( u64, refroot ) __field( s64, diff ) - __field( int, type ) ), TP_fast_assign_btrfs(root->fs_info, @@ -1824,7 +1822,7 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert, ); TRACE_EVENT(btrfs_inode_mod_outstanding_extents, - TP_PROTO(struct btrfs_root *root, u64 ino, int mod), + TP_PROTO(const struct btrfs_root *root, u64 ino, int mod), TP_ARGS(root, ino, mod), @@ -1846,7 +1844,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents, ); DECLARE_EVENT_CLASS(btrfs__block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache), @@ -1858,9 +1856,9 @@ DECLARE_EVENT_CLASS(btrfs__block_group, ), TP_fast_assign_btrfs(bg_cache->fs_info, - __entry->bytenr = bg_cache->key.objectid, - __entry->len = bg_cache->key.offset, - __entry->used = btrfs_block_group_used(&bg_cache->item); + __entry->bytenr = bg_cache->start, + __entry->len = bg_cache->length, + __entry->used = bg_cache->used; __entry->flags = bg_cache->flags; ), @@ -1870,19 +1868,19 @@ DECLARE_EVENT_CLASS(btrfs__block_group, ); DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + 
TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache) ); DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache) ); DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group, - TP_PROTO(const struct btrfs_block_group_cache *bg_cache), + TP_PROTO(const struct btrfs_block_group *bg_cache), TP_ARGS(bg_cache) ); @@ -1905,7 +1903,7 @@ TRACE_EVENT(btrfs_set_extent_bit, TP_fast_assign_btrfs(tree->fs_info, __entry->owner = tree->owner; if (tree->private_data) { - struct inode *inode = tree->private_data; + const struct inode *inode = tree->private_data; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->rootid = @@ -1944,7 +1942,7 @@ TRACE_EVENT(btrfs_clear_extent_bit, TP_fast_assign_btrfs(tree->fs_info, __entry->owner = tree->owner; if (tree->private_data) { - struct inode *inode = tree->private_data; + const struct inode *inode = tree->private_data; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->rootid = @@ -1984,7 +1982,7 @@ TRACE_EVENT(btrfs_convert_extent_bit, TP_fast_assign_btrfs(tree->fs_info, __entry->owner = tree->owner; if (tree->private_data) { - struct inode *inode = tree->private_data; + const struct inode *inode = tree->private_data; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->rootid = @@ -2093,8 +2091,8 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic); DECLARE_EVENT_CLASS(btrfs__space_info_update, - TP_PROTO(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, u64 old, s64 diff), + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, u64 old, s64 diff), TP_ARGS(fs_info, sinfo, old, diff), @@ -2116,16 +2114,16 @@ DECLARE_EVENT_CLASS(btrfs__space_info_update, DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use, - TP_PROTO(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, u64 old, s64 diff), + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, u64 old, s64 diff), TP_ARGS(fs_info, sinfo, old, diff) ); DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned, - TP_PROTO(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, u64 old, s64 diff), + TP_PROTO(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, u64 old, s64 diff), TP_ARGS(fs_info, sinfo, old, diff) ); diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index a566cc521476..7f42a3de59e6 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -66,7 +66,7 @@ DECLARE_EVENT_CLASS(cgroup, TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; - __entry->id = cgrp->id; + __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __assign_str(path, path); ), @@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate, TP_fast_assign( __entry->dst_root = dst_cgrp->root->hierarchy_id; - __entry->dst_id = dst_cgrp->id; + __entry->dst_id = cgroup_id(dst_cgrp); __entry->dst_level = dst_cgrp->level; __assign_str(dst_path, path); __entry->pid = task->pid; @@ -179,7 +179,7 @@ DECLARE_EVENT_CLASS(cgroup_event, TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; - __entry->id = cgrp->id; + __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __assign_str(path, path); __entry->val = val; diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h new file mode 100644 index 000000000000..72a4d0174b02 --- /dev/null +++ b/include/trace/events/io_uring.h @@ 
-0,0 +1,358 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM io_uring + +#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_IO_URING_H + +#include <linux/tracepoint.h> + +struct io_wq_work; + +/** + * io_uring_create - called after a new io_uring context was prepared + * + * @fd: corresponding file descriptor + * @ctx: pointer to a ring context structure + * @sq_entries: actual SQ size + * @cq_entries: actual CQ size + * @flags: SQ ring flags, provided to io_uring_setup(2) + * + * Allows tracing of io_uring creation, and provides a pointer to the context + * that can be used later to find correlated events. + */ +TRACE_EVENT(io_uring_create, + + TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags), + + TP_ARGS(fd, ctx, sq_entries, cq_entries, flags), + + TP_STRUCT__entry ( + __field( int, fd ) + __field( void *, ctx ) + __field( u32, sq_entries ) + __field( u32, cq_entries ) + __field( u32, flags ) + ), + + TP_fast_assign( + __entry->fd = fd; + __entry->ctx = ctx; + __entry->sq_entries = sq_entries; + __entry->cq_entries = cq_entries; + __entry->flags = flags; + ), + + TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d", + __entry->ctx, __entry->fd, __entry->sq_entries, + __entry->cq_entries, __entry->flags) +); + +/** + * io_uring_register - called after a buffer/file/eventfd was successfully + * registered for a ring + * + * @ctx: pointer to a ring context structure + * @opcode: describes which operation to perform + * @nr_files: number of registered files + * @nr_bufs: number of registered buffers + * @eventfd: whether an eventfd was registered or not + * @ret: return code + * + * Allows tracing of fixed files/buffers/eventfds, which can be registered to + * avoid the overhead of getting references to them for every operation. This + * event, together with io_uring_file_get, can provide a full picture of how + * much overhead one can reduce via fixing. + */ +TRACE_EVENT(io_uring_register, + + TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files, + unsigned nr_bufs, bool eventfd, long ret), + + TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( unsigned, opcode ) + __field( unsigned, nr_files ) + __field( unsigned, nr_bufs ) + __field( bool, eventfd ) + __field( long, ret ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->opcode = opcode; + __entry->nr_files = nr_files; + __entry->nr_bufs = nr_bufs; + __entry->eventfd = eventfd; + __entry->ret = ret; + ), + + TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, " + "eventfd %d, ret %ld", + __entry->ctx, __entry->opcode, __entry->nr_files, + __entry->nr_bufs, __entry->eventfd, __entry->ret) +); + +/** + * io_uring_file_get - called before getting references to an SQE file + * + * @ctx: pointer to a ring context structure + * @fd: SQE file descriptor + * + * Allows tracing of how often an SQE file reference is obtained, which can + * help figure out whether it makes sense to use fixed files, or check that + * fixed files are used correctly.
+ */ +TRACE_EVENT(io_uring_file_get, + + TP_PROTO(void *ctx, int fd), + + TP_ARGS(ctx, fd), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( int, fd ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->fd = fd; + ), + + TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd) +); + +/** + * io_uring_queue_async_work - called before submitting a new async work + * + * @ctx: pointer to a ring context structure + * @rw: type of workqueue, hashed or normal + * @req: pointer to a submitted request + * @work: pointer to a submitted io_wq_work + * @flags: request flags + * + * Allows tracing of asynchronous work submission. + */ +TRACE_EVENT(io_uring_queue_async_work, + + TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work, + unsigned int flags), + + TP_ARGS(ctx, rw, req, work, flags), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( int, rw ) + __field( void *, req ) + __field( struct io_wq_work *, work ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->rw = rw; + __entry->req = req; + __entry->work = work; + __entry->flags = flags; + ), + + TP_printk("ring %p, request %p, flags %d, %s queue, work %p", + __entry->ctx, __entry->req, __entry->flags, + __entry->rw ? "hashed" : "normal", __entry->work) +); + +/** + * io_uring_defer - called before an io_uring request is added to the defer_list + * + * @ctx: pointer to a ring context structure + * @req: pointer to a deferred request + * @shadow: whether request is shadow or not + * + * Allows tracking of deferred requests, to get an insight into which requests + * are not started immediately. + */ +TRACE_EVENT(io_uring_defer, + + TP_PROTO(void *ctx, void *req, bool shadow), + + TP_ARGS(ctx, req, shadow), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( void *, req ) + __field( bool, shadow ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->req = req; + __entry->shadow = shadow; + ), + + TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req, + __entry->shadow ? ", shadow": "") +); + +/** + * io_uring_link - called before an io_uring request is added to the link_list + * of another request + * + * @ctx: pointer to a ring context structure + * @req: pointer to a linked request + * @target_req: pointer to the previous request that @req is linked after + * + * Allows tracking of linked requests, to understand dependencies between + * requests and how they influence each other's execution flow. + */ +TRACE_EVENT(io_uring_link, + + TP_PROTO(void *ctx, void *req, void *target_req), + + TP_ARGS(ctx, req, target_req), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( void *, req ) + __field( void *, target_req ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->req = req; + __entry->target_req = target_req; + ), + + TP_printk("ring %p, request %p linked after %p", + __entry->ctx, __entry->req, __entry->target_req) +); + +/** + * io_uring_cqring_wait - called before starting to wait for an available CQE + * + * @ctx: pointer to a ring context structure + * @min_events: minimal number of events to wait for + * + * Allows tracking of CQE waits, so that we can e.g. troubleshoot situations + * when an application waits for an event that never comes.
+ */ +TRACE_EVENT(io_uring_cqring_wait, + + TP_PROTO(void *ctx, int min_events), + + TP_ARGS(ctx, min_events), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( int, min_events ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->min_events = min_events; + ), + + TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events) +); + +/** + * io_uring_fail_link - called before failing a linked request + * + * @req: request whose links were cancelled + * @link: cancelled link + * + * Allows tracking of linked request cancellation, to see not only that some + * work was cancelled, but also which request was the reason. + */ +TRACE_EVENT(io_uring_fail_link, + + TP_PROTO(void *req, void *link), + + TP_ARGS(req, link), + + TP_STRUCT__entry ( + __field( void *, req ) + __field( void *, link ) + ), + + TP_fast_assign( + __entry->req = req; + __entry->link = link; + ), + + TP_printk("request %p, link %p", __entry->req, __entry->link) +); + +/** + * io_uring_complete - called when completing an SQE + * + * @ctx: pointer to a ring context structure + * @user_data: user data associated with the request + * @res: result of the request + * + */ +TRACE_EVENT(io_uring_complete, + + TP_PROTO(void *ctx, u64 user_data, long res), + + TP_ARGS(ctx, user_data, res), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( u64, user_data ) + __field( long, res ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->user_data = user_data; + __entry->res = res; + ), + + TP_printk("ring %p, user_data 0x%llx, result %ld", + __entry->ctx, (unsigned long long)__entry->user_data, + __entry->res) +); + + +/** + * io_uring_submit_sqe - called before submitting one SQE + * + * @ctx: pointer to a ring context structure + * @user_data: user data associated with the request + * @force_nonblock: whether the submission was forced to be non-blocking or not + * @sq_thread: true if sq_thread has submitted this SQE + * + * Allows tracking of SQE submission, to understand its source: the SQ thread + * or an io_uring_enter call.
+ */ +TRACE_EVENT(io_uring_submit_sqe, + + TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread), + + TP_ARGS(ctx, user_data, force_nonblock, sq_thread), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( u64, user_data ) + __field( bool, force_nonblock ) + __field( bool, sq_thread ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->user_data = user_data; + __entry->force_nonblock = force_nonblock; + __entry->sq_thread = sq_thread; + ), + + TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d", + __entry->ctx, (unsigned long long) __entry->user_data, + __entry->force_nonblock, __entry->sq_thread) +); + +#endif /* _TRACE_IO_URING_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h index 47b5ee880aa9..ad0aa7f31675 100644 --- a/include/trace/events/page_pool.h +++ b/include/trace/events/page_pool.h @@ -8,9 +8,10 @@ #include <linux/types.h> #include <linux/tracepoint.h> +#include <trace/events/mmflags.h> #include <net/page_pool.h> -TRACE_EVENT(page_pool_inflight, +TRACE_EVENT(page_pool_release, TP_PROTO(const struct page_pool *pool, s32 inflight, u32 hold, u32 release), @@ -22,6 +23,7 @@ TRACE_EVENT(page_pool_inflight, __field(s32, inflight) __field(u32, hold) __field(u32, release) + __field(u64, cnt) ), TP_fast_assign( @@ -29,10 +31,12 @@ TRACE_EVENT(page_pool_inflight, __entry->inflight = inflight; __entry->hold = hold; __entry->release = release; + __entry->cnt = pool->destroy_cnt; ), - TP_printk("page_pool=%p inflight=%d hold=%u release=%u", - __entry->pool, __entry->inflight, __entry->hold, __entry->release) + TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu", + __entry->pool, __entry->inflight, __entry->hold, + __entry->release, __entry->cnt) ); TRACE_EVENT(page_pool_state_release, @@ -46,16 +50,18 @@ TRACE_EVENT(page_pool_state_release, __field(const struct page_pool *, pool) __field(const struct page *, page) __field(u32, release) + __field(unsigned long, pfn) ), TP_fast_assign( __entry->pool = pool; __entry->page = page; __entry->release = release; + __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p release=%u", - __entry->pool, __entry->page, __entry->release) + TP_printk("page_pool=%p page=%p pfn=%lu release=%u", + __entry->pool, __entry->page, __entry->pfn, __entry->release) ); TRACE_EVENT(page_pool_state_hold, @@ -69,16 +75,40 @@ TRACE_EVENT(page_pool_state_hold, __field(const struct page_pool *, pool) __field(const struct page *, page) __field(u32, hold) + __field(unsigned long, pfn) ), TP_fast_assign( __entry->pool = pool; __entry->page = page; __entry->hold = hold; + __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p hold=%u", - __entry->pool, __entry->page, __entry->hold) + TP_printk("page_pool=%p page=%p pfn=%lu hold=%u", + __entry->pool, __entry->page, __entry->pfn, __entry->hold) +); + +TRACE_EVENT(page_pool_update_nid, + + TP_PROTO(const struct page_pool *pool, int new_nid), + + TP_ARGS(pool, new_nid), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(int, pool_nid) + __field(int, new_nid) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->pool_nid = pool->p.nid; + __entry->new_nid = new_nid; + ), + + TP_printk("page_pool=%p pool_nid=%d new_nid=%d", + __entry->pool, __entry->pool_nid, __entry->new_nid) ); #endif /* _TRACE_PAGE_POOL_H */ diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 
a13a62db3565..191fe447f990 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local, ); TRACE_EVENT(rxrpc_peer, - TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op, + TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op, int usage, const void *where), - TP_ARGS(peer, op, usage, where), + TP_ARGS(peer_debug_id, op, usage, where), TP_STRUCT__entry( __field(unsigned int, peer ) @@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer, ), TP_fast_assign( - __entry->peer = peer->debug_id; + __entry->peer = peer_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -546,10 +546,10 @@ TRACE_EVENT(rxrpc_peer, ); TRACE_EVENT(rxrpc_conn, - TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op, + TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op, int usage, const void *where), - TP_ARGS(conn, op, usage, where), + TP_ARGS(conn_debug_id, op, usage, where), TP_STRUCT__entry( __field(unsigned int, conn ) @@ -559,7 +559,7 @@ TRACE_EVENT(rxrpc_conn, ), TP_fast_assign( - __entry->conn = conn->debug_id; + __entry->conn = conn_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client, ); TRACE_EVENT(rxrpc_call, - TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op, + TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op, int usage, const void *where, const void *aux), - TP_ARGS(call, op, usage, where, aux), + TP_ARGS(call_debug_id, op, usage, where, aux), TP_STRUCT__entry( __field(unsigned int, call ) @@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call, ), TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = call_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -1068,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg, ), TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = call ? 
call->debug_id : 0; __entry->why = why; __entry->seq = seq; __entry->offset = offset; diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index a0c4b8a30966..51fe9f6719eb 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full, TP_fast_assign( __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->truesize = skb->truesize; - __entry->sk_rcvbuf = sk->sk_rcvbuf; + __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf); ), TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d", @@ -115,7 +115,7 @@ TRACE_EVENT(sock_exceed_buf_limit, __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->sysctl_wmem = sk_get_wmem0(sk, prot); __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc); - __entry->wmem_queued = sk->sk_wmem_queued; + __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued); __entry->kind = kind; ), diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 2bc9960a31aa..cf97f6339acb 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n", + TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s", __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, show_tcp_state_name(__entry->state)) diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h index b048694070e2..37342a13c9cb 100644 --- a/include/trace/events/wbt.h +++ b/include/trace/events/wbt.h @@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->rmean = stat[0].mean; __entry->rmin = stat[0].min; __entry->rmax = stat[0].max; @@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->lat = div_u64(lat, 1000); ), @@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->msg = msg; __entry->step = step; __entry->window = div_u64(window, 1000); @@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strlcpy(__entry->name, dev_name(bdi->dev), + ARRAY_SIZE(__entry->name)); __entry->status = status; __entry->step = step; __entry->inflight = inflight; diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index c2ce6480b4b1..ef50be4e5e6c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -61,7 +61,7 @@ DECLARE_EVENT_CLASS(writeback_page_template, TP_STRUCT__entry ( __array(char, name, 32) - __field(unsigned long, ino) + __field(ino_t, ino) __field(pgoff_t, index) ), @@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(writeback_page_template, TP_printk("bdi %s: ino=%lu index=%lu", __entry->name, - __entry->ino, + (unsigned long)__entry->ino, __entry->index ) ); @@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, TP_STRUCT__entry ( __array(char, name, 32) - __field(unsigned long, ino) + __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, flags) ), @@ -120,7 +120,7 @@ 
DECLARE_EVENT_CLASS(writeback_dirty_inode_template, TP_printk("bdi %s: ino=%lu state=%s flags=%s", __entry->name, - __entry->ino, + (unsigned long)__entry->ino, show_inode_state(__entry->state), show_inode_state(__entry->flags) ) @@ -150,28 +150,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, #ifdef CREATE_TRACE_POINTS #ifdef CONFIG_CGROUP_WRITEBACK -static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) +static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) { - return wb->memcg_css->cgroup->kn->id.ino; + return cgroup_ino(wb->memcg_css->cgroup); } -static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) +static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) { if (wbc->wb) return __trace_wb_assign_cgroup(wbc->wb); else - return -1U; + return 1; } #else /* CONFIG_CGROUP_WRITEBACK */ -static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) +static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) { - return -1U; + return 1; } -static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) +static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) { - return -1U; + return 1; } #endif /* CONFIG_CGROUP_WRITEBACK */ @@ -187,8 +187,8 @@ TRACE_EVENT(inode_foreign_history, TP_STRUCT__entry( __array(char, name, 32) - __field(unsigned long, ino) - __field(unsigned int, cgroup_ino) + __field(ino_t, ino) + __field(ino_t, cgroup_ino) __field(unsigned int, history) ), @@ -199,10 +199,10 @@ TRACE_EVENT(inode_foreign_history, __entry->history = history; ), - TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x", + TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x", __entry->name, - __entry->ino, - __entry->cgroup_ino, + (unsigned long)__entry->ino, + (unsigned long)__entry->cgroup_ino, __entry->history ) ); @@ -216,9 +216,9 @@ TRACE_EVENT(inode_switch_wbs, TP_STRUCT__entry( __array(char, name, 32) - __field(unsigned long, ino) - __field(unsigned int, old_cgroup_ino) - __field(unsigned int, new_cgroup_ino) + __field(ino_t, ino) + __field(ino_t, old_cgroup_ino) + __field(ino_t, new_cgroup_ino) ), TP_fast_assign( @@ -228,11 +228,11 @@ TRACE_EVENT(inode_switch_wbs, __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb); ), - TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u", + TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu", __entry->name, - __entry->ino, - __entry->old_cgroup_ino, - __entry->new_cgroup_ino + (unsigned long)__entry->ino, + (unsigned long)__entry->old_cgroup_ino, + (unsigned long)__entry->new_cgroup_ino ) ); @@ -245,10 +245,10 @@ TRACE_EVENT(track_foreign_dirty, TP_STRUCT__entry( __array(char, name, 32) __field(u64, bdi_id) - __field(unsigned long, ino) + __field(ino_t, ino) __field(unsigned int, memcg_id) - __field(unsigned int, cgroup_ino) - __field(unsigned int, page_cgroup_ino) + __field(ino_t, cgroup_ino) + __field(ino_t, page_cgroup_ino) ), TP_fast_assign( @@ -260,16 +260,16 @@ TRACE_EVENT(track_foreign_dirty, __entry->ino = inode ? 
inode->i_ino : 0; __entry->memcg_id = wb->memcg_css->id; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); - __entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino; + __entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup); ), - TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u", + TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu", __entry->name, __entry->bdi_id, - __entry->ino, + (unsigned long)__entry->ino, __entry->memcg_id, - __entry->cgroup_ino, - __entry->page_cgroup_ino + (unsigned long)__entry->cgroup_ino, + (unsigned long)__entry->page_cgroup_ino ) ); @@ -282,7 +282,7 @@ TRACE_EVENT(flush_foreign, TP_STRUCT__entry( __array(char, name, 32) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) __field(unsigned int, frn_bdi_id) __field(unsigned int, frn_memcg_id) ), @@ -294,9 +294,9 @@ TRACE_EVENT(flush_foreign, __entry->frn_memcg_id = frn_memcg_id; ), - TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u", + TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u", __entry->name, - __entry->cgroup_ino, + (unsigned long)__entry->cgroup_ino, __entry->frn_bdi_id, __entry->frn_memcg_id ) @@ -311,9 +311,9 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, TP_STRUCT__entry ( __array(char, name, 32) - __field(unsigned long, ino) + __field(ino_t, ino) __field(int, sync_mode) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( @@ -324,11 +324,11 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), - TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u", + TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu", __entry->name, - __entry->ino, + (unsigned long)__entry->ino, __entry->sync_mode, - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); @@ -358,7 +358,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __field(int, range_cyclic) __field(int, for_background) __field(int, reason) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, @@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " - "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u", + "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu", __entry->name, MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), __entry->nr_pages, @@ -383,7 +383,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->range_cyclic, __entry->for_background, __print_symbolic(__entry->reason, WB_WORK_REASON), - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -413,15 +413,15 @@ DECLARE_EVENT_CLASS(writeback_class, TP_ARGS(wb), TP_STRUCT__entry( __array(char, name, 32) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), - TP_printk("bdi %s: cgroup_ino=%u", + TP_printk("bdi %s: cgroup_ino=%lu", __entry->name, - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_EVENT(name) \ @@ -459,7 +459,7 @@ DECLARE_EVENT_CLASS(wbc_class, __field(int, range_cyclic) __field(long, range_start) __field(long, range_end) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( @@ -478,7 +478,7 @@ 
DECLARE_EVENT_CLASS(wbc_class, TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " "bgrd=%d reclm=%d cyclic=%d " - "start=0x%lx end=0x%lx cgroup_ino=%u", + "start=0x%lx end=0x%lx cgroup_ino=%lu", __entry->name, __entry->nr_to_write, __entry->pages_skipped, @@ -489,7 +489,7 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->range_cyclic, __entry->range_start, __entry->range_end, - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ) @@ -510,7 +510,7 @@ TRACE_EVENT(writeback_queue_io, __field(long, age) __field(int, moved) __field(int, reason) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( unsigned long *older_than_this = work->older_than_this; @@ -522,13 +522,13 @@ TRACE_EVENT(writeback_queue_io, __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), - TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u", + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu", __entry->name, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); @@ -596,7 +596,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned long, balanced_dirty_ratelimit) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( @@ -614,7 +614,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, TP_printk("bdi %s: " "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " - "balanced_dirty_ratelimit=%lu cgroup_ino=%u", + "balanced_dirty_ratelimit=%lu cgroup_ino=%lu", __entry->bdi, __entry->write_bw, /* write bandwidth */ __entry->avg_write_bw, /* avg write bandwidth */ @@ -622,7 +622,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, __entry->dirty_ratelimit, /* base ratelimit */ __entry->task_ratelimit, /* ratelimit with position control */ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */ - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); @@ -660,7 +660,7 @@ TRACE_EVENT(balance_dirty_pages, __field( long, pause) __field(unsigned long, period) __field( long, think) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( @@ -692,7 +692,7 @@ TRACE_EVENT(balance_dirty_pages, "bdi_setpoint=%lu bdi_dirty=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "dirtied=%u dirtied_pause=%u " - "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u", + "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu", __entry->bdi, __entry->limit, __entry->setpoint, @@ -707,7 +707,7 @@ TRACE_EVENT(balance_dirty_pages, __entry->pause, /* ms */ __entry->period, /* ms */ __entry->think, /* ms */ - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); @@ -718,10 +718,10 @@ TRACE_EVENT(writeback_sb_inodes_requeue, TP_STRUCT__entry( __array(char, name, 32) - __field(unsigned long, ino) + __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( @@ -733,13 +733,13 @@ TRACE_EVENT(writeback_sb_inodes_requeue, __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); ), - TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u", + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu", __entry->name, - __entry->ino, + (unsigned 
long)__entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); @@ -789,13 +789,13 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, TP_STRUCT__entry( __array(char, name, 32) - __field(unsigned long, ino) + __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) __field(unsigned long, writeback_index) __field(long, nr_to_write) __field(unsigned long, wrote) - __field(unsigned int, cgroup_ino) + __field(ino_t, cgroup_ino) ), TP_fast_assign( @@ -811,16 +811,16 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, ), TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " - "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u", + "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu", __entry->name, - __entry->ino, + (unsigned long)__entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, __entry->writeback_index, __entry->nr_to_write, __entry->wrote, - __entry->cgroup_ino + (unsigned long)__entry->cgroup_ino ) ); @@ -845,7 +845,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template, TP_STRUCT__entry( __field( dev_t, dev ) - __field(unsigned long, ino ) + __field( ino_t, ino ) __field(unsigned long, state ) __field( __u16, mode ) __field(unsigned long, dirtied_when ) @@ -861,7 +861,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template, TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, __entry->dirtied_when, + (unsigned long)__entry->ino, __entry->dirtied_when, show_inode_state(__entry->state), __entry->mode) ); diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 8c8420230a10..a7378bcd9928 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -22,7 +22,7 @@ #define __XDP_ACT_SYM_FN(x) \ { XDP_##x, #x }, #define __XDP_ACT_SYM_TAB \ - __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 } + __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL } __XDP_ACT_MAP(__XDP_ACT_TP_FN) TRACE_EVENT(xdp_exception, @@ -317,19 +317,15 @@ __MEM_TYPE_MAP(__MEM_TYPE_TP_FN) TRACE_EVENT(mem_disconnect, - TP_PROTO(const struct xdp_mem_allocator *xa, - bool safe_to_remove, bool force), + TP_PROTO(const struct xdp_mem_allocator *xa), - TP_ARGS(xa, safe_to_remove, force), + TP_ARGS(xa), TP_STRUCT__entry( __field(const struct xdp_mem_allocator *, xa) __field(u32, mem_id) __field(u32, mem_type) __field(const void *, allocator) - __field(bool, safe_to_remove) - __field(bool, force) - __field(int, disconnect_cnt) ), TP_fast_assign( @@ -337,19 +333,12 @@ TRACE_EVENT(mem_disconnect, __entry->mem_id = xa->mem.id; __entry->mem_type = xa->mem.type; __entry->allocator = xa->allocator; - __entry->safe_to_remove = safe_to_remove; - __entry->force = force; - __entry->disconnect_cnt = xa->disconnect_cnt; ), - TP_printk("mem_id=%d mem_type=%s allocator=%p" " safe_to_remove=%s force=%s disconnect_cnt=%d", + TP_printk("mem_id=%d mem_type=%s allocator=%p", __entry->mem_id, __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), - __entry->allocator, - __entry->safe_to_remove ? "true" : "false", - __entry->force ? "true" : "false", - __entry->disconnect_cnt + __entry->allocator ) );
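The io_uring tracepoints added above fire wherever the corresponding generated trace_*() helpers are called; the actual call sites live in fs/io_uring.c and are outside this diff. As a minimal sketch of how such a header is consumed (hypothetical function and placement; only the trace_io_uring_create() signature comes from this patch):

/* In exactly one .c file, defining CREATE_TRACE_POINTS before the
 * include makes it emit the tracepoint definitions rather than only
 * the declarations. */
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

/* Hypothetical helper, standing in for the real call site. */
static int hypothetical_setup_done(int fd, void *ctx,
				   struct io_uring_params *p)
{
	/* Emits the io_uring_create event defined in the new header. */
	trace_io_uring_create(fd, ctx, p->sq_entries, p->cq_entries,
			      p->flags);
	return fd;
}

Once built in, the new events appear under /sys/kernel/debug/tracing/events/io_uring/ and can be enabled and read like any other tracepoint.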