author     Linus Torvalds <torvalds@linux-foundation.org>   2022-05-26 22:32:41 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-05-26 22:32:41 +0300
commit     98931dd95fd489fcbfa97da563505a6f071d7c77 (patch)
tree       44683fc4a92efa614acdca2742a7ff19d26da1e3 /include/trace
parent     df202b452fe6c6d6f1351bad485e2367ef1e644e (diff)
parent     f403f22f8ccb12860b2b62fec3173c6ccd45938b (diff)
download   linux-98931dd95fd489fcbfa97da563505a6f071d7c77.tar.xz
Merge tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
"Almost all of MM here. A few things are still getting finished off,
reviewed, etc.
- Yang Shi has improved the behaviour of khugepaged collapsing of
readonly file-backed transparent hugepages.
- Johannes Weiner has arranged for zswap memory use to be tracked and
managed on a per-cgroup basis.
- Muchun Song adds a /proc knob ("hugetlb_optimize_vmemmap") for
runtime enablement of the recent huge page vmemmap optimization
feature (a usage sketch follows this quoted summary).
- Baolin Wang contributes a series to fix some issues around hugetlb
pagetable invalidation.
- Zhenwei Pi has fixed some interactions between hwpoisoned pages and
virtualization.
- Tong Tiangen has enabled the use of the presently x86-only
page_table_check debugging feature on arm64 and riscv.
- David Vernet has done some fixup work on the memcg selftests.
- Peter Xu has taught userfaultfd to handle write protection faults
against shmem- and hugetlbfs-backed files.
- More DAMON development from SeongJae Park - adding online tuning of
the feature and support for monitoring of fixed virtual address
ranges. Also easier discovery of which monitoring operations are
available.
- Nadav Amit has done some optimization of TLB flushing during
mprotect().
- Neil Brown continues to labor away at improving our swap-over-NFS
support.
- David Hildenbrand has some fixes to anon page COWing versus
get_user_pages().
- Peng Liu fixed some errors in the core hugetlb code.
- Joao Martins has reduced the amount of memory consumed by
device-dax's compound devmaps.
- Some cleanups of the arch-specific pagemap code from Anshuman
Khandual.
- Muchun Song has found and fixed some errors in the TLB flushing of
transparent hugepages.
- Roman Gushchin has done more work on the memcg selftests.
... and, of course, many smaller fixes and cleanups. Notably, the
customary million cleanup serieses from Miaohe Lin"
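As referenced in the hugetlb_optimize_vmemmap item above, here is a minimal userspace sketch of toggling that knob. The /proc/sys/vm/hugetlb_optimize_vmemmap path is an assumption based on the sysctl added in this cycle; it is not spelled out in the summary itself:

/*
 * Hedged sketch: flip the hugetlb_optimize_vmemmap runtime knob.
 * The proc path below is an assumption (5.19-era sysctl), not part of this merge text.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *knob = "/proc/sys/vm/hugetlb_optimize_vmemmap";
	int fd = open(knob, O_WRONLY);

	if (fd < 0) {
		perror("open");	/* kernel without the knob, or insufficient privilege */
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* "1" enables the optimization, "0" disables it */
		perror("write");
	close(fd);
	return 0;
}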
* tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (381 commits)
mm: kfence: use PAGE_ALIGNED helper
selftests: vm: add the "settings" file with timeout variable
selftests: vm: add "test_hmm.sh" to TEST_FILES
selftests: vm: check numa_available() before operating "merge_across_nodes" in ksm_tests
selftests: vm: add migration to the .gitignore
selftests/vm/pkeys: fix typo in comment
ksm: fix typo in comment
selftests: vm: add process_mrelease tests
Revert "mm/vmscan: never demote for memcg reclaim"
mm/kfence: print disabling or re-enabling message
include/trace/events/percpu.h: cleanup for "percpu: improve percpu_alloc_percpu event trace"
include/trace/events/mmflags.h: cleanup for "tracing: incorrect gfp_t conversion"
mm: fix a potential infinite loop in start_isolate_page_range()
MAINTAINERS: add Muchun as co-maintainer for HugeTLB
zram: fix Kconfig dependency warning
mm/shmem: fix shmem folio swapoff hang
cgroup: fix an error handling path in alloc_pagecache_max_30M()
mm: damon: use HPAGE_PMD_SIZE
tracing: incorrect isolate_mode_t cast in mm_vmscan_lru_isolate
nodemask.h: fix compilation error with GCC12
...
Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/events/btrfs.h        4
-rw-r--r--   include/trace/events/compaction.h   4
-rw-r--r--   include/trace/events/kmem.h        26
-rw-r--r--   include/trace/events/mmflags.h     84
-rw-r--r--   include/trace/events/percpu.h      23
-rw-r--r--   include/trace/events/vmscan.h      20
6 files changed, 89 insertions, 72 deletions
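The hunks below share one idea: gfp_t and isolate_mode_t are sparse __bitwise types, so the trace ring-buffer fields store them as plain integers and the assignments gain a __force cast. A minimal userspace sketch of that idiom, using illustrative names rather than kernel headers (it builds with gcc, where the attributes compile away, and is checkable with sparse):

/*
 * Sketch of the sparse __bitwise/__force idiom motivating the casts below.
 * The macro definitions mirror the kernel's compiler_types.h approach;
 * gfp_like_t and GFP_LIKE_NOIO are illustrative, not kernel symbols.
 */
#include <stdio.h>

#ifdef __CHECKER__			/* defined by sparse, not by gcc/clang */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_like_t;
#define GFP_LIKE_NOIO	((__force gfp_like_t)0x04u)

int main(void)
{
	gfp_like_t flags = GFP_LIKE_NOIO;

	/*
	 * Storing the bitwise-typed value in a plain integer field needs a
	 * __force cast, which is exactly what the trace-event assignments
	 * in the diff below add.
	 */
	unsigned long stored = (__force unsigned long)flags;

	printf("stored=0x%lx\n", stored);
	return 0;
}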
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 290f07eb050a..9ae94ef3e270 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1344,13 +1344,13 @@ TRACE_EVENT(alloc_extent_state,
 
 	TP_STRUCT__entry(
 		__field(const struct extent_state *, state)
-		__field(gfp_t, mask)
+		__field(unsigned long, mask)
 		__field(const void*, ip)
 	),
 
 	TP_fast_assign(
 		__entry->state = state,
-		__entry->mask = mask,
+		__entry->mask = (__force unsigned long)mask,
 		__entry->ip = (const void *)IP
 	),
 
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c6d5d70dc7a5..3313eb83c117 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -162,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
 
 	TP_STRUCT__entry(
 		__field(int, order)
-		__field(gfp_t, gfp_mask)
+		__field(unsigned long, gfp_mask)
 		__field(int, prio)
 	),
 
 	TP_fast_assign(
 		__entry->order = order;
-		__entry->gfp_mask = gfp_mask;
+		__entry->gfp_mask = (__force unsigned long)gfp_mask;
 		__entry->prio = prio;
 	),
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ddc8c944f417..f76668305ac5 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__field( const void *, ptr )
 		__field( size_t, bytes_req )
 		__field( size_t, bytes_alloc )
-		__field( gfp_t, gfp_flags )
+		__field( unsigned long, gfp_flags )
 	),
 
 	TP_fast_assign(
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		__entry->ptr = ptr;
 		__entry->bytes_req = bytes_req;
 		__entry->bytes_alloc = bytes_alloc;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__field( const void *, ptr )
 		__field( size_t, bytes_req )
 		__field( size_t, bytes_alloc )
-		__field( gfp_t, gfp_flags )
+		__field( unsigned long, gfp_flags )
 		__field( int, node )
 	),
 
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->ptr = ptr;
 		__entry->bytes_req = bytes_req;
 		__entry->bytes_alloc = bytes_alloc;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 		__entry->node = node;
 	),
 
@@ -208,14 +208,14 @@ TRACE_EVENT(mm_page_alloc,
 	TP_STRUCT__entry(
 		__field( unsigned long, pfn )
 		__field( unsigned int, order )
-		__field( gfp_t, gfp_flags )
+		__field( unsigned long, gfp_flags )
 		__field( int, migratetype )
 	),
 
 	TP_fast_assign(
 		__entry->pfn = page ? page_to_pfn(page) : -1UL;
 		__entry->order = order;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 		__entry->migratetype = migratetype;
 	),
 
@@ -229,20 +229,23 @@ TRACE_EVENT(mm_page_alloc,
 
 DECLARE_EVENT_CLASS(mm_page,
 
-	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+	TP_PROTO(struct page *page, unsigned int order, int migratetype,
+		 int percpu_refill),
 
-	TP_ARGS(page, order, migratetype),
+	TP_ARGS(page, order, migratetype, percpu_refill),
 
 	TP_STRUCT__entry(
 		__field( unsigned long, pfn )
 		__field( unsigned int, order )
 		__field( int, migratetype )
+		__field( int, percpu_refill )
 	),
 
 	TP_fast_assign(
 		__entry->pfn = page ? page_to_pfn(page) : -1UL;
 		__entry->order = order;
 		__entry->migratetype = migratetype;
+		__entry->percpu_refill = percpu_refill;
 	),
 
 	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
@@ -250,14 +253,15 @@ DECLARE_EVENT_CLASS(mm_page,
 		__entry->pfn != -1UL ? __entry->pfn : 0,
 		__entry->order,
 		__entry->migratetype,
-		__entry->order == 0)
+		__entry->percpu_refill)
 );
 
 DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
 
-	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+	TP_PROTO(struct page *page, unsigned int order, int migratetype,
+		 int percpu_refill),
 
-	TP_ARGS(page, order, migratetype)
+	TP_ARGS(page, order, migratetype, percpu_refill)
 );
 
 TRACE_EVENT(mm_page_pcpu_drain,
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 6532119a6bf1..e87cb2b80ed3 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,49 +13,51 @@
  * Thus most bits set go first.
  */
 
-#define __def_gfpflag_names						\
-	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
-	{(unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"},	\
-	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
-	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
-	{(unsigned long)GFP_USER,		"GFP_USER"},		\
-	{(unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
-	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
-	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
-	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
-	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
-	{(unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
-	{(unsigned long)GFP_DMA,		"GFP_DMA"},		\
-	{(unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
-	{(unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
-	{(unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
-	{(unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
-	{(unsigned long)__GFP_IO,		"__GFP_IO"},		\
-	{(unsigned long)__GFP_FS,		"__GFP_FS"},		\
-	{(unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
-	{(unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
-	{(unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
-	{(unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
-	{(unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
-	{(unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
-	{(unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
-	{(unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
-	{(unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
-	{(unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
-	{(unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
-	{(unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
-	{(unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
-	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
-	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
-	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
-	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"},\
-	{(unsigned long)__GFP_ZEROTAGS,		"__GFP_ZEROTAGS"}	\
+#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+
+#define __def_gfpflag_names			\
+	gfpflag_string(GFP_TRANSHUGE),		\
+	gfpflag_string(GFP_TRANSHUGE_LIGHT),	\
+	gfpflag_string(GFP_HIGHUSER_MOVABLE),	\
+	gfpflag_string(GFP_HIGHUSER),		\
+	gfpflag_string(GFP_USER),		\
+	gfpflag_string(GFP_KERNEL_ACCOUNT),	\
+	gfpflag_string(GFP_KERNEL),		\
+	gfpflag_string(GFP_NOFS),		\
+	gfpflag_string(GFP_ATOMIC),		\
+	gfpflag_string(GFP_NOIO),		\
+	gfpflag_string(GFP_NOWAIT),		\
+	gfpflag_string(GFP_DMA),		\
+	gfpflag_string(__GFP_HIGHMEM),		\
+	gfpflag_string(GFP_DMA32),		\
+	gfpflag_string(__GFP_HIGH),		\
+	gfpflag_string(__GFP_ATOMIC),		\
+	gfpflag_string(__GFP_IO),		\
+	gfpflag_string(__GFP_FS),		\
+	gfpflag_string(__GFP_NOWARN),		\
+	gfpflag_string(__GFP_RETRY_MAYFAIL),	\
+	gfpflag_string(__GFP_NOFAIL),		\
+	gfpflag_string(__GFP_NORETRY),		\
+	gfpflag_string(__GFP_COMP),		\
+	gfpflag_string(__GFP_ZERO),		\
+	gfpflag_string(__GFP_NOMEMALLOC),	\
+	gfpflag_string(__GFP_MEMALLOC),		\
+	gfpflag_string(__GFP_HARDWALL),		\
+	gfpflag_string(__GFP_THISNODE),		\
+	gfpflag_string(__GFP_RECLAIMABLE),	\
+	gfpflag_string(__GFP_MOVABLE),		\
+	gfpflag_string(__GFP_ACCOUNT),		\
+	gfpflag_string(__GFP_WRITE),		\
+	gfpflag_string(__GFP_RECLAIM),		\
+	gfpflag_string(__GFP_DIRECT_RECLAIM),	\
+	gfpflag_string(__GFP_KSWAPD_RECLAIM),	\
+	gfpflag_string(__GFP_ZEROTAGS)
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define __def_gfpflag_names_kasan ,					\
-	{(unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"},		\
-	{(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \
-	{(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
+#define __def_gfpflag_names_kasan ,			\
+	gfpflag_string(__GFP_SKIP_ZERO),		\
+	gfpflag_string(__GFP_SKIP_KASAN_POISON),	\
+	gfpflag_string(__GFP_SKIP_KASAN_UNPOISON)
 #else
 #define __def_gfpflag_names_kasan
 #endif
diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
index df112a64f6c9..5b8211ca8950 100644
--- a/include/trace/events/percpu.h
+++ b/include/trace/events/percpu.h
@@ -6,15 +6,20 @@
 #define _TRACE_PERCPU_H
 
 #include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
 
 TRACE_EVENT(percpu_alloc_percpu,
 
-	TP_PROTO(bool reserved, bool is_atomic, size_t size,
-		 size_t align, void *base_addr, int off, void __percpu *ptr),
+	TP_PROTO(unsigned long call_site,
+		 bool reserved, bool is_atomic, size_t size,
+		 size_t align, void *base_addr, int off,
+		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),
 
-	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+	TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off,
+		ptr, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
+		__field( unsigned long, call_site )
 		__field( bool, reserved )
 		__field( bool, is_atomic )
 		__field( size_t, size )
@@ -22,9 +27,11 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__field( void *, base_addr )
 		__field( int, off )
 		__field( void __percpu *, ptr )
+		__field( size_t, bytes_alloc )
+		__field( unsigned long, gfp_flags )
 	),
-
 	TP_fast_assign(
+		__entry->call_site = call_site;
 		__entry->reserved = reserved;
 		__entry->is_atomic = is_atomic;
 		__entry->size = size;
@@ -32,12 +39,16 @@ TRACE_EVENT(percpu_alloc_percpu,
 		__entry->base_addr = base_addr;
 		__entry->off = off;
 		__entry->ptr = ptr;
+		__entry->bytes_alloc = bytes_alloc;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
-	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+	TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
+		  (void *)__entry->call_site,
 		  __entry->reserved, __entry->is_atomic,
 		  __entry->size, __entry->align,
-		  __entry->base_addr, __entry->off, __entry->ptr)
+		  __entry->base_addr, __entry->off, __entry->ptr,
+		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
 );
 
 TRACE_EVENT(percpu_free_percpu,
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index de136dbd623a..d2123dd960d5 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -96,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
 		__field( int, nid )
 		__field( int, zid )
 		__field( int, order )
-		__field( gfp_t, gfp_flags )
+		__field( unsigned long, gfp_flags )
 	),
 
 	TP_fast_assign(
 		__entry->nid = nid;
 		__entry->zid = zid;
 		__entry->order = order;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -120,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
 
 	TP_STRUCT__entry(
 		__field( int, order )
-		__field( gfp_t, gfp_flags )
+		__field( unsigned long, gfp_flags )
 	),
 
 	TP_fast_assign(
 		__entry->order = order;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("order=%d gfp_flags=%s",
@@ -210,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__field(void *, shrink)
 		__field(int, nid)
 		__field(long, nr_objects_to_shrink)
-		__field(gfp_t, gfp_flags)
+		__field(unsigned long, gfp_flags)
 		__field(unsigned long, cache_items)
 		__field(unsigned long long, delta)
 		__field(unsigned long, total_scan)
@@ -222,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__entry->shrink = shr->scan_objects;
 		__entry->nid = sc->nid;
 		__entry->nr_objects_to_shrink = nr_objects_to_shrink;
-		__entry->gfp_flags = sc->gfp_mask;
+		__entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
 		__entry->cache_items = cache_items;
 		__entry->delta = delta;
 		__entry->total_scan = total_scan;
@@ -297,7 +297,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
 		__field(unsigned long, nr_scanned)
 		__field(unsigned long, nr_skipped)
 		__field(unsigned long, nr_taken)
-		__field(isolate_mode_t, isolate_mode)
+		__field(unsigned int, isolate_mode)
 		__field(int, lru)
 	),
 
@@ -308,7 +308,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
 		__entry->nr_scanned = nr_scanned;
 		__entry->nr_skipped = nr_skipped;
 		__entry->nr_taken = nr_taken;
-		__entry->isolate_mode = isolate_mode;
+		__entry->isolate_mode = (__force unsigned int)isolate_mode;
 		__entry->lru = lru;
 	),
 
@@ -446,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
 	TP_STRUCT__entry(
 		__field(int, nid)
 		__field(int, order)
-		__field(gfp_t, gfp_flags)
+		__field(unsigned long, gfp_flags)
 	),
 
 	TP_fast_assign(
 		__entry->nid = nid;
 		__entry->order = order;
-		__entry->gfp_flags = gfp_flags;
+		__entry->gfp_flags = (__force unsigned long)gfp_flags;
 	),
 
 	TP_printk("nid=%d order=%d gfp_flags=%s",
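For readers of the mmflags.h hunk above: the new gfpflag_string() macro builds each {mask, "name"} pair by stringifying its argument, and the flag-printing side then walks that table at output time. A standalone userspace sketch of the same table-building pattern, with made-up flag names and values (nothing here is kernel API):

/*
 * Userspace sketch of the gfpflag_string() pattern: a stringify macro builds
 * {mask, name} pairs once, and a printer walks the table. Flag values and
 * names are illustrative, not the kernel's.
 */
#include <stdio.h>

#define FLAG_HIGH	0x01u
#define FLAG_IO		0x02u
#define FLAG_FS		0x04u

#define flag_string(flag)	{ (unsigned long)flag, #flag }

static const struct { unsigned long mask; const char *name; } flag_names[] = {
	flag_string(FLAG_HIGH),
	flag_string(FLAG_IO),
	flag_string(FLAG_FS),
};

/* Rough analogue of __print_flags(): print every name whose bit is set. */
static void show_flags(unsigned long flags)
{
	const char *sep = "";
	size_t i;

	for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++) {
		if (flags & flag_names[i].mask) {
			printf("%s%s", sep, flag_names[i].name);
			sep = "|";
		}
	}
	printf("\n");
}

int main(void)
{
	show_flags(FLAG_IO | FLAG_FS);	/* prints "FLAG_IO|FLAG_FS" */
	return 0;
}

The kernel variant additionally needs the __force cast inside gfpflag_string() because gfp_t is a sparse __bitwise type, which is the same conversion applied to the trace-event fields in the hunks above.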