| author | David Hildenbrand (Arm) <david@kernel.org> | 2026-02-27 23:08:45 +0300 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2026-04-05 23:53:15 +0300 |
| commit | 0326440c3545c86b6501c7c636fcf018d6e87b8c (patch) | |
| tree | f8d0f6b41102dd69437be2828b9bd049e9193287 | |
| parent | 784a742e7b6db236ef1ccfcdbfe29bf6234b3ccb (diff) | |
| download | linux-0326440c3545c86b6501c7c636fcf018d6e87b8c.tar.xz | |
mm: rename zap_page_range_single() to zap_vma_range()
Let's rename it to make it better match our new naming scheme.
While at it, polish the kerneldoc.
[akpm@linux-foundation.org: fix rustfmtcheck]
Link: https://lkml.kernel.org/r/20260227200848.114019-15-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Puranjay Mohan <puranjay@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/s390/mm/gmap_helpers.c | 2 |
| -rw-r--r-- | drivers/android/binder/page_range.rs | 4 |
| -rw-r--r-- | drivers/android/binder_alloc.c | 2 |
| -rw-r--r-- | include/linux/mm.h | 4 |
| -rw-r--r-- | kernel/bpf/arena.c | 2 |
| -rw-r--r-- | kernel/events/core.c | 2 |
| -rw-r--r-- | mm/madvise.c | 4 |
| -rw-r--r-- | mm/memory.c | 14 |
| -rw-r--r-- | net/ipv4/tcp.c | 6 |
| -rw-r--r-- | rust/kernel/mm/virt.rs | 4 |
10 files changed, 22 insertions, 22 deletions
diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index ae2d59a19313..f8789ffcc05c 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
 		if (!vma)
 			return;
 		if (!is_vm_hugetlb_page(vma))
-			zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
+			zap_vma_range(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
 		vmaddr = vma->vm_end;
 	}
 }
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index 9dfc154e5dd4..8882fd18d9f3 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -130,7 +130,7 @@ pub(crate) struct ShrinkablePageRange {
     pid: Pid,
     /// The mm for the relevant process.
     mm: ARef<Mm>,
-    /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+    /// Used to synchronize calls to `vm_insert_page` and `zap_vma_range`.
     #[pin]
     mm_lock: Mutex<()>,
     /// Spinlock protecting changes to pages.
@@ -762,7 +762,7 @@ unsafe extern "C" fn rust_shrink_free_page(
     if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
         if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
             let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
-            vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+            vma.zap_vma_range(user_page_addr, PAGE_SIZE);
         }
     }
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index dd2046bd5cde..e4488ad86a65 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range_single(vma, page_addr, PAGE_SIZE);
+		zap_vma_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 60c13d40c65c..10a5b9ba4eeb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2804,7 +2804,7 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
 /**
  * zap_vma - zap all page table entries in a vma
@@ -2812,7 +2812,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
  */
 static inline void zap_vma(struct vm_area_struct *vma)
 {
-	zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+	zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 }
 
 struct mmu_notifier_range;
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 19cca936eb9d..08d008cc471e 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -656,7 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
 	guard(mutex)(&arena->lock);
 	/* iterate link list under lock */
 	list_for_each_entry(vml, &arena->vma_list, head)
-		zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
+		zap_vma_range(vml->vma, uaddr, PAGE_SIZE * page_cnt);
 }
 
 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2ecdaabf1b4d..d5b21077e829 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7213,7 +7213,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
 #ifdef CONFIG_MMU
 	/* Clear any partial mappings on error. */
 	if (err)
-		zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
+		zap_vma_range(vma, vma->vm_start, nr_pages * PAGE_SIZE);
 #endif
 
 	return err;
diff --git a/mm/madvise.c b/mm/madvise.c
index a50ec5f90e3e..afe0f01765c4 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -832,7 +832,7 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior)
  * Application no longer needs these pages. If the pages are dirty,
  * it's OK to just throw them away. The app will be more careful about
  * data it wants to keep. Be sure to free swap resources too. The
- * zap_page_range_single call sets things up for shrink_active_list to actually
+ * zap_vma_range call sets things up for shrink_active_list to actually
  * free these pages later if no one else has touched them in the meantime,
  * although we could add these pages to a global reuse list for
  * shrink_active_list to pick up before reclaiming other pages.
@@ -1191,7 +1191,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
 		 * OK some of the range have non-guard pages mapped, zap
 		 * them. This leaves existing guard pages in place.
 		 */
-		zap_page_range_single(vma, range->start, range->end - range->start);
+		zap_vma_range(vma, range->start, range->end - range->start);
 	}
 
 	/*
diff --git a/mm/memory.c b/mm/memory.c
index 879858e466ef..dd80fbf6473a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2215,14 +2215,14 @@ void zap_vma_range_batched(struct mmu_gather *tlb,
 }
 
 /**
- * zap_page_range_single - remove user pages in a given range
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * zap_vma_range - zap all page table entries in a vma range
+ * @vma: the vma covering the range to zap
+ * @address: starting address of the range to zap
  * @size: number of bytes to zap
  *
- * The range must fit into one VMA.
+ * The provided address range must be fully contained within @vma.
  */
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size)
 {
 	struct mmu_gather tlb;
@@ -2250,7 +2250,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 	    !(vma->vm_flags & VM_PFNMAP))
 		return;
 
-	zap_page_range_single(vma, address, size);
+	zap_vma_range(vma, address, size);
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
@@ -3018,7 +3018,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
 	 * maintain page reference counts, and callers may free
 	 * pages due to the error. So zap it early.
 	 */
-	zap_page_range_single(vma, addr, size);
+	zap_vma_range(vma, addr, size);
 	return error;
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 89c962672e51..9573ce9b0ac1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2105,7 +2105,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
 		maybe_zap_len = total_bytes_to_map -  /* All bytes to map */
 				*length + /* Mapped or pending */
 				(pages_remaining * PAGE_SIZE); /* Failed map. */
-		zap_page_range_single(vma, *address, maybe_zap_len);
+		zap_vma_range(vma, *address, maybe_zap_len);
 		err = 0;
 	}
 
@@ -2113,7 +2113,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
 		unsigned long leftover_pages = pages_remaining;
 		int bytes_mapped;
 
-		/* We called zap_page_range_single, try to reinsert. */
+		/* We called zap_vma_range, try to reinsert. */
 		err = vm_insert_pages(vma, *address, pending_pages,
 				      &pages_remaining);
@@ -2270,7 +2270,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 	total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
 	if (total_bytes_to_map) {
 		if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
-			zap_page_range_single(vma, address, total_bytes_to_map);
+			zap_vma_range(vma, address, total_bytes_to_map);
 		zc->length = total_bytes_to_map;
 		zc->recv_skip_hint = 0;
 	} else {
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
index 6bfd91cfa1f4..63eb730b0b05 100644
--- a/rust/kernel/mm/virt.rs
+++ b/rust/kernel/mm/virt.rs
@@ -113,7 +113,7 @@ impl VmaRef {
     /// kernel goes further in freeing unused page tables, but for the purposes of this operation
     /// we must only assume that the leaf level is cleared.
     #[inline]
-    pub fn zap_page_range_single(&self, address: usize, size: usize) {
+    pub fn zap_vma_range(&self, address: usize, size: usize) {
         let (end, did_overflow) = address.overflowing_add(size);
         if did_overflow || address < self.start() || self.end() < end {
             // TODO: call WARN_ONCE once Rust version of it is added
@@ -123,7 +123,7 @@ impl VmaRef {
         // SAFETY: By the type invariants, the caller has read access to this VMA, which is
         // sufficient for this method call. This method has no requirements on the vma flags. The
         // address range is checked to be within the vma.
-        unsafe { bindings::zap_page_range_single(self.as_ptr(), address, size) };
+        unsafe { bindings::zap_vma_range(self.as_ptr(), address, size) };
     }
 
     /// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise
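As a quick illustration of what the rename means for callers, here is a minimal sketch of a caller-side change, using the zap_vma_range() prototype from the include/linux/mm.h hunk above. The surrounding function and variable names below are invented for illustration only and are not part of this patch; only the rename itself is taken from the diff.

```c
#include <linux/mm.h>

/*
 * Hypothetical example, not from this series: a driver tearing down a
 * userspace mapping it established earlier. The rename only changes the
 * function name; arguments and semantics are unchanged, and the range
 * must still lie entirely within @vma.
 */
static void example_teardown_user_pages(struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long nr_pages)
{
	/* Previously: zap_page_range_single(vma, addr, nr_pages * PAGE_SIZE); */
	zap_vma_range(vma, addr, nr_pages * PAGE_SIZE);
}
```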
