-rw-r--r--  arch/s390/mm/gmap_helpers.c      2
-rw-r--r--  drivers/android/binder_alloc.c   2
-rw-r--r--  include/linux/mm.h               5
-rw-r--r--  kernel/bpf/arena.c               3
-rw-r--r--  kernel/events/core.c             2
-rw-r--r--  mm/madvise.c                     3
-rw-r--r--  mm/memory.c                     16
-rw-r--r--  net/ipv4/tcp.c                   5
-rw-r--r--  rust/kernel/mm/virt.rs           4
9 files changed, 20 insertions(+), 22 deletions(-)
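
In short: zap_page_range_single() loses its struct zap_details * argument. Every remaining caller passed NULL, so the parameter moves inside the function; the one caller that forwarded a real details pointer, unmap_mapping_range_vma(), switches to zap_page_range_single_batched() directly. A before/after sketch of the prototype, reconstructed from the include/linux/mm.h hunk below:

/* Before this series: callers had to supply a details pointer, and in
 * practice always passed NULL. */
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
			   unsigned long size, struct zap_details *details);

/* After: the argument is gone; the NULL moves into the implementation. */
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
			   unsigned long size);
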
diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index dea83e3103e5..ae2d59a19313 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
if (!vma)
return;
if (!is_vm_hugetlb_page(vma))
- zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL);
+ zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
vmaddr = vma->vm_end;
}
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 241f16a9b63d..dd2046bd5cde 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
+ zap_page_range_single(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08b743aab92a..6512d70c5852 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2804,11 +2804,10 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
- unsigned long size, struct zap_details *details);
+ unsigned long size);
static inline void zap_vma_pages(struct vm_area_struct *vma)
{
- zap_page_range_single(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, NULL);
+ zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
struct mmu_notifier_range;
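
For code tracking this API, call sites update mechanically. A hedged sketch of a hypothetical driver (the mydrv_* names are invented for illustration, not from this patch):

/* Old: zap_page_range_single(vma, addr, len, NULL);
 * New: the trailing NULL is simply dropped. */
static void mydrv_invalidate_range(struct vm_area_struct *vma,
				   unsigned long addr, unsigned long len)
{
	zap_page_range_single(vma, addr, len);
}

/* Whole-VMA teardown keeps using zap_vma_pages(), whose body shrinks
 * to the one-liner in the hunk above. */
static void mydrv_vma_close(struct vm_area_struct *vma)
{
	zap_vma_pages(vma);
}
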
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index f355cf1c1a16..19cca936eb9d 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -656,8 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
guard(mutex)(&arena->lock);
/* iterate linked list under lock */
list_for_each_entry(vml, &arena->vma_list, head)
- zap_page_range_single(vml->vma, uaddr,
- PAGE_SIZE * page_cnt, NULL);
+ zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
}
static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 89b40e439717..2ecdaabf1b4d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7213,7 +7213,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
#ifdef CONFIG_MMU
/* Clear any partial mappings on error. */
if (err)
- zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
+ zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
#endif
return err;
diff --git a/mm/madvise.c b/mm/madvise.c
index 1313166c5514..e4a2728593a8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1193,8 +1193,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
* OK, some of the range has non-guard pages mapped; zap
* them. This leaves existing guard pages in place.
*/
- zap_page_range_single(vma, range->start,
- range->end - range->start, NULL);
+ zap_page_range_single(vma, range->start, range->end - range->start);
}
/*
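
The madvise hunk sits in the guard-region install path: installing guards over a range that already has normal pages mapped zaps those pages first, while leaving existing guard pages untouched. From userspace that looks roughly like the sketch below (hedged: the MADV_GUARD_INSTALL fallback value is copied from asm-generic/mman-common.h, and a 4 KiB page size is assumed):

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_GUARD_INSTALL
#define MADV_GUARD_INSTALL 102	/* assumption: asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 4 * 4096;	/* assumes 4 KiB pages */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* populate one non-guard page */
	/* The populated page is zapped by the install; touching the
	 * range afterwards raises SIGSEGV. */
	if (madvise(p, len, MADV_GUARD_INSTALL))
		perror("madvise");
	munmap(p, len);
	return 0;
}
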
diff --git a/mm/memory.c b/mm/memory.c
index f78ab3869f8d..fbd02d5bd520 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2203,17 +2203,16 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
- * @details: details of shared cache invalidation
*
* The range must fit into one VMA.
*/
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
- unsigned long size, struct zap_details *details)
+ unsigned long size)
{
struct mmu_gather tlb;
tlb_gather_mmu(&tlb, vma->vm_mm);
- zap_page_range_single_batched(&tlb, vma, address, size, details);
+ zap_page_range_single_batched(&tlb, vma, address, size, NULL);
tlb_finish_mmu(&tlb);
}
@@ -2235,7 +2234,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
!(vma->vm_flags & VM_PFNMAP))
return;
- zap_page_range_single(vma, address, size, NULL);
+ zap_page_range_single(vma, address, size);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
@@ -3003,7 +3002,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
* maintain page reference counts, and callers may free
* pages due to the error. So zap it early.
*/
- zap_page_range_single(vma, addr, size, NULL);
+ zap_page_range_single(vma, addr, size);
return error;
}
@@ -4226,7 +4225,12 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
unsigned long start_addr, unsigned long end_addr,
struct zap_details *details)
{
- zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
+ struct mmu_gather tlb;
+
+ tlb_gather_mmu(&tlb, vma->vm_mm);
+ zap_page_range_single_batched(&tlb, vma, start_addr,
+ end_addr - start_addr, details);
+ tlb_finish_mmu(&tlb);
}
static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
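
The unmap_mapping_range_vma() hunk is the other half of the refactor: as the only caller with a non-NULL details, it now drives the mmu_gather batching itself. Code in the same position follows the same pattern (a sketch restating the sequence from the hunk above; the function name is invented):

static void zap_single_with_details(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    struct zap_details *details)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	zap_page_range_single_batched(&tlb, vma, start, end - start, details);
	tlb_finish_mmu(&tlb);
}
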
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 202a4e57a218..89c962672e51 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2105,7 +2105,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
maybe_zap_len = total_bytes_to_map - /* All bytes to map */
*length + /* Mapped or pending */
(pages_remaining * PAGE_SIZE); /* Failed map. */
- zap_page_range_single(vma, *address, maybe_zap_len, NULL);
+ zap_page_range_single(vma, *address, maybe_zap_len);
err = 0;
}
@@ -2270,8 +2270,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
if (total_bytes_to_map) {
if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
- zap_page_range_single(vma, address, total_bytes_to_map,
- NULL);
+ zap_page_range_single(vma, address, total_bytes_to_map);
zc->length = total_bytes_to_map;
zc->recv_skip_hint = 0;
} else {
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
index da21d65ccd20..6bfd91cfa1f4 100644
--- a/rust/kernel/mm/virt.rs
+++ b/rust/kernel/mm/virt.rs
@@ -123,9 +123,7 @@ impl VmaRef {
// SAFETY: By the type invariants, the caller has read access to this VMA, which is
// sufficient for this method call. This method has no requirements on the vma flags. The
// address range is checked to be within the vma.
- unsafe {
- bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
- };
+ unsafe { bindings::zap_page_range_single(self.as_ptr(), address, size) };
}
/// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise