Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h | 37 +++++++++++++++++++------------------
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e9695baa5922..6b8ed2017743 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -248,11 +248,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
 		bool *any_writable, bool *any_young, bool *any_dirty)
 {
-	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
-	const pte_t *end_ptep = start_ptep + max_nr;
 	pte_t expected_pte, *ptep;
 	bool writable, young, dirty;
-	int nr;
+	int nr, cur_nr;
 
 	if (any_writable)
 		*any_writable = false;
@@ -265,11 +263,15 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
 	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
 
+	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
+	max_nr = min_t(unsigned long, max_nr,
+		       folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
+
 	nr = pte_batch_hint(start_ptep, pte);
 	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
 	ptep = start_ptep + nr;
 
-	while (ptep < end_ptep) {
+	while (nr < max_nr) {
 		pte = ptep_get(ptep);
 		if (any_writable)
 			writable = !!pte_write(pte);
@@ -282,14 +284,6 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		if (!pte_same(pte, expected_pte))
 			break;
 
-		/*
-		 * Stop immediately once we reached the end of the folio. In
-		 * corner cases the next PFN might fall into a different
-		 * folio.
-		 */
-		if (pte_pfn(pte) >= folio_end_pfn)
-			break;
-
 		if (any_writable)
 			*any_writable |= writable;
 		if (any_young)
@@ -297,12 +291,13 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		if (any_dirty)
 			*any_dirty |= dirty;
 
-		nr = pte_batch_hint(ptep, pte);
-		expected_pte = pte_advance_pfn(expected_pte, nr);
-		ptep += nr;
+		cur_nr = pte_batch_hint(ptep, pte);
+		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
+		ptep += cur_nr;
+		nr += cur_nr;
 	}
 
-	return min(ptep - start_ptep, max_nr);
+	return min(nr, max_nr);
 }
 
 /**
@@ -435,6 +430,9 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+void zap_page_range_single_batched(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, unsigned long addr,
+		unsigned long size, struct zap_details *details);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
 			   gfp_t gfp);
 
@@ -915,7 +913,7 @@ static inline void init_cma_pageblock(struct page *page)
 
 int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool claim_only, bool *claim_block);
+			   int migratetype, bool claimable);
 
 static inline bool free_area_empty(struct free_area *area, int migratetype)
 {
@@ -1121,6 +1119,8 @@ DECLARE_STATIC_KEY_TRUE(deferred_pages);
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
+void init_deferred_page(unsigned long pfn, int nid);
+
 enum mminit_level {
 	MMINIT_WARNING,
 	MMINIT_VERIFY,
@@ -1595,7 +1595,6 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc);
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 void accept_page(struct page *page);
-void unaccepted_cleanup_work(struct work_struct *work);
#else /* CONFIG_UNACCEPTED_MEMORY */
 static inline void accept_page(struct page *page)
 {
@@ -1625,5 +1624,7 @@ static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_PT_RECLAIM */
 
+void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
+int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
 
 #endif	/* __MM_INTERNAL_H */
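
Note on the folio_pte_batch() rework: instead of re-checking the folio's end
PFN on every loop iteration, max_nr is clamped once up front to the PFNs that
remain in the folio, so the scan only has to compare a single running counter
(nr < max_nr). Below is a minimal userspace sketch of that clamping arithmetic,
not kernel code: clamp_batch() and the literal PFN values are stand-ins invented
here for illustration, playing the role of the folio_pfn()/folio_nr_pages()/
pte_pfn() expression in the real helper.

#include <stdio.h>

/*
 * Bound a requested batch length by the number of PFNs left in the
 * folio, starting from the PFN the first PTE maps. This mirrors the
 * min_t() clamp added at the top of folio_pte_batch() above.
 */
static unsigned long clamp_batch(unsigned long max_nr,
				 unsigned long folio_start_pfn,
				 unsigned long folio_nr_pages,
				 unsigned long pte_pfn)
{
	unsigned long remaining = folio_start_pfn + folio_nr_pages - pte_pfn;

	return max_nr < remaining ? max_nr : remaining;
}

int main(void)
{
	/* A 16-page folio starting at PFN 1000; the first PTE maps PFN 1010. */
	unsigned long nr = clamp_batch(512, 1000, 16, 1010);

	printf("can batch at most %lu PTEs\n", nr);	/* prints 6 */
	return 0;
}

Clamping once also lets the loop count pages directly in nr rather than
deriving the result from pointer arithmetic; since pte_batch_hint() can
advance by more than one PTE per step, the final min(nr, max_nr) still caps
any overshoot from the last stride.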
