Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--  include/linux/huge_mm.h | 79
1 file changed, 53 insertions(+), 26 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ef5b80e48599..2f190c90192d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
-#include <linux/sched/coredump.h>
#include <linux/mm_types.h>
#include <linux/fs.h> /* only for vma_is_dax() */
@@ -40,6 +39,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write);
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_UNSUPPORTED,
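For context, a minimal sketch (not part of this patch) of how a driver's huge_fault handler might use the new folio-based insert helper; get_pmd_folio() is a hypothetical placeholder, not a kernel API:

static vm_fault_t example_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	struct folio *folio;

	if (order != PMD_ORDER)
		return VM_FAULT_FALLBACK;

	/* hypothetical helper: look up a PMD-sized folio for this fault */
	folio = get_pmd_folio(vmf);
	if (!folio)
		return VM_FAULT_FALLBACK;

	/* map the whole folio with a single PMD entry */
	return vmf_insert_folio_pmd(vmf, folio,
				    vmf->flags & FAULT_FLAG_WRITE);
}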
@@ -120,6 +123,10 @@ enum mthp_stat_item {
MTHP_STAT_ANON_FAULT_ALLOC,
MTHP_STAT_ANON_FAULT_FALLBACK,
MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_ZSWPOUT,
+ MTHP_STAT_SWPIN,
+ MTHP_STAT_SWPIN_FALLBACK,
+ MTHP_STAT_SWPIN_FALLBACK_CHARGE,
MTHP_STAT_SWPOUT,
MTHP_STAT_SWPOUT_FALLBACK,
MTHP_STAT_SHMEM_ALLOC,
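The new enum items back per-size counters; assuming the usual sysfs layout described in Documentation/admin-guide/mm/transhuge.rst, a userspace sketch reading one of them (the path and the 64kB size directory are assumptions):

#include <stdio.h>

int main(void)
{
	unsigned long long swpin;
	/* assumed path: per-size mTHP stats under hugepages-<size>kB/stats/ */
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/"
			"hugepages-64kB/stats/swpin", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &swpin) == 1)
		printf("64kB mTHP swap-ins: %llu\n", swpin);
	fclose(f);
	return 0;
}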
@@ -253,19 +260,6 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
return orders;
}
-static inline bool file_thp_enabled(struct vm_area_struct *vma)
-{
- struct inode *inode;
-
- if (!vma->vm_file)
- return false;
-
- inode = vma->vm_file->f_inode;
-
- return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
- !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
-}
-
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long vm_flags,
unsigned long tva_flags,
@@ -351,6 +345,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
+bool uniform_split_supported(struct folio *folio, unsigned int new_order,
+ bool warns);
+bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
+ bool warns);
+int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
+ struct list_head *list);
+/*
+ * try_folio_split - try to split a @folio at @page using a non-uniform split.
+ * @folio: folio to be split
+ * @page: page that should end up in an order-0 folio after the split
+ * @list: list on which to store the after-split folios
+ *
+ * Try to split @folio at @page down to order-0 using a non-uniform split;
+ * if a non-uniform split is not supported, fall back to a uniform split.
+ *
+ * Return: 0 if the split succeeded, a negative error code otherwise.
+ */
+static inline int try_folio_split(struct folio *folio, struct page *page,
+ struct list_head *list)
+{
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ if (!non_uniform_split_supported(folio, 0, false))
+ return split_huge_page_to_list_to_order(&folio->page, list,
+ ret);
+ return folio_split(folio, ret, page, list);
+}
static inline int split_huge_page(struct page *page)
{
struct folio *folio = page_folio(page);
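A hedged caller sketch for try_folio_split(): split @folio so that @page ends up in an order-0 folio, collecting the results on a local list (the usual split preconditions, e.g. holding the folio lock, are assumed):

static int example_split_at(struct folio *folio, struct page *page)
{
	LIST_HEAD(after_split);	/* receives the after-split folios */
	int err;

	err = try_folio_split(folio, page, &after_split);
	if (err)
		return err;	/* split failed; @folio is left intact */

	/* on success, @page now belongs to an order-0 folio */
	return 0;
}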
@@ -371,7 +395,7 @@ static inline int split_huge_page(struct page *page)
void deferred_split_folio(struct folio *folio, bool partially_mapped);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct folio *folio);
+ unsigned long address, bool freeze);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
@@ -379,12 +403,11 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
- false, NULL); \
+ false); \
} while (0)
-
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct folio *folio);
+ bool freeze);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
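For users of the macro nothing changes beyond the dropped folio argument in the underlying __split_huge_pmd() call; a sketch of the (unchanged) call-site form:

	/* split the huge PMD mapping @addr back to PTEs, without freezing */
	split_huge_pmd(vma, pmd, addr);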
@@ -414,7 +437,7 @@ int madvise_collapse(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, long adjust_next);
+ unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
@@ -471,15 +494,13 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
-#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
-
static inline bool thp_migration_supported(void)
{
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, bool freeze, struct folio *folio);
+ pmd_t *pmd, bool freeze);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmdp, struct folio *folio);
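With the mk_huge_pmd() wrapper removed, its former expansion can be open-coded where still needed (a sketch; mk_pmd() and pmd_mkhuge() are the per-arch helpers the macro wrapped):

	/* equivalent of the removed mk_huge_pmd(page, prot) */
	pmd_t entry = pmd_mkhuge(mk_pmd(page, prot));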
@@ -543,17 +564,23 @@ static inline int split_folio_to_list(struct folio *folio, struct list_head *lis
return 0;
}
+static inline int try_folio_split(struct folio *folio, struct page *page,
+ struct list_head *list)
+{
+ return 0;
+}
+
static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct folio *folio) {}
+ unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct folio *folio) {}
+ unsigned long address, bool freeze) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
- bool freeze, struct folio *folio) {}
+ bool freeze) {}
static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp,
@@ -581,7 +608,7 @@ static inline int madvise_collapse(struct vm_area_struct *vma,
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
- long adjust_next)
+ struct vm_area_struct *next)
{
}
static inline int is_swap_pmd(pmd_t pmd)