Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--  include/linux/ksm.h  49
1 file changed, 22 insertions, 27 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index b9cdeba03668..ec9c05044d4f 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -54,18 +54,17 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
 	return atomic_long_read(&mm->ksm_zero_pages);
 }
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	int ret;
-
-	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
-		ret = __ksm_enter(mm);
-		if (ret)
-			return ret;
-	}
+	/* Adding mm to ksm is best effort on fork. */
+	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+		__ksm_enter(mm);
+}
 
-	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
-		set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+static inline int ksm_execve(struct mm_struct *mm)
+{
+	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+		return __ksm_enter(mm);
 
 	return 0;
 }
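
This hunk splits KSM enrolment across the two process-creation paths: fork now tolerates a failed __ksm_enter() (hence the void return), while the new ksm_execve() reports failure so exec can unwind. Note that the explicit set_bit(MMF_VM_MERGE_ANY) copy disappears from ksm_fork(); ksm_execve() instead tests the flag on the new mm, which suggests the flag itself is propagated by generic mm-flag inheritance. Below is a minimal sketch of how callers might consume the new contract; the function names (example_dup_mm, example_exec_mm) are hypothetical, and the real call sites in kernel/fork.c and fs/exec.c are outside this header diff.

	#include <linux/ksm.h>
	#include <linux/mm_types.h>

	/* Hypothetical fork-path caller: ksm_fork() can no longer fail. */
	static void example_dup_mm(struct mm_struct *mm, struct mm_struct *oldmm)
	{
		/* Best effort: a failed __ksm_enter() no longer aborts fork. */
		ksm_fork(mm, oldmm);
	}

	/*
	 * Hypothetical exec-path caller: ksm_execve() still returns an error,
	 * since a task that asked for MMF_VM_MERGE_ANY should not silently
	 * lose merging across exec.
	 */
	static int example_exec_mm(struct mm_struct *mm)
	{
		return ksm_execve(mm);
	}
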
@@ -87,20 +86,14 @@ static inline void ksm_exit(struct mm_struct *mm)
  *
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+			struct vm_area_struct *vma, unsigned long addr);
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-
-#ifdef CONFIG_MEMORY_FAILURE
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early);
-#endif
-
-#ifdef CONFIG_PROC_FS
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
-#endif /* CONFIG_PROC_FS */
 
 #else /* !CONFIG_KSM */
 
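This hunk tracks the ongoing page-to-folio conversion (ksm_might_need_to_copy() and collect_procs_ksm() now traffic in folios) and drops the CONFIG_MEMORY_FAILURE and CONFIG_PROC_FS guards, so callers no longer need matching #ifdefs. A sketch of a swap-in style caller under the new signature; example_swapin_fixup is a made-up name, and the NULL-on-allocation-failure convention is an assumption about the helper, not something this header declares.

	#include <linux/ksm.h>
	#include <linux/mm.h>

	/* Hypothetical swap-in helper using the folio-based signature. */
	static struct folio *example_swapin_fixup(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
	{
		/*
		 * The swapped-in folio may have been a KSM page, and the vma
		 * may have been unmerged while it was out on swap; the helper
		 * then hands back a private copy. Assume NULL means the copy
		 * could not be allocated.
		 */
		return ksm_might_need_to_copy(folio, vma, addr);
	}
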
@@ -113,7 +106,11 @@ static inline int ksm_disable(struct mm_struct *mm)
 	return 0;
 }
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
 {
 	return 0;
 }
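
On the !CONFIG_KSM side the stubs mirror the new signatures, so call sites build unchanged whether or not KSM is configured in; for example (example_exec_setup is illustrative):

	#include <linux/ksm.h>

	static int example_exec_setup(struct mm_struct *mm)
	{
		/* With CONFIG_KSM=n this resolves to the stub and returns 0. */
		return ksm_execve(mm);
	}
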
@@ -126,12 +123,10 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static inline void collect_procs_ksm(struct page *page,
+static inline void collect_procs_ksm(struct folio *folio, struct page *page,
 		struct list_head *to_kill, int force_early)
 {
 }
-#endif
 
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -140,10 +135,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+			struct vm_area_struct *vma, unsigned long addr)
 {
-	return page;
+	return folio;
 }
 
 static inline void rmap_walk_ksm(struct folio *folio,