From 579c571e2efdb8e5b50959ae66b6142e05bd704f Mon Sep 17 00:00:00 2001
From: Song Liu
Date: Mon, 23 Sep 2019 15:37:57 -0700
Subject: khugepaged: rename collapse_shmem() and khugepaged_scan_shmem()

Next patch will add khugepaged support of non-shmem files. This patch
renames these two functions to reflect the new functionality:

    collapse_shmem()        =>  collapse_file()
    khugepaged_scan_shmem() =>  khugepaged_scan_file()

Link: http://lkml.kernel.org/r/20190801184244.3169074-6-songliubraving@fb.com
Signed-off-by: Song Liu
Acked-by: Rik van Riel
Acked-by: Kirill A. Shutemov
Acked-by: Johannes Weiner
Cc: Hillf Danton
Cc: Hugh Dickins
Cc: William Kucharski
Cc: Oleg Nesterov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/khugepaged.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 04a54ff5a8ac..7e36cc893f25 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 }
 
 /**
- * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
+ * collapse_file - collapse small tmpfs/shmem pages into huge one.
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
@@ -1304,10 +1304,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_shmem(struct mm_struct *mm,
-		struct address_space *mapping, pgoff_t start,
+static void collapse_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start,
 		struct page **hpage, int node)
 {
+	struct address_space *mapping = file->f_mapping;
 	gfp_t gfp;
 	struct page *new_page;
 	struct mem_cgroup *memcg;
@@ -1563,11 +1564,11 @@ out:
 	/* TODO: tracepoints */
 }
 
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-		struct address_space *mapping,
-		pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start, struct page **hpage)
 {
 	struct page *page = NULL;
+	struct address_space *mapping = file->f_mapping;
 	XA_STATE(xas, &mapping->i_pages, start);
 	int present, swap;
 	int node = NUMA_NO_NODE;
@@ -1631,16 +1632,15 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
 			result = SCAN_EXCEED_NONE_PTE;
 		} else {
 			node = khugepaged_find_target_node();
-			collapse_shmem(mm, mapping, start, hpage, node);
+			collapse_file(mm, file, start, hpage, node);
 		}
 	}
 
 	/* TODO: tracepoints */
 }
 #else
-static void khugepaged_scan_shmem(struct mm_struct *mm,
-		struct address_space *mapping,
-		pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+		struct file *file, pgoff_t start, struct page **hpage)
 {
 	BUILD_BUG();
 }
@@ -1722,8 +1722,7 @@ skip:
 			file = get_file(vma->vm_file);
 			up_read(&mm->mmap_sem);
 			ret = 1;
-			khugepaged_scan_shmem(mm, file->f_mapping,
-					pgoff, hpage);
+			khugepaged_scan_file(mm, file, pgoff, hpage);
 			fput(file);
 		} else {
 			ret = khugepaged_scan_pmd(mm, vma,
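
A note on the shape of this refactoring: passing the struct file down and
deriving f_mapping inside the callee is what lets the next patch extend
these paths beyond shmem without touching every call site again. The
following user-space C sketch is not kernel code; demo_file, demo_mapping,
scan_mapping, and scan_file are hypothetical stand-ins chosen only to
illustrate the pattern of handing the callee the containing object rather
than one derived field:

	#include <stdio.h>

	/* Hypothetical stand-ins for struct address_space and struct file. */
	struct demo_mapping {
		unsigned long nr_pages;
	};

	struct demo_file {
		struct demo_mapping *f_mapping;
		const char *f_path;	/* extra field a later change may need */
		int is_shmem;
	};

	/*
	 * Before the rename: the callee only ever sees the mapping, so any
	 * future logic that needs the file itself would force a signature
	 * change at every call site.
	 */
	static void scan_mapping(struct demo_mapping *mapping)
	{
		printf("scanning %lu pages\n", mapping->nr_pages);
	}

	/*
	 * After the rename: the callee takes the file and derives the
	 * mapping itself, mirroring the patch's
	 * "struct address_space *mapping = file->f_mapping;". New
	 * file-based logic can now be added without changing callers.
	 */
	static void scan_file(struct demo_file *file)
	{
		struct demo_mapping *mapping = file->f_mapping;

		if (!file->is_shmem)
			printf("non-shmem file %s is now reachable\n",
			       file->f_path);
		printf("scanning %lu pages\n", mapping->nr_pages);
	}

	int main(void)
	{
		struct demo_mapping m = { .nr_pages = 512 };
		struct demo_file f = {
			.f_mapping = &m,
			.f_path = "/tmp/x",
			.is_shmem = 0,
		};

		scan_mapping(&m);	/* old style: mapping only */
		scan_file(&f);		/* new style: whole file handle */
		return 0;
	}

The same trade-off is visible in the patch itself: collapse_file() and
khugepaged_scan_file() compute mapping from file->f_mapping on entry, and
the call sites simply pass file.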