path: root/mm
author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2012-10-09 03:29:49 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2012-10-09 11:22:27 +0400
commit     420256ef02660af0acf28c12fe4b7d514ca88a4d (patch)
tree       a2710b4d7b9983d084c59fb8c4a4df35be98d321 /mm
parent     d516904bd239fe2c9f1bd46cf146bb4b8831321c (diff)
download   linux-420256ef02660af0acf28c12fe4b7d514ca88a4d.tar.xz
thp: release page in page pre-alloc path
If NUMA is enabled, we can release the page in the page pre-alloc
operation; the CONFIG_NUMA-dependent code can then be reduced.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
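For context, below is a minimal userspace C sketch (not kernel code) of the ownership pattern this patch moves to: the pre-allocation step records the new page in *hpage, so every failure path inside the collapse routine can simply return, and the caller releases any leftover page in one unconditional place instead of under #ifdef CONFIG_NUMA. All names here (fake_page, try_collapse, and so on) are hypothetical illustrations, not kernel symbols.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_page {
	int refcount;
};

static struct fake_page *alloc_fake_page(void)
{
	struct fake_page *p = malloc(sizeof(*p));
	if (p)
		p->refcount = 1;
	return p;
}

static void put_fake_page(struct fake_page *p)
{
	if (p && --p->refcount == 0)
		free(p);
}

/*
 * Collapse step: every failure path just returns.  It never frees the
 * page itself, because the caller already holds a pointer to it via
 * *hpage (this mirrors the "*hpage = new_page;" line added by the patch).
 */
static bool try_collapse(struct fake_page **hpage, bool charge_ok)
{
	struct fake_page *new_page = alloc_fake_page();

	if (!new_page)
		return false;

	*hpage = new_page;		/* caller now owns the reference */

	if (!charge_ok)			/* e.g. a memcg charge failure */
		return false;		/* no conditional put needed here */

	/* ... the real code would collapse small pages into new_page ... */

	*hpage = NULL;			/* page consumed; nothing left over */
	put_fake_page(new_page);	/* sketch only: drop it so we don't leak */
	return true;
}

int main(void)
{
	struct fake_page *hpage = NULL;

	for (int i = 0; i < 4; i++) {
		bool ok = try_collapse(&hpage, i % 2 == 0);

		/* single, unconditional cleanup point in the scan loop */
		if (hpage) {
			put_fake_page(hpage);
			hpage = NULL;
		}
		printf("iteration %d: collapse %s\n", i, ok ? "succeeded" : "failed");
	}
	return 0;
}

In the real khugepaged_do_scan() that cleanup point is the new "else if (hpage) { put_page(hpage); hpage = NULL; }" branch added below, which is why the CONFIG_NUMA-only put_page() calls in collapse_huge_page() can be deleted.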
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d5b5fcc73c44..9c4390f60c3e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1873,15 +1873,12 @@ static void collapse_huge_page(struct mm_struct *mm,
*hpage = ERR_PTR(-ENOMEM);
return;
}
+ *hpage = new_page;
count_vm_event(THP_COLLAPSE_ALLOC);
#endif
- if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-#ifdef CONFIG_NUMA
- put_page(new_page);
-#endif
+ if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
return;
- }
/*
* Prevent all access to pagetables with the exception of
@@ -1982,9 +1979,8 @@ static void collapse_huge_page(struct mm_struct *mm,
prepare_pmd_huge_pte(pgtable, mm);
spin_unlock(&mm->page_table_lock);
-#ifndef CONFIG_NUMA
*hpage = NULL;
-#endif
+
khugepaged_pages_collapsed++;
out_up_write:
up_write(&mm->mmap_sem);
@@ -1992,9 +1988,6 @@ out_up_write:
out:
mem_cgroup_uncharge_page(new_page);
-#ifdef CONFIG_NUMA
- put_page(new_page);
-#endif
goto out_up_write;
}
@@ -2260,8 +2253,6 @@ static void khugepaged_do_scan(void)
barrier(); /* write khugepaged_pages_to_scan to local stack */
while (progress < pages) {
- cond_resched();
-
#ifndef CONFIG_NUMA
if (!hpage)
hpage = khugepaged_alloc_hugepage(&wait);
@@ -2274,8 +2265,12 @@ static void khugepaged_do_scan(void)
break;
wait = false;
khugepaged_alloc_sleep();
+ } else if (hpage) {
+ put_page(hpage);
+ hpage = NULL;
}
#endif
+ cond_resched();
if (unlikely(kthread_should_stop() || freezing(current)))
break;