author    | Andrea Arcangeli <aarcange@redhat.com>          | 2011-05-25 04:12:14 +0400
committer | Linus Torvalds <torvalds@linux-foundation.org>  | 2011-05-25 19:39:21 +0400
commit    | 692e0b35427a088bf75d9363788c61c7edbe93a5 (patch)
tree      | db1b4365b70fbe809753e6ac097b60fb4f7299a8 /mm/huge_memory.c
parent    | 9547d01bfb9c351dc19067f8a4cea9d3955f4125 (diff)
download  | linux-692e0b35427a088bf75d9363788c61c7edbe93a5.tar.xz
mm: thp: optimize memcg charge in khugepaged
We don't need to hold the mmap_sem through mem_cgroup_newpage_charge();
the mmap_sem is only held to keep the vma stable, and we no longer need
the vma to be stable after we return from alloc_hugepage_vma().
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 21
1 file changed, 11 insertions, 10 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 90eef404ec2e..615d9743a3cb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 #ifndef CONFIG_NUMA
+	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 	new_page = *hpage;
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		return;
-	}
 #else
 	VM_BUG_ON(*hpage);
 	/*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
+
+	/*
+	 * After allocating the hugepage, release the mmap_sem read lock in
+	 * preparation for taking it in write mode.
+	 */
+	up_read(&mm->mmap_sem);
 	if (unlikely(!new_page)) {
-		up_read(&mm->mmap_sem);
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+#endif
+
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
+#ifdef CONFIG_NUMA
 		put_page(new_page);
+#endif
 		return;
 	}
-#endif
-
-	/* after allocating the hugepage upgrade to mmap_sem write mode */
-	up_read(&mm->mmap_sem);
 
 	/*
 	 * Prevent all access to pagetables with the exception of
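
To make the locking change easier to follow outside the kernel tree, here is a minimal userspace sketch of the idea: release the read-side lock as soon as the structure it protects (the vma) is no longer needed, instead of holding it across the potentially slow memcg charge. Every name in the sketch (fake_vma, alloc_hugepage, charge_page, collapse) is an illustrative stand-in, not a kernel API, and the pthread rwlock merely plays the role of mm->mmap_sem.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Plays the role of mm->mmap_sem: protects the (fake) vma below. */
static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static int fake_vma = 42;	/* only meaningful while mmap_sem is held */

/* Stand-in for alloc_hugepage_vma(): it needs the vma, so the read lock
 * must still be held when it runs. */
static void *alloc_hugepage(int vma_hint)
{
	(void)vma_hint;
	return malloc(2UL * 1024 * 1024);
}

/* Stand-in for mem_cgroup_newpage_charge(): potentially slow, but it does
 * not touch the vma, so it can safely run after the lock is dropped. */
static int charge_page(void *page)
{
	return page ? 0 : -1;
}

static void collapse(void)
{
	void *new_page;

	pthread_rwlock_rdlock(&mmap_sem);
	new_page = alloc_hugepage(fake_vma);	/* vma still stable here */
	/* Post-patch ordering: drop the read lock right after allocation... */
	pthread_rwlock_unlock(&mmap_sem);

	if (!new_page)
		return;

	/* ...so the charge no longer runs with the lock held. */
	if (charge_page(new_page)) {
		free(new_page);
		return;
	}

	/* The real code would now retake mmap_sem in write mode and collapse
	 * the page table entries; omitted in this model. */
	free(new_page);
}

int main(void)
{
	collapse();
	puts("done");
	return 0;
}

The only point mirrored from the patch is the ordering: the unlock moves up to immediately after the allocation, so the allocation-failure and charge-failure paths no longer need to release the lock themselves.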