author	Hillf Danton <dhillf@gmail.com>	2011-12-29 03:57:16 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-12-30 04:31:57 +0400
commit	b0365c8d0cb6e79eb5f21418ae61ab511f31b575 (patch)
tree	2e8abddb0f0f7829e48d00464f95b835bdb6ff51
parent	34845636a184f3be91a531098192592cbe6db587 (diff)
download	linux-b0365c8d0cb6e79eb5f21418ae61ab511f31b575.tar.xz
mm: hugetlb: fix non-atomic enqueue of huge page
If a huge page is enqueued under the protection of hugetlb_lock, then the
operation is atomic and safe.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: <stable@vger.kernel.org>	[2.6.37+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/hugetlb.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 73f17c0293c0..2316840b337a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -901,7 +901,6 @@ retry:
 	h->resv_huge_pages += delta;
 	ret = 0;
 
-	spin_unlock(&hugetlb_lock);
 	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 		if ((--needed) < 0)
@@ -915,6 +914,7 @@ retry:
 		VM_BUG_ON(page_count(page));
 		enqueue_huge_page(h, page);
 	}
+	spin_unlock(&hugetlb_lock);
 
 	/* Free unnecessary surplus pages to the buddy allocator */
 free:
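
For illustration only, a minimal user-space sketch of the locking pattern the patch restores: pages gathered on a private surplus list are drained onto the shared pool before the pool lock is dropped, so the free list and its counter never go out of sync. Everything in the sketch (commit_surplus(), enqueue_page(), pool_lock, free_count) is hypothetical and merely stands in for gather_surplus_pages(), enqueue_huge_page() and hugetlb_lock; it uses a pthread mutex instead of the kernel spinlock.

/*
 * Hypothetical user-space analogue of the fix: the private surplus list
 * is emptied into the shared pool while the lock is still held, so the
 * free list and its counter stay consistent for concurrent observers.
 */
#include <pthread.h>
#include <stdio.h>

struct page {
	struct page *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *free_list;	/* shared pool, protected by pool_lock */
static long free_count;		/* must match the length of free_list */

/* Caller must hold pool_lock (mirrors enqueue_huge_page()). */
static void enqueue_page(struct page *p)
{
	p->next = free_list;
	free_list = p;
	free_count++;
}

/* Drain up to 'needed' pages from a private list into the shared pool. */
static void commit_surplus(struct page **surplus, int needed)
{
	pthread_mutex_lock(&pool_lock);
	while (needed-- > 0 && *surplus) {
		struct page *p = *surplus;

		*surplus = p->next;
		enqueue_page(p);	/* still under pool_lock */
	}
	/*
	 * In the old ordering the unlock came before this loop, so the
	 * enqueue ran without the lock and could race with other
	 * updaters of the pool.
	 */
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct page pages[3];
	struct page *surplus = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		pages[i].next = surplus;
		surplus = &pages[i];
	}
	commit_surplus(&surplus, 3);
	printf("pages in pool: %ld\n", free_count);
	return 0;
}

Build with cc -pthread. The same reasoning applies to the kernel path: enqueue_huge_page() updates the per-hstate free counters as well as the free list, so it must run with hugetlb_lock held, which is why the patch moves spin_unlock(&hugetlb_lock) to after the enqueue loop.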