author      Xin Hao <xhao@linux.alibaba.com>            2023-05-31 12:58:17 +0300
committer   Andrew Morton <akpm@linux-foundation.org>   2023-06-10 02:25:51 +0300
commit      16618670276a77480e274117992cec5e42ce66a9 (patch)
tree        f9cccd496b940ec7dd06bc118471e2036d208471 /mm
parent      3b11edf1f2398cac206a224308de6628ebeea924 (diff)
download    linux-16618670276a77480e274117992cec5e42ce66a9.tar.xz
mm: khugepaged: avoid pointless allocation for "struct mm_slot"
In __khugepaged_enter(), if the MMF_VM_HUGEPAGE bit in "mm->flags" is already set, the freshly allocated "mm_slot" is immediately freed and the function returns, so the allocation was pointless. Call mm_slot_alloc() only after test_and_set_bit() has claimed the bit.

Link: https://lkml.kernel.org/r/20230531095817.11012-1-xhao@linux.alibaba.com
Signed-off-by: Xin Hao <xhao@linux.alibaba.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
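For illustration, a minimal userspace sketch of the pattern this patch applies (hypothetical names, not the kernel function itself): atomically test-and-set the "already registered" flag first, and allocate the tracking structure only when the flag was previously clear, instead of allocating unconditionally and freeing it again on the already-set path.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct tracker {
	atomic_flag registered;	/* stands in for MMF_VM_HUGEPAGE in mm->flags */
	void *slot;		/* stands in for struct mm_slot */
};

/* Register once; repeat calls return early without touching the allocator. */
static bool tracker_enter(struct tracker *t)
{
	/* Like test_and_set_bit(): sets the flag and returns its previous value. */
	if (atomic_flag_test_and_set(&t->registered))
		return true;	/* already registered: no allocation needed */

	t->slot = malloc(64);	/* only now pay for the allocation */
	if (!t->slot)
		return false;

	/* ... insert t->slot into the tracking structures ... */
	return true;
}

int main(void)
{
	struct tracker t = { .registered = ATOMIC_FLAG_INIT, .slot = NULL };

	tracker_enter(&t);	/* first call: flag was clear, allocates */
	tracker_enter(&t);	/* second call: early return, no allocation */
	free(t.slot);
	return 0;
}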
Diffstat (limited to 'mm')
-rw-r--r--   mm/khugepaged.c   12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6b9d39d65b73..3649ba12a235 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -422,19 +422,17 @@ void __khugepaged_enter(struct mm_struct *mm)
 	struct mm_slot *slot;
 	int wakeup;
 
+	/* __khugepaged_exit() must not run from under us */
+	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
+	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
+		return;
+
 	mm_slot = mm_slot_alloc(mm_slot_cache);
 	if (!mm_slot)
 		return;
 
 	slot = &mm_slot->slot;
 
-	/* __khugepaged_exit() must not run from under us */
-	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
-	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
-		mm_slot_free(mm_slot_cache, mm_slot);
-		return;
-	}
-
 	spin_lock(&khugepaged_mm_lock);
 	mm_slot_insert(mm_slots_hash, mm, slot);
 	/*