author | Ebru Akagunduz <ebru.akagunduz@gmail.com> | 2016-07-27 01:24:59 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-27 02:19:19 +0300
commit | 70652f6ec0566ae6b4147d88c6d043c68484227f (patch)
tree | 834639192cd3ff32322af52b290ea9b5eda7947c /mm
parent | ef3cc4db415e0cee73ea37ac6d79821d77f15f1d (diff)
download | linux-70652f6ec0566ae6b4147d88c6d043c68484227f.tar.xz
mm: make optimistic check for swapin readahead
Introduce a new sysfs integer knob,
/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap, which makes
khugepaged perform an optimistic check for swapin readahead in order to
increase the THP collapse rate.  Before bringing swapped-out pages back
into memory, khugepaged checks how many of the PTEs in the range are
swap entries and allows the collapse only if their number does not
exceed this limit.  The number of unmapped PTEs is also reported via the
khugepaged scan tracepoint.
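
As a userspace illustration of the knob introduced above, here is a
minimal C sketch; it is not part of this patch, and the value 128
written back is an arbitrary example.  It reads the current limit and,
when run as root, raises it:

#include <stdio.h>
#include <stdlib.h>

#define KNOB "/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap"

int main(void)
{
	FILE *f = fopen(KNOB, "r");
	unsigned int val;

	/* Read the current limit exported by the new sysfs attribute. */
	if (!f || fscanf(f, "%u", &val) != 1) {
		perror(KNOB);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("max_ptes_swap = %u\n", val);

	/* Writing requires root; values above HPAGE_PMD_NR - 1 (511 on
	 * x86-64) are rejected with -EINVAL by the store handler. */
	f = fopen(KNOB, "w");
	if (f) {
		fprintf(f, "128\n");
		fclose(f);
	}
	return EXIT_SUCCESS;
}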
[vdavydov@parallels.com: fix scan not aborted on SCAN_EXCEED_SWAP_PTE]
[sfr@canb.auug.org.au: build fix]
Link: http://lkml.kernel.org/r/20160616154503.65806e12@canb.auug.org.au
Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Xie XiuQi <xiexiuqi@huawei.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 45
1 file changed, 42 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 23d1bf42fef1..ed474483a620 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -57,7 +57,8 @@ enum scan_result {
 	SCAN_SWAP_CACHE_PAGE,
 	SCAN_DEL_PAGE_LRU,
 	SCAN_ALLOC_HUGE_PAGE_FAIL,
-	SCAN_CGROUP_CHARGE_FAIL
+	SCAN_CGROUP_CHARGE_FAIL,
+	SCAN_EXCEED_SWAP_PTE
 };
 
 #define CREATE_TRACE_POINTS
@@ -100,6 +101,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  * fault.
  */
 static unsigned int khugepaged_max_ptes_none __read_mostly;
+static unsigned int khugepaged_max_ptes_swap __read_mostly;
 
 static int khugepaged(void *none);
 static int khugepaged_slab_init(void);
@@ -598,6 +600,33 @@ static struct kobj_attribute khugepaged_max_ptes_none_attr =
 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 	       khugepaged_max_ptes_none_store);
 
+static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
+					     struct kobj_attribute *attr,
+					     char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
+}
+
+static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
+					      struct kobj_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int err;
+	unsigned long max_ptes_swap;
+
+	err = kstrtoul(buf, 10, &max_ptes_swap);
+	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
+		return -EINVAL;
+
+	khugepaged_max_ptes_swap = max_ptes_swap;
+
+	return count;
+}
+
+static struct kobj_attribute khugepaged_max_ptes_swap_attr =
+	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
+	       khugepaged_max_ptes_swap_store);
+
 static struct attribute *khugepaged_attr[] = {
 	&khugepaged_defrag_attr.attr,
 	&khugepaged_max_ptes_none_attr.attr,
@@ -606,6 +635,7 @@ static struct attribute *khugepaged_attr[] = {
 	&full_scans_attr.attr,
 	&scan_sleep_millisecs_attr.attr,
 	&alloc_sleep_millisecs_attr.attr,
+	&khugepaged_max_ptes_swap_attr.attr,
 	NULL,
 };
 
@@ -674,6 +704,7 @@ static int __init hugepage_init(void)
 
 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
+	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 	/*
 	 * hugepages can't be allocated by the buddy allocator
 	 */
@@ -2507,7 +2538,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	struct page *page = NULL;
 	unsigned long _address;
 	spinlock_t *ptl;
-	int node = NUMA_NO_NODE;
+	int node = NUMA_NO_NODE, unmapped = 0;
 	bool writable = false, referenced = false;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
@@ -2523,6 +2554,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
 	     _pte++, _address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
+		if (is_swap_pte(pteval)) {
+			if (++unmapped <= khugepaged_max_ptes_swap) {
+				continue;
+			} else {
+				result = SCAN_EXCEED_SWAP_PTE;
+				goto out_unmap;
+			}
+		}
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 			if (!userfaultfd_armed(vma) &&
 			    ++none_or_zero <= khugepaged_max_ptes_none) {
@@ -2609,7 +2648,7 @@ out_unmap:
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
-				     none_or_zero, result);
+				     none_or_zero, result, unmapped);
 	return ret;
 }
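
To make the new control flow easier to follow, here is a standalone
model of the decision the patched scan loop makes.  It is a sketch, not
kernel code: scan_allows_collapse, pte_state, and the PTE_* constants
are hypothetical names invented for this illustration, and HPAGE_PMD_NR
is fixed at 512 (the x86-64 value) so the program is self-contained:

#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* PTEs per PMD-sized huge page on x86-64 */

/* Simplified PTE states for the model. */
enum pte_state { PTE_PRESENT, PTE_NONE_OR_ZERO, PTE_SWAP };

/* Returns true if the region may be collapsed, mirroring the order of
 * checks in the patched loop: swap PTEs are tested first, and the scan
 * aborts as soon as either limit is exceeded. */
static bool scan_allows_collapse(const enum pte_state ptes[HPAGE_PMD_NR],
				 unsigned int max_ptes_swap,
				 unsigned int max_ptes_none)
{
	unsigned int unmapped = 0, none_or_zero = 0;

	for (int i = 0; i < HPAGE_PMD_NR; i++) {
		if (ptes[i] == PTE_SWAP) {
			if (++unmapped > max_ptes_swap)
				return false;	/* SCAN_EXCEED_SWAP_PTE */
		} else if (ptes[i] == PTE_NONE_OR_ZERO) {
			if (++none_or_zero > max_ptes_none)
				return false;	/* SCAN_EXCEED_NONE_PTE */
		}
	}
	return true;
}

int main(void)
{
	enum pte_state ptes[HPAGE_PMD_NR] = { PTE_PRESENT };

	/* With the patch's default of HPAGE_PMD_NR / 8 = 64, a region
	 * containing 65 swapped-out PTEs is skipped. */
	for (int i = 0; i < 65; i++)
		ptes[i] = PTE_SWAP;
	printf("%s\n", scan_allows_collapse(ptes, HPAGE_PMD_NR / 8,
					    HPAGE_PMD_NR - 1)
		       ? "collapse" : "skip");
	return 0;
}

Prior to this patch, any swap PTE in the range stopped the collapse;
the knob makes that threshold tunable, trading extra swapin I/O for a
higher THP collapse rate.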