From af8713b701a74c3784ce6683f64f474a94b1b643 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 8 Sep 2015 14:58:25 -0700 Subject: selftests/userfaultfd: fix compiler warnings on 32-bit On 32-bit: userfaultfd.c: In function 'locking_thread': userfaultfd.c:152: warning: left shift count >= width of type userfaultfd.c: In function 'uffd_poll_thread': userfaultfd.c:295: warning: cast to pointer from integer of different size userfaultfd.c: In function 'uffd_read_thread': userfaultfd.c:332: warning: cast to pointer from integer of different size Fix the shift warning by splitting the shift in two parts, and the integer/pointer warnigns by adding intermediate casts to "unsigned long". Signed-off-by: Geert Uytterhoeven Cc: Andrea Arcangeli Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/selftests/vm/userfaultfd.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 0c0b83953352..b619f672131e 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -147,7 +147,8 @@ static void *locking_thread(void *arg) if (sizeof(page_nr) > sizeof(rand_nr)) { if (random_r(&rand, &rand_nr)) fprintf(stderr, "random_r 2 error\n"), exit(1); - page_nr |= ((unsigned long) rand_nr) << 32; + page_nr |= (((unsigned long) rand_nr) << 16) << + 16; } } else page_nr += 1; @@ -290,7 +291,8 @@ static void *uffd_poll_thread(void *arg) msg.event), exit(1); if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE) fprintf(stderr, "unexpected write fault\n"), exit(1); - offset = (char *)msg.arg.pagefault.address - area_dst; + offset = (char *)(unsigned long)msg.arg.pagefault.address - + area_dst; offset &= ~(page_size-1); if (copy_page(offset)) userfaults++; @@ -327,7 +329,8 @@ static void *uffd_read_thread(void *arg) if (bounces & BOUNCE_VERIFY && msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE) fprintf(stderr, "unexpected write fault\n"), exit(1); - offset = (char *)msg.arg.pagefault.address - area_dst; + offset = (char *)(unsigned long)msg.arg.pagefault.address - + area_dst; offset &= ~(page_size-1); if (copy_page(offset)) (*this_cpu_userfaults)++; -- cgit v1.2.3 From deb945441b9408d6cd15751f5232eeca9f50a5a1 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 8 Sep 2015 15:00:02 -0700 Subject: pagemap: switch to the new format and do some cleanup This patch removes page-shift bits (scheduled to remove since 3.11) and completes migration to the new bit layout. Also it cleans messy macro. Signed-off-by: Konstantin Khlebnikov Reviewed-by: Naoya Horiguchi Cc: Mark Williamson Tested-by: Mark Williamson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 150 ++++++++++++++++++-------------------------------- tools/vm/page-types.c | 25 +++------ 2 files changed, 61 insertions(+), 114 deletions(-) (limited to 'tools') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 07c86f51d225..41c0a0a500f7 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -712,23 +712,6 @@ const struct file_operations proc_tid_smaps_operations = { .release = proc_map_release, }; -/* - * We do not want to have constant page-shift bits sitting in - * pagemap entries and are about to reuse them some time soon. - * - * Here's the "migration strategy": - * 1. 
when the system boots these bits remain what they are, - * but a warning about future change is printed in log; - * 2. once anyone clears soft-dirty bits via clear_refs file, - * these flag is set to denote, that user is aware of the - * new API and those page-shift bits change their meaning. - * The respective warning is printed in dmesg; - * 3. In a couple of releases we will remove all the mentions - * of page-shift in pagemap entries. - */ - -static bool soft_dirty_cleared __read_mostly; - enum clear_refs_types { CLEAR_REFS_ALL = 1, CLEAR_REFS_ANON, @@ -889,13 +872,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) return -EINVAL; - if (type == CLEAR_REFS_SOFT_DIRTY) { - soft_dirty_cleared = true; - pr_warn_once("The pagemap bits 55-60 has changed their meaning!" - " See the linux/Documentation/vm/pagemap.txt for " - "details.\n"); - } - task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; @@ -963,36 +939,24 @@ typedef struct { struct pagemapread { int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ pagemap_entry_t *buffer; - bool v2; }; #define PAGEMAP_WALK_SIZE (PMD_SIZE) #define PAGEMAP_WALK_MASK (PMD_MASK) -#define PM_ENTRY_BYTES sizeof(pagemap_entry_t) -#define PM_STATUS_BITS 3 -#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) -#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) -#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) -#define PM_PSHIFT_BITS 6 -#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) -#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) -#define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) -#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) -#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) -/* in "new" pagemap pshift bits are occupied with more status bits */ -#define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT)) - -#define __PM_SOFT_DIRTY (1LL) -#define PM_PRESENT PM_STATUS(4LL) -#define PM_SWAP PM_STATUS(2LL) -#define PM_FILE PM_STATUS(1LL) -#define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0) +#define PM_ENTRY_BYTES sizeof(pagemap_entry_t) +#define PM_PFRAME_BITS 55 +#define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) +#define PM_SOFT_DIRTY BIT_ULL(55) +#define PM_FILE BIT_ULL(61) +#define PM_SWAP BIT_ULL(62) +#define PM_PRESENT BIT_ULL(63) + #define PM_END_OF_BUFFER 1 -static inline pagemap_entry_t make_pme(u64 val) +static inline pagemap_entry_t make_pme(u64 frame, u64 flags) { - return (pagemap_entry_t) { .pme = val }; + return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; } static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, @@ -1013,7 +977,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, while (addr < end) { struct vm_area_struct *vma = find_vma(walk->mm, addr); - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); + pagemap_entry_t pme = make_pme(0, 0); /* End of address space hole, which we mark as non-present. */ unsigned long hole_end; @@ -1033,7 +997,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, /* Addresses in the VMA. 
*/ if (vma->vm_flags & VM_SOFTDIRTY) - pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); + pme = make_pme(0, PM_SOFT_DIRTY); for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { err = add_to_pagemap(addr, &pme, pm); if (err) @@ -1044,63 +1008,61 @@ out: return err; } -static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, +static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, struct vm_area_struct *vma, unsigned long addr, pte_t pte) { - u64 frame, flags; + u64 frame = 0, flags = 0; struct page *page = NULL; - int flags2 = 0; if (pte_present(pte)) { frame = pte_pfn(pte); - flags = PM_PRESENT; + flags |= PM_PRESENT; page = vm_normal_page(vma, addr, pte); if (pte_soft_dirty(pte)) - flags2 |= __PM_SOFT_DIRTY; + flags |= PM_SOFT_DIRTY; } else if (is_swap_pte(pte)) { swp_entry_t entry; if (pte_swp_soft_dirty(pte)) - flags2 |= __PM_SOFT_DIRTY; + flags |= PM_SOFT_DIRTY; entry = pte_to_swp_entry(pte); frame = swp_type(entry) | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); - flags = PM_SWAP; + flags |= PM_SWAP; if (is_migration_entry(entry)) page = migration_entry_to_page(entry); - } else { - if (vma->vm_flags & VM_SOFTDIRTY) - flags2 |= __PM_SOFT_DIRTY; - *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2)); - return; } if (page && !PageAnon(page)) flags |= PM_FILE; - if ((vma->vm_flags & VM_SOFTDIRTY)) - flags2 |= __PM_SOFT_DIRTY; + if (vma->vm_flags & VM_SOFTDIRTY) + flags |= PM_SOFT_DIRTY; - *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); + return make_pme(frame, flags); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, - pmd_t pmd, int offset, int pmd_flags2) +static pagemap_entry_t thp_pmd_to_pagemap_entry(struct pagemapread *pm, + pmd_t pmd, int offset, u64 flags) { + u64 frame = 0; + /* * Currently pmd for thp is always present because thp can not be * swapped-out, migrated, or HWPOISONed (split in such cases instead.) * This if-check is just to prepare for future implementation. 
*/ - if (pmd_present(pmd)) - *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) - | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT); - else - *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2)); + if (pmd_present(pmd)) { + frame = pmd_pfn(pmd) + offset; + flags |= PM_PRESENT; + } + + return make_pme(frame, flags); } #else -static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, - pmd_t pmd, int offset, int pmd_flags2) +static pagemap_entry_t thp_pmd_to_pagemap_entry(struct pagemapread *pm, + pmd_t pmd, int offset, u64 flags) { + return make_pme(0, 0); } #endif @@ -1114,12 +1076,10 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, int err = 0; if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { - int pmd_flags2; + u64 flags = 0; if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) - pmd_flags2 = __PM_SOFT_DIRTY; - else - pmd_flags2 = 0; + flags |= PM_SOFT_DIRTY; for (; addr != end; addr += PAGE_SIZE) { unsigned long offset; @@ -1127,7 +1087,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, offset = (addr & ~PAGEMAP_WALK_MASK) >> PAGE_SHIFT; - thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2); + pme = thp_pmd_to_pagemap_entry(pm, *pmd, offset, flags); err = add_to_pagemap(addr, &pme, pm); if (err) break; @@ -1147,7 +1107,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; - pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); + pme = pte_to_pagemap_entry(pm, vma, addr, *pte); err = add_to_pagemap(addr, &pme, pm); if (err) break; @@ -1160,16 +1120,17 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } #ifdef CONFIG_HUGETLB_PAGE -static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, - pte_t pte, int offset, int flags2) +static pagemap_entry_t huge_pte_to_pagemap_entry(struct pagemapread *pm, + pte_t pte, int offset, u64 flags) { - if (pte_present(pte)) - *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) | - PM_STATUS2(pm->v2, flags2) | - PM_PRESENT); - else - *pme = make_pme(PM_NOT_PRESENT(pm->v2) | - PM_STATUS2(pm->v2, flags2)); + u64 frame = 0; + + if (pte_present(pte)) { + frame = pte_pfn(pte) + offset; + flags |= PM_PRESENT; + } + + return make_pme(frame, flags); } /* This function walks within one hugetlb entry in the single call */ @@ -1180,17 +1141,15 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, struct pagemapread *pm = walk->private; struct vm_area_struct *vma = walk->vma; int err = 0; - int flags2; + u64 flags = 0; pagemap_entry_t pme; if (vma->vm_flags & VM_SOFTDIRTY) - flags2 = __PM_SOFT_DIRTY; - else - flags2 = 0; + flags |= PM_SOFT_DIRTY; for (; addr != end; addr += PAGE_SIZE) { int offset = (addr & ~hmask) >> PAGE_SHIFT; - huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2); + pme = huge_pte_to_pagemap_entry(pm, *pte, offset, flags); err = add_to_pagemap(addr, &pme, pm); if (err) return err; @@ -1211,7 +1170,8 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, * Bits 0-54 page frame number (PFN) if present * Bits 0-4 swap type if swapped * Bits 5-54 swap offset if swapped - * Bits 55-60 page shift (page size = 1<> PAGE_SHIFT); pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); ret = -ENOMEM; @@ -1323,9 +1282,6 @@ static int pagemap_open(struct inode *inode, struct file *file) /* do not disclose physical addresses: attack vector */ if 
(!capable(CAP_SYS_ADMIN)) return -EPERM; - pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " - "to stop being page-shift some time soon. See the " - "linux/Documentation/vm/pagemap.txt for details.\n"); mm = proc_mem_open(inode, PTRACE_MODE_READ); if (IS_ERR(mm)) diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index 8bdf16b8ba60..603ec916716b 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -57,23 +57,14 @@ * pagemap kernel ABI bits */ -#define PM_ENTRY_BYTES sizeof(uint64_t) -#define PM_STATUS_BITS 3 -#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) -#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) -#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) -#define PM_PSHIFT_BITS 6 -#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) -#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) -#define __PM_PSHIFT(x) (((uint64_t) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) -#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) -#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) - -#define __PM_SOFT_DIRTY (1LL) -#define PM_PRESENT PM_STATUS(4LL) -#define PM_SWAP PM_STATUS(2LL) -#define PM_SOFT_DIRTY __PM_PSHIFT(__PM_SOFT_DIRTY) - +#define PM_ENTRY_BYTES 8 +#define PM_PFRAME_BITS 55 +#define PM_PFRAME_MASK ((1LL << PM_PFRAME_BITS) - 1) +#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) +#define PM_SOFT_DIRTY (1ULL << 55) +#define PM_FILE (1ULL << 61) +#define PM_SWAP (1ULL << 62) +#define PM_PRESENT (1ULL << 63) /* * kernel page flags -- cgit v1.2.3 From 77bb499bb60f4b79cca7d139c8041662860fcf87 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 8 Sep 2015 15:00:10 -0700 Subject: pagemap: add mmap-exclusive bit for marking pages mapped only here This patch sets bit 56 in pagemap if this page is mapped only once. It allows to detect exclusively used pages without exposing PFN: present file exclusive state 0 0 0 non-present 1 1 0 file page mapped somewhere else 1 1 1 file page mapped only here 1 0 0 anon non-CoWed page (shared with parent/child) 1 0 1 anon CoWed page (or never forked) CoWed pages in (MAP_FILE | MAP_PRIVATE) areas are anon in this context. MMap-exclusive bit doesn't reflect potential page-sharing via swapcache: page could be mapped once but has several swap-ptes which point to it. Application could detect that by swap bit in pagemap entry and touch that pte via /proc/pid/mem to get real information. See http://lkml.kernel.org/r/CAEVpBa+_RyACkhODZrRvQLs80iy0sqpdrd0AaP_-tgnX3Y9yNQ@mail.gmail.com Requested by Mark Williamson. 
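
To illustrate the table above from userspace, here is a minimal sketch (not
part of this patch) that reads one pagemap entry for the current process and
tests the present/file/swap/exclusive bits. The bit positions are the ones
defined by this series; the program structure and output format are only an
example, and bit 56 reads back as zero on kernels without this change.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_MMAP_EXCLUSIVE	(1ULL << 56)
#define PM_FILE			(1ULL << 61)
#define PM_SWAP			(1ULL << 62)
#define PM_PRESENT		(1ULL << 63)

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	uint64_t pme = 0;
	uintptr_t addr = (uintptr_t)&pme;	/* query the stack page holding pme */
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	/* pagemap holds one 64-bit entry per virtual page */
	if (pread(fd, &pme, sizeof(pme), (addr / pagesize) * sizeof(pme)) != sizeof(pme))
		return 1;
	printf("present=%d file=%d swap=%d exclusive=%d\n",
	       !!(pme & PM_PRESENT), !!(pme & PM_FILE),
	       !!(pme & PM_SWAP), !!(pme & PM_MMAP_EXCLUSIVE));
	close(fd);
	return 0;
}

On a kernel with this series, a never-forked private anonymous page such as
the one queried here should report present=1 file=0 swap=0 exclusive=1,
matching the state table above.
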
[akpm@linux-foundation.org: fix spello] Signed-off-by: Konstantin Khlebnikov Reviewed-by: Mark Williamson Tested-by: Mark Williamson Reviewed-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/pagemap.txt | 3 ++- fs/proc/task_mmu.c | 14 +++++++++++++- tools/vm/page-types.c | 10 ++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt index 6bfbc172cdb9..56faec0f73f7 100644 --- a/Documentation/vm/pagemap.txt +++ b/Documentation/vm/pagemap.txt @@ -16,7 +16,8 @@ There are three components to pagemap: * Bits 0-4 swap type if swapped * Bits 5-54 swap offset if swapped * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.txt) - * Bits 56-60 zero + * Bit 56 page exclusively mapped + * Bits 57-60 zero * Bit 61 page is file-page or shared-anon * Bit 62 page swapped * Bit 63 page present diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index bc651644b1b2..67c76468a7be 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -949,6 +949,7 @@ struct pagemapread { #define PM_PFRAME_BITS 55 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) #define PM_SOFT_DIRTY BIT_ULL(55) +#define PM_MMAP_EXCLUSIVE BIT_ULL(56) #define PM_FILE BIT_ULL(61) #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) @@ -1036,6 +1037,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (page && !PageAnon(page)) flags |= PM_FILE; + if (page && page_mapcount(page) == 1) + flags |= PM_MMAP_EXCLUSIVE; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; @@ -1066,6 +1069,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, * This if-check is just to prepare for future implementation. 
*/ if (pmd_present(pmd)) { + struct page *page = pmd_page(pmd); + + if (page_mapcount(page) == 1) + flags |= PM_MMAP_EXCLUSIVE; + flags |= PM_PRESENT; if (pm->show_pfn) frame = pmd_pfn(pmd) + @@ -1131,6 +1139,9 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, if (!PageAnon(page)) flags |= PM_FILE; + if (page_mapcount(page) == 1) + flags |= PM_MMAP_EXCLUSIVE; + flags |= PM_PRESENT; if (pm->show_pfn) frame = pte_pfn(pte) + @@ -1163,7 +1174,8 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, * Bits 0-4 swap type if swapped * Bits 5-54 swap offset if swapped * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.txt) - * Bits 56-60 zero + * Bit 56 page exclusively mapped + * Bits 57-60 zero * Bit 61 page is file-page or shared-anon * Bit 62 page swapped * Bit 63 page present diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index 603ec916716b..7f73fa32a590 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -62,6 +62,7 @@ #define PM_PFRAME_MASK ((1LL << PM_PFRAME_BITS) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) #define PM_SOFT_DIRTY (1ULL << 55) +#define PM_MMAP_EXCLUSIVE (1ULL << 56) #define PM_FILE (1ULL << 61) #define PM_SWAP (1ULL << 62) #define PM_PRESENT (1ULL << 63) @@ -91,6 +92,8 @@ #define KPF_SLOB_FREE 49 #define KPF_SLUB_FROZEN 50 #define KPF_SLUB_DEBUG 51 +#define KPF_FILE 62 +#define KPF_MMAP_EXCLUSIVE 63 #define KPF_ALL_BITS ((uint64_t)~0ULL) #define KPF_HACKERS_BITS (0xffffULL << 32) @@ -140,6 +143,9 @@ static const char * const page_flag_names[] = { [KPF_SLOB_FREE] = "P:slob_free", [KPF_SLUB_FROZEN] = "A:slub_frozen", [KPF_SLUB_DEBUG] = "E:slub_debug", + + [KPF_FILE] = "F:file", + [KPF_MMAP_EXCLUSIVE] = "1:mmap_exclusive", }; @@ -443,6 +449,10 @@ static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme) if (pme & PM_SOFT_DIRTY) flags |= BIT(SOFTDIRTY); + if (pme & PM_FILE) + flags |= BIT(FILE); + if (pme & PM_MMAP_EXCLUSIVE) + flags |= BIT(MMAP_EXCLUSIVE); return flags; } -- cgit v1.2.3 From 243db5351aae5e6756fb610d41431a30d44b56a6 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:02:52 -0700 Subject: Revert "selftests: add hugetlbfstest" This manually reverts 7e50533d4b842 ("selftests: add hugetlbfstest"). The hugetlbfstest test depends on hugetlb pages being counted in a task's rss. This functionality is not in the kernel, so the test will always fail. Remove test to avoid confusion. 
Signed-off-by: Mike Kravetz Acked-by: Naoya Horiguchi Cc: Joern Engel Cc: Davidlohr Bueso Cc: David Rientjes Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/selftests/vm/Makefile | 1 - tools/testing/selftests/vm/hugetlbfstest.c | 86 ------------------------------ tools/testing/selftests/vm/run_vmtests | 11 ---- 3 files changed, 98 deletions(-) delete mode 100644 tools/testing/selftests/vm/hugetlbfstest.c (limited to 'tools') diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 0d6854744b37..d36fab7d8ebd 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -4,7 +4,6 @@ CFLAGS = -Wall BINARIES = compaction_test BINARIES += hugepage-mmap BINARIES += hugepage-shm -BINARIES += hugetlbfstest BINARIES += map_hugetlb BINARIES += thuge-gen BINARIES += transhuge-stress diff --git a/tools/testing/selftests/vm/hugetlbfstest.c b/tools/testing/selftests/vm/hugetlbfstest.c deleted file mode 100644 index 02e1072ec187..000000000000 --- a/tools/testing/selftests/vm/hugetlbfstest.c +++ /dev/null @@ -1,86 +0,0 @@ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include - -typedef unsigned long long u64; - -static size_t length = 1 << 24; - -static u64 read_rss(void) -{ - char buf[4096], *s = buf; - int i, fd; - u64 rss; - - fd = open("/proc/self/statm", O_RDONLY); - assert(fd > 2); - memset(buf, 0, sizeof(buf)); - read(fd, buf, sizeof(buf) - 1); - for (i = 0; i < 1; i++) - s = strchr(s, ' ') + 1; - rss = strtoull(s, NULL, 10); - return rss << 12; /* assumes 4k pagesize */ -} - -static void do_mmap(int fd, int extra_flags, int unmap) -{ - int *p; - int flags = MAP_PRIVATE | MAP_POPULATE | extra_flags; - u64 before, after; - int ret; - - before = read_rss(); - p = mmap(NULL, length, PROT_READ | PROT_WRITE, flags, fd, 0); - assert(p != MAP_FAILED || - !"mmap returned an unexpected error"); - after = read_rss(); - assert(llabs(after - before - length) < 0x40000 || - !"rss didn't grow as expected"); - if (!unmap) - return; - ret = munmap(p, length); - assert(!ret || !"munmap returned an unexpected error"); - after = read_rss(); - assert(llabs(after - before) < 0x40000 || - !"rss didn't shrink as expected"); -} - -static int open_file(const char *path) -{ - int fd, err; - - unlink(path); - fd = open(path, O_CREAT | O_RDWR | O_TRUNC | O_EXCL - | O_LARGEFILE | O_CLOEXEC, 0600); - assert(fd > 2); - unlink(path); - err = ftruncate(fd, length); - assert(!err); - return fd; -} - -int main(void) -{ - int hugefd, fd; - - fd = open_file("/dev/shm/hugetlbhog"); - hugefd = open_file("/hugepages/hugetlbhog"); - - system("echo 100 > /proc/sys/vm/nr_hugepages"); - do_mmap(-1, MAP_ANONYMOUS, 1); - do_mmap(fd, 0, 1); - do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 1); - do_mmap(hugefd, 0, 1); - do_mmap(hugefd, MAP_HUGETLB, 1); - /* Leak the last one to test do_exit() */ - do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 0); - printf("oll korrekt.\n"); - return 0; -} diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 831adeb5fc55..d891d6e326f4 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests @@ -75,17 +75,6 @@ else echo "[PASS]" fi -echo "--------------------" -echo "running hugetlbfstest" -echo "--------------------" -./hugetlbfstest -if [ $? 
-ne 0 ]; then - echo "[FAIL]" - exitcode=1 -else - echo "[PASS]" -fi - echo "--------------------" echo "running userfaultfd" echo "--------------------" -- cgit v1.2.3 From fd5a9ecd6880619bea74c6b12ec86819eacfb012 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 8 Sep 2015 15:02:55 -0700 Subject: selftests:vm: point to libhugetlbfs for regression testing The hugetlb selftests provide minimal coverage. Have run script point people at libhugetlbfs for better regression testing. Signed-off-by: Mike Kravetz Acked-by: Naoya Horiguchi Cc: Joern Engel Cc: Davidlohr Bueso Cc: David Rientjes Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/selftests/vm/run_vmtests | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index d891d6e326f4..9179ce8df485 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests @@ -75,6 +75,10 @@ else echo "[PASS]" fi +echo "NOTE: The above hugetlb tests provide minimal coverage. Use" +echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" +echo " hugetlb regression testing." + echo "--------------------" echo "running userfaultfd" echo "--------------------" -- cgit v1.2.3
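
As a usage sketch for the note added above: the exact libhugetlbfs build and
test targets shown below are assumptions (check the project's README), but
the suggested regression run would look roughly like this, on a kernel booted
with some hugetlb pages reserved:

	git clone https://github.com/libhugetlbfs/libhugetlbfs.git
	cd libhugetlbfs
	make            # build the library and its test programs (assumed target)
	make check      # run the regression suite against the running kernel (assumed target)
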