From b6afcb94ce331b6d94dbeb56def173483993faf3 Mon Sep 17 00:00:00 2001 From: Ding Xiang Date: Thu, 31 Aug 2023 17:31:44 +0800 Subject: selftests/mm: gup_longterm: fix a resource leak The opened file should be closed in run_with_tmpfile(), otherwise a resource leak will occur. Link: https://lkml.kernel.org/r/20230831093144.7520-1-dingxiang@cmss.chinamobile.com Signed-off-by: Ding Xiang Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/gup_longterm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index d33d3e68ffab..ad168d35b23b 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -265,10 +265,11 @@ static void run_with_tmpfile(test_fn fn, const char *desc) fd = fileno(file); if (fd < 0) { ksft_test_result_fail("fileno() failed\n"); - return; + goto close; } fn(fd, pagesize); +close: fclose(file); } -- cgit v1.2.3 From 99eb26d59ce38a9f9ac0d63421deca6073f45086 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sun, 3 Sep 2023 15:13:24 +0000 Subject: selftests: mm: fix failure case when new remap region was not found When a valid remap region could not be found, the source mapping is not cleaned up. Fix the goto statement so that the cleanup happens. Link: https://lkml.kernel.org/r/20230903151328.2981432-4-joel@joelfernandes.org Signed-off-by: Joel Fernandes (Google) Reviewed-by: Lorenzo Stoakes Cc: Kalesh Singh Cc: "Kirill A. Shutemov" Cc: Liam R. Howlett Cc: Linus Torvalds Cc: Lokesh Gidra Cc: Michal Hocko Cc: Paul E. McKenney Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mremap_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index 5c3773de9f0f..6822d657f589 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -316,7 +316,7 @@ static long long remap_region(struct config c, unsigned int threshold_mb, if (addr + c.dest_alignment < addr) { ksft_print_msg("Couldn't find a valid region to remap to\n"); ret = -1; - goto out; + goto clean_up_src; } addr += c.dest_alignment; } -- cgit v1.2.3 From 8ed873d8e5cdec3852bcb59f7cc6d017f7bc0ab7 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sun, 3 Sep 2023 15:13:25 +0000 Subject: selftests: mm: add a test for mutually aligned moves > PMD size This patch adds a test case to check whether a PMD-alignment optimization successfully happens. I add support to make sure there is some room before the source mapping; otherwise, the optimization to trigger a PMD-aligned move will be disabled, as the kernel will detect that a mapping exists before the source and such an optimization becomes impossible. Link: https://lkml.kernel.org/r/20230903151328.2981432-5-joel@joelfernandes.org Signed-off-by: Joel Fernandes (Google) Reviewed-by: Lorenzo Stoakes Cc: Kalesh Singh Cc: "Kirill A. Shutemov" Cc: Liam R. Howlett Cc: Linus Torvalds Cc: Lokesh Gidra Cc: Michal Hocko Cc: Paul E.
McKenney Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mremap_test.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index 6822d657f589..6304eb0947a3 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -44,6 +44,7 @@ enum { _1MB = 1ULL << 20, _2MB = 2ULL << 20, _4MB = 4ULL << 20, + _5MB = 5ULL << 20, _1GB = 1ULL << 30, _2GB = 2ULL << 30, PMD = _2MB, @@ -235,6 +236,11 @@ static void *get_source_mapping(struct config c) unsigned long long mmap_min_addr; mmap_min_addr = get_mmap_min_addr(); + /* + * For some tests, we need to not have any mappings below the + * source mapping. Add some headroom to mmap_min_addr for this. + */ + mmap_min_addr += 10 * _4MB; retry: addr += c.src_alignment; @@ -434,7 +440,7 @@ static int parse_args(int argc, char **argv, unsigned int *threshold_mb, return 0; } -#define MAX_TEST 13 +#define MAX_TEST 14 #define MAX_PERF_TEST 3 int main(int argc, char **argv) { @@ -500,6 +506,10 @@ int main(int argc, char **argv) test_cases[12] = MAKE_TEST(PUD, PUD, _2GB, NON_OVERLAPPING, EXPECT_SUCCESS, "2GB mremap - Source PUD-aligned, Destination PUD-aligned"); + /* Src and Dest addr 1MB aligned. 5MB mremap. */ + test_cases[13] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS, + "5MB mremap - Source 1MB-aligned, Destination 1MB-aligned"); + perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS, "1GB mremap - Source PTE-aligned, Destination PTE-aligned"); /* -- cgit v1.2.3 From a4cb3b2433434377ae820ac4adcde1a570c6f332 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sun, 3 Sep 2023 15:13:26 +0000 Subject: selftests: mm: add a test for remapping to area immediately after existing mapping This patch adds support for verifying that we correctly handle the situation where something is already mapped before the destination of the remap. Any realignment of destination address and PMD-copy will destroy that existing mapping. In such cases, we need to avoid doing the optimization. To test this, we map an area called the preamble before the remap region. Then we verify after the mremap operation that this region did not get corrupted. Putting some prints in the kernel, I verified that we optimize correctly in different situations: Optimize when there is alignment and no previous mapping (this is tested by previous patch). can_align_down(old_vma->vm_start=2900000, old_addr=2900000, mask=-2097152): 0 can_align_down(new_vma->vm_start=2f00000, new_addr=2f00000, mask=-2097152): 0 === Starting move_page_tables === Doing PUD move for 2800000 -> 2e00000 of extent=200000 <-- Optimization Doing PUD move for 2a00000 -> 3000000 of extent=200000 Doing PUD move for 2c00000 -> 3200000 of extent=200000 Don't optimize when there is alignment but there is previous mapping (this is tested by this patch). Notice that can_align_down() returns 1 for the destination mapping as we detected there is something there. 
can_align_down(old_vma->vm_start=2900000, old_addr=2900000, mask=-2097152): 0 can_align_down(new_vma->vm_start=5700000, new_addr=5700000, mask=-2097152): 1 === Starting move_page_tables === Doing move_ptes for 2900000 -> 5700000 of extent=100000 <-- Unoptimized Doing PUD move for 2a00000 -> 5800000 of extent=200000 Doing PUD move for 2c00000 -> 5a00000 of extent=200000 Link: https://lkml.kernel.org/r/20230903151328.2981432-6-joel@joelfernandes.org Signed-off-by: Joel Fernandes (Google) Reviewed-by: Lorenzo Stoakes Cc: Kalesh Singh Cc: "Kirill A. Shutemov" Cc: Liam R. Howlett Cc: Linus Torvalds Cc: Lokesh Gidra Cc: Michal Hocko Cc: Paul E. McKenney Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mremap_test.c | 57 +++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index 6304eb0947a3..d7366074e2a8 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -29,6 +29,7 @@ struct config { unsigned long long dest_alignment; unsigned long long region_size; int overlapping; + int dest_preamble_size; }; struct test { @@ -283,7 +284,7 @@ error: static long long remap_region(struct config c, unsigned int threshold_mb, char pattern_seed) { - void *addr, *src_addr, *dest_addr; + void *addr, *src_addr, *dest_addr, *dest_preamble_addr; unsigned long long i; struct timespec t_start = {0, 0}, t_end = {0, 0}; long long start_ns, end_ns, align_mask, ret, offset; @@ -300,7 +301,7 @@ static long long remap_region(struct config c, unsigned int threshold_mb, goto out; } - /* Set byte pattern */ + /* Set byte pattern for source block. */ srand(pattern_seed); for (i = 0; i < threshold; i++) memset((char *) src_addr + i, (char) rand(), 1); @@ -312,6 +313,9 @@ static long long remap_region(struct config c, unsigned int threshold_mb, addr = (void *) (((unsigned long long) src_addr + c.region_size + offset) & align_mask); + /* Remap after the destination block preamble. */ + addr += c.dest_preamble_size; + /* See comment in get_source_mapping() */ if (!((unsigned long long) addr & c.dest_alignment)) addr = (void *) ((unsigned long long) addr | c.dest_alignment); @@ -327,6 +331,24 @@ static long long remap_region(struct config c, unsigned int threshold_mb, addr += c.dest_alignment; } + if (c.dest_preamble_size) { + dest_preamble_addr = mmap((void *) addr - c.dest_preamble_size, c.dest_preamble_size, + PROT_READ | PROT_WRITE, + MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED, + -1, 0); + if (dest_preamble_addr == MAP_FAILED) { + ksft_print_msg("Failed to map dest preamble region: %s\n", + strerror(errno)); + ret = -1; + goto clean_up_src; + } + + /* Set byte pattern for the dest preamble block. 
*/ + srand(pattern_seed); + for (i = 0; i < c.dest_preamble_size; i++) + memset((char *) dest_preamble_addr + i, (char) rand(), 1); + } + clock_gettime(CLOCK_MONOTONIC, &t_start); dest_addr = mremap(src_addr, c.region_size, c.region_size, MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr); @@ -335,7 +357,7 @@ static long long remap_region(struct config c, unsigned int threshold_mb, if (dest_addr == MAP_FAILED) { ksft_print_msg("mremap failed: %s\n", strerror(errno)); ret = -1; - goto clean_up_src; + goto clean_up_dest_preamble; } /* Verify byte pattern after remapping */ @@ -353,6 +375,23 @@ static long long remap_region(struct config c, unsigned int threshold_mb, } } + /* Verify the dest preamble byte pattern after remapping */ + if (c.dest_preamble_size) { + srand(pattern_seed); + for (i = 0; i < c.dest_preamble_size; i++) { + char c = (char) rand(); + + if (((char *) dest_preamble_addr)[i] != c) { + ksft_print_msg("Preamble data after remap doesn't match at offset %d\n", + i); + ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff, + ((char *) dest_preamble_addr)[i] & 0xff); + ret = -1; + goto clean_up_dest; + } + } + } + start_ns = t_start.tv_sec * NS_PER_SEC + t_start.tv_nsec; end_ns = t_end.tv_sec * NS_PER_SEC + t_end.tv_nsec; ret = end_ns - start_ns; @@ -365,6 +404,9 @@ static long long remap_region(struct config c, unsigned int threshold_mb, */ clean_up_dest: munmap(dest_addr, c.region_size); +clean_up_dest_preamble: + if (c.dest_preamble_size && dest_preamble_addr) + munmap(dest_preamble_addr, c.dest_preamble_size); clean_up_src: munmap(src_addr, c.region_size); out: @@ -440,7 +482,7 @@ static int parse_args(int argc, char **argv, unsigned int *threshold_mb, return 0; } -#define MAX_TEST 14 +#define MAX_TEST 15 #define MAX_PERF_TEST 3 int main(int argc, char **argv) { @@ -449,7 +491,7 @@ int main(int argc, char **argv) unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD; unsigned int pattern_seed; int num_expand_tests = 2; - struct test test_cases[MAX_TEST]; + struct test test_cases[MAX_TEST] = {}; struct test perf_test_cases[MAX_PERF_TEST]; int page_size; time_t t; @@ -510,6 +552,11 @@ int main(int argc, char **argv) test_cases[13] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS, "5MB mremap - Source 1MB-aligned, Destination 1MB-aligned"); + /* Src and Dest addr 1MB aligned. 5MB mremap. */ + test_cases[14] = MAKE_TEST(_1MB, _1MB, _5MB, NON_OVERLAPPING, EXPECT_SUCCESS, + "5MB mremap - Source 1MB-aligned, Dest 1MB-aligned with 40MB Preamble"); + test_cases[14].config.dest_preamble_size = 10 * _4MB; + perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS, "1GB mremap - Source PTE-aligned, Destination PTE-aligned"); /* -- cgit v1.2.3 From 85a22845b094ec6eeb7c24c692f2290e56d9f07c Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sun, 3 Sep 2023 15:13:27 +0000 Subject: selftests: mm: add a test for remapping within a range Move a block of memory within a memory range. Any alignment optimization on the source address may cause corruption. Verify using kselftest that it works. I have also verified with tracing that such optimization does not happen due to this check in can_align_down(): if (!for_stack && vma->vm_start != addr_to_align) return false; Link: https://lkml.kernel.org/r/20230903151328.2981432-7-joel@joelfernandes.org Signed-off-by: Joel Fernandes (Google) Reviewed-by: Lorenzo Stoakes Cc: Kalesh Singh Cc: "Kirill A. Shutemov" Cc: Liam R. Howlett Cc: Linus Torvalds Cc: Lokesh Gidra Cc: Michal Hocko Cc: Paul E. 
McKenney Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mremap_test.c | 79 +++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index d7366074e2a8..12a095457f4c 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -23,6 +23,7 @@ #define VALIDATION_NO_THRESHOLD 0 /* Verify the entire region */ #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#define SIZE_MB(m) ((size_t)m * (1024 * 1024)) struct config { unsigned long long src_alignment; @@ -226,6 +227,79 @@ out: ksft_test_result_fail("%s\n", test_name); } +/* + * Verify that an mremap within a range does not cause corruption + * of unrelated part of range. + * + * Consider the following range which is 2MB aligned and is + * a part of a larger 20MB range which is not shown. Each + * character is 256KB below making the source and destination + * 2MB each. The lower case letters are moved (s to d) and the + * upper case letters are not moved. The below test verifies + * that the upper case S letters are not corrupted by the + * adjacent mremap. + * + * |DDDDddddSSSSssss| + */ +static void mremap_move_within_range(char pattern_seed) +{ + char *test_name = "mremap mremap move within range"; + void *src, *dest; + int i, success = 1; + + size_t size = SIZE_MB(20); + void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) { + perror("mmap"); + success = 0; + goto out; + } + memset(ptr, 0, size); + + src = ptr + SIZE_MB(6); + src = (void *)((unsigned long)src & ~(SIZE_MB(2) - 1)); + + /* Set byte pattern for source block. */ + srand(pattern_seed); + for (i = 0; i < SIZE_MB(2); i++) { + ((char *)src)[i] = (char) rand(); + } + + dest = src - SIZE_MB(2); + + void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1), + MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1)); + if (new_ptr == MAP_FAILED) { + perror("mremap"); + success = 0; + goto out; + } + + /* Verify byte pattern after remapping */ + srand(pattern_seed); + for (i = 0; i < SIZE_MB(1); i++) { + char c = (char) rand(); + + if (((char *)src)[i] != c) { + ksft_print_msg("Data at src at %d got corrupted due to unrelated mremap\n", + i); + ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff, + ((char *) src)[i] & 0xff); + success = 0; + } + } + +out: + if (munmap(ptr, size) == -1) + perror("munmap"); + + if (success) + ksft_test_result_pass("%s\n", test_name); + else + ksft_test_result_fail("%s\n", test_name); +} + /* * Returns the start address of the mapping on success, else returns * NULL on failure. @@ -491,6 +565,7 @@ int main(int argc, char **argv) unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD; unsigned int pattern_seed; int num_expand_tests = 2; + int num_misc_tests = 1; struct test test_cases[MAX_TEST] = {}; struct test perf_test_cases[MAX_PERF_TEST]; int page_size; @@ -572,7 +647,7 @@ int main(int argc, char **argv) (threshold_mb * _1MB >= _1GB); ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ? 
- ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests); + ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests + num_misc_tests); for (i = 0; i < ARRAY_SIZE(test_cases); i++) run_mremap_test_case(test_cases[i], &failures, threshold_mb, @@ -590,6 +665,8 @@ int main(int argc, char **argv) fclose(maps_fp); + mremap_move_within_range(pattern_seed); + if (run_perf_tests) { ksft_print_msg("\n%s\n", "mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:"); -- cgit v1.2.3 From 7b709f38dc0faf0d6dfeff681e37e59f9aabebd6 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Sun, 3 Sep 2023 15:13:28 +0000 Subject: selftests: mm: add a test for moving from an offset from start of mapping It is possible that the aligned address falls on no existing mapping, however that does not mean that we can just align it down to that. This test verifies that the "vma->vm_start != addr_to_align" check in can_align_down() prevents disastrous results if aligning down when source and dest are mutually aligned within a PMD but the source/dest addresses requested are not at the beginning of the respective mapping containing these addresses. Link: https://lkml.kernel.org/r/20230903151328.2981432-8-joel@joelfernandes.org Signed-off-by: Joel Fernandes (Google) Reviewed-by: Lorenzo Stoakes Cc: Kalesh Singh Cc: "Kirill A. Shutemov" Cc: Liam R. Howlett Cc: Linus Torvalds Cc: Lokesh Gidra Cc: Michal Hocko Cc: Paul E. McKenney Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mremap_test.c | 189 ++++++++++++++++++++++--------- 1 file changed, 134 insertions(+), 55 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index 12a095457f4c..1f836e670a37 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -24,6 +24,7 @@ #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #define SIZE_MB(m) ((size_t)m * (1024 * 1024)) +#define SIZE_KB(k) ((size_t)k * 1024) struct config { unsigned long long src_alignment; @@ -148,6 +149,60 @@ static bool is_range_mapped(FILE *maps_fp, void *start, void *end) return success; } +/* + * Returns the start address of the mapping on success, else returns + * NULL on failure. + */ +static void *get_source_mapping(struct config c) +{ + unsigned long long addr = 0ULL; + void *src_addr = NULL; + unsigned long long mmap_min_addr; + + mmap_min_addr = get_mmap_min_addr(); + /* + * For some tests, we need to not have any mappings below the + * source mapping. Add some headroom to mmap_min_addr for this. + */ + mmap_min_addr += 10 * _4MB; + +retry: + addr += c.src_alignment; + if (addr < mmap_min_addr) + goto retry; + + src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE, + MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED, + -1, 0); + if (src_addr == MAP_FAILED) { + if (errno == EPERM || errno == EEXIST) + goto retry; + goto error; + } + /* + * Check that the address is aligned to the specified alignment. + * Addresses which have alignments that are multiples of that + * specified are not considered valid. For instance, 1GB address is + * 2MB-aligned, however it will not be considered valid for a + * requested alignment of 2MB. This is done to reduce coincidental + * alignment in the tests. 
+ */ + if (((unsigned long long) src_addr & (c.src_alignment - 1)) || + !((unsigned long long) src_addr & c.src_alignment)) { + munmap(src_addr, c.region_size); + goto retry; + } + + if (!src_addr) + goto error; + + return src_addr; +error: + ksft_print_msg("Failed to map source region: %s\n", + strerror(errno)); + return NULL; +} + /* * This test validates that merge is called when expanding a mapping. * Mapping containing three pages is created, middle page is unmapped @@ -300,60 +355,6 @@ out: ksft_test_result_fail("%s\n", test_name); } -/* - * Returns the start address of the mapping on success, else returns - * NULL on failure. - */ -static void *get_source_mapping(struct config c) -{ - unsigned long long addr = 0ULL; - void *src_addr = NULL; - unsigned long long mmap_min_addr; - - mmap_min_addr = get_mmap_min_addr(); - /* - * For some tests, we need to not have any mappings below the - * source mapping. Add some headroom to mmap_min_addr for this. - */ - mmap_min_addr += 10 * _4MB; - -retry: - addr += c.src_alignment; - if (addr < mmap_min_addr) - goto retry; - - src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE, - MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED, - -1, 0); - if (src_addr == MAP_FAILED) { - if (errno == EPERM || errno == EEXIST) - goto retry; - goto error; - } - /* - * Check that the address is aligned to the specified alignment. - * Addresses which have alignments that are multiples of that - * specified are not considered valid. For instance, 1GB address is - * 2MB-aligned, however it will not be considered valid for a - * requested alignment of 2MB. This is done to reduce coincidental - * alignment in the tests. - */ - if (((unsigned long long) src_addr & (c.src_alignment - 1)) || - !((unsigned long long) src_addr & c.src_alignment)) { - munmap(src_addr, c.region_size); - goto retry; - } - - if (!src_addr) - goto error; - - return src_addr; -error: - ksft_print_msg("Failed to map source region: %s\n", - strerror(errno)); - return NULL; -} - /* Returns the time taken for the remap on success else returns -1. */ static long long remap_region(struct config c, unsigned int threshold_mb, char pattern_seed) @@ -487,6 +488,83 @@ out: return ret; } +/* + * Verify that an mremap aligning down does not destroy + * the beginning of the mapping just because the aligned + * down address landed on a mapping that maybe does not exist. + */ +static void mremap_move_1mb_from_start(char pattern_seed) +{ + char *test_name = "mremap move 1mb from start at 1MB+256KB aligned src"; + void *src = NULL, *dest = NULL; + int i, success = 1; + + /* Config to reuse get_source_mapping() to do an aligned mmap. */ + struct config c = { + .src_alignment = SIZE_MB(1) + SIZE_KB(256), + .region_size = SIZE_MB(6) + }; + + src = get_source_mapping(c); + if (!src) { + success = 0; + goto out; + } + + c.src_alignment = SIZE_MB(1) + SIZE_KB(256); + dest = get_source_mapping(c); + if (!dest) { + success = 0; + goto out; + } + + /* Set byte pattern for source block. */ + srand(pattern_seed); + for (i = 0; i < SIZE_MB(2); i++) { + ((char *)src)[i] = (char) rand(); + } + + /* + * Unmap the beginning of dest so that the aligned address + * falls on no mapping. 
*/ + munmap(dest, SIZE_MB(1)); + + void *new_ptr = mremap(src + SIZE_MB(1), SIZE_MB(1), SIZE_MB(1), + MREMAP_MAYMOVE | MREMAP_FIXED, dest + SIZE_MB(1)); + if (new_ptr == MAP_FAILED) { + perror("mremap"); + success = 0; + goto out; + } + + /* Verify byte pattern after remapping */ + srand(pattern_seed); + for (i = 0; i < SIZE_MB(1); i++) { + char c = (char) rand(); + + if (((char *)src)[i] != c) { + ksft_print_msg("Data at src at %d got corrupted due to unrelated mremap\n", + i); + ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff, + ((char *) src)[i] & 0xff); + success = 0; + } + } + +out: + if (src && munmap(src, c.region_size) == -1) + perror("munmap src"); + + if (dest && munmap(dest, c.region_size) == -1) + perror("munmap dest"); + + if (success) + ksft_test_result_pass("%s\n", test_name); + else + ksft_test_result_fail("%s\n", test_name); +} + static void run_mremap_test_case(struct test test_case, int *failures, unsigned int threshold_mb, unsigned int pattern_seed) @@ -565,7 +643,7 @@ int main(int argc, char **argv) unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD; unsigned int pattern_seed; int num_expand_tests = 2; - int num_misc_tests = 1; + int num_misc_tests = 2; struct test test_cases[MAX_TEST] = {}; struct test perf_test_cases[MAX_PERF_TEST]; int page_size; @@ -666,6 +744,7 @@ int main(int argc, char **argv) fclose(maps_fp); mremap_move_within_range(pattern_seed); + mremap_move_1mb_from_start(pattern_seed); if (run_perf_tests) { ksft_print_msg("\n%s\n", -- cgit v1.2.3 From 65ded14e2818bdd9b9bdc128cf47122533f46778 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Sat, 16 Sep 2023 02:09:43 +0000 Subject: selftests/damon/sysfs: test DAMOS apply intervals Update DAMON selftests to test the existence of the file for reading/writing the DAMOS apply interval under each scheme directory. Link: https://lkml.kernel.org/r/20230916020945.47296-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Cc: Shuah Khan Cc: Steven Rostedt (Google) Signed-off-by: Andrew Morton --- tools/testing/selftests/damon/sysfs.sh | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh index 60a9a305aef0..56f0230a8b92 100644 --- a/tools/testing/selftests/damon/sysfs.sh +++ b/tools/testing/selftests/damon/sysfs.sh @@ -175,6 +175,7 @@ test_scheme() ensure_dir "$scheme_dir" "exist" ensure_file "$scheme_dir/action" "exist" "600" test_access_pattern "$scheme_dir/access_pattern" + ensure_file "$scheme_dir/apply_interval_us" "exist" "600" test_quotas "$scheme_dir/quotas" test_watermarks "$scheme_dir/watermarks" test_filters "$scheme_dir/filters" -- cgit v1.2.3 From 29d68b219ff858e11b6bc1477fa7af58db6895c9 Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Mon, 28 Aug 2023 17:08:53 +0200 Subject: kselftest: vm: fix tabs/spaces inconsistency in the mdwe test Patch series "MDWE without inheritance", v4. Joey recently introduced a Memory-Deny-Write-Execute (MDWE) prctl which tags current with a flag that prevents pages that were previously not executable from becoming executable. This tag always gets inherited by child tasks (it's in MMF_INIT_MASK). At Google, we've been using a somewhat similar downstream patch for a few years now. To make the adoption of this feature easier, we've had it support a mode in which the W^X flag does not propagate to children. For example, this is handy if a C process which wants W^X protection suspects it could start child processes that would use a JIT.
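As a stand-alone illustration of that scenario (my sketch, not part of the series), the flow looks roughly like this, assuming headers new enough to provide the PR_MDWE_* constants that the later patches in this series introduce:

#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* Protect this process, but do not propagate MDWE to children. */
	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT,
		  0L, 0L, 0L))
		perror("PR_SET_MDWE");

	if (fork() == 0) {
		/* Child: MDWE was not inherited, so a JIT-style W|X mapping works. */
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("child W|X mapping: %s\n", p == MAP_FAILED ? "denied" : "allowed");
		_exit(0);
	}
	wait(NULL);

	/* Parent: MDWE applies, so the same mapping is refused (EACCES). */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	printf("parent W|X mapping: %s\n", p == MAP_FAILED ? "denied" : "allowed");
	return 0;
}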
I'd like to align our features with the upstream prctl. This series proposes a new NO_INHERIT flag to the MDWE prctl to make this kind of adoption easier. It sets a different flag in current that is not in MMF_INIT_MASK and which does not propagate. As part of looking into MDWE, I also fixed a couple of things in the MDWE test. The background for this was discussed in these threads: v1: https://lore.kernel.org/all/66900d0ad42797a55259061f757beece@ispras.ru/ v2: https://lore.kernel.org/all/d7e3749c-a718-df94-92af-1cb0fecab772@redhat.com/ This patch (of 6): Fix tabs/spaces inconsistency in the mdwe test. Link: https://lkml.kernel.org/r/20230828150858.393570-1-revest@chromium.org Link: https://lkml.kernel.org/r/20230828150858.393570-2-revest@chromium.org Signed-off-by: Florent Revest Reviewed-by: David Hildenbrand Reviewed-by: Kees Cook Acked-by: Catalin Marinas Cc: Alexey Izbyshev Cc: Anshuman Khandual Cc: Ayush Jain Cc: Greg Thelen Cc: Joey Gouly Cc: KP Singh Cc: Mark Brown Cc: Michal Hocko Cc: Peter Xu Cc: Szabolcs Nagy Cc: Topi Miettinen Cc: Ryan Roberts Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mdwe_test.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c index bc91bef5d254..d0954c657feb 100644 --- a/tools/testing/selftests/mm/mdwe_test.c +++ b/tools/testing/selftests/mm/mdwe_test.c @@ -49,19 +49,19 @@ FIXTURE_VARIANT(mdwe) FIXTURE_VARIANT_ADD(mdwe, stock) { - .enabled = false, + .enabled = false, .forked = false, }; FIXTURE_VARIANT_ADD(mdwe, enabled) { - .enabled = true, + .enabled = true, .forked = false, }; FIXTURE_VARIANT_ADD(mdwe, forked) { - .enabled = true, + .enabled = true, .forked = true, }; -- cgit v1.2.3 From a27e2e2d465e4ed73371974040689ac3e78fe3ee Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Mon, 28 Aug 2023 17:08:54 +0200 Subject: kselftest: vm: fix mdwe's mmap_FIXED test case I checked with the original author: the mmap_FIXED test case wasn't properly tested and fails. Currently, it maps two consecutive (non-overlapping) pages and expects the second mapping to be denied by MDWE, but these two pages have nothing to do with each other, so MDWE is actually out of the picture here. What the test actually intended to do was to remap a virtual address using MAP_FIXED. However, this operation unmaps the existing mapping and creates a new one, so the VA is backed by a new page and MDWE is again out of the picture: all remappings should succeed. This patch keeps the test case to make it clear that this situation is expected to work: MDWE shouldn't block a MAP_FIXED replacement.
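To make the expected semantics concrete, here is a minimal stand-alone sketch (my illustration, not part of the patch) of why such a replacement succeeds even with MDWE enabled:

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t size = 4096;

	/* A plain read-only anonymous mapping. */
	void *p = mmap(NULL, size, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/*
	 * Replace it in place: MAP_FIXED discards the old mapping and
	 * installs a fresh one, so the new pages were never writable
	 * and MDWE has nothing to refuse.
	 */
	void *q = mmap(p, size, PROT_READ | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	printf("MAP_FIXED replacement %s\n", q == p ? "succeeded" : "failed");
	return 0;
}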
Link: https://lkml.kernel.org/r/20230828150858.393570-3-revest@chromium.org Fixes: 4cf1fe34fd18 ("kselftest: vm: add tests for memory-deny-write-execute") Signed-off-by: Florent Revest Reviewed-by: David Hildenbrand Reviewed-by: Kees Cook Reviewed-by: Catalin Marinas Reviewed-by: Ryan Roberts Tested-by: Ryan Roberts Tested-by: Ayush Jain Cc: Alexey Izbyshev Cc: Anshuman Khandual Cc: Greg Thelen Cc: Joey Gouly Cc: KP Singh Cc: Mark Brown Cc: Michal Hocko Cc: Peter Xu Cc: Szabolcs Nagy Cc: Topi Miettinen Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mdwe_test.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c index d0954c657feb..91aa9c3099e7 100644 --- a/tools/testing/selftests/mm/mdwe_test.c +++ b/tools/testing/selftests/mm/mdwe_test.c @@ -168,13 +168,10 @@ TEST_F(mdwe, mmap_FIXED) self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0); ASSERT_NE(self->p, MAP_FAILED); - p = mmap(self->p + self->size, self->size, PROT_READ | PROT_EXEC, + /* MAP_FIXED unmaps the existing page before mapping which is allowed */ + p = mmap(self->p, self->size, PROT_READ | PROT_EXEC, self->flags | MAP_FIXED, 0, 0); - if (variant->enabled) { - EXPECT_EQ(p, MAP_FAILED); - } else { - EXPECT_EQ(p, self->p); - } + EXPECT_EQ(p, self->p); } TEST_F(mdwe, arm64_BTI) -- cgit v1.2.3 From c93d05a729f9646a38aaf53781b447c99cfd86fb Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Mon, 28 Aug 2023 17:08:55 +0200 Subject: kselftest: vm: check errnos in mdwe_test Invalid prctls return a negative code and set errno. It's good practice to check that errno is set as expected. Link: https://lkml.kernel.org/r/20230828150858.393570-4-revest@chromium.org Signed-off-by: Florent Revest Reviewed-by: Kees Cook Acked-by: Catalin Marinas Cc: Alexey Izbyshev Cc: Anshuman Khandual Cc: Ayush Jain Cc: David Hildenbrand Cc: Greg Thelen Cc: Joey Gouly Cc: KP Singh Cc: Mark Brown Cc: Michal Hocko Cc: Peter Xu Cc: Ryan Roberts Cc: Szabolcs Nagy Cc: Topi Miettinen Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mdwe_test.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c index 91aa9c3099e7..1b84cf8e1bbe 100644 --- a/tools/testing/selftests/mm/mdwe_test.c +++ b/tools/testing/selftests/mm/mdwe_test.c @@ -23,14 +23,22 @@ TEST(prctl_flags) { EXPECT_LT(prctl(PR_SET_MDWE, 7L, 0L, 0L, 0L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_SET_MDWE, 0L, 7L, 0L, 0L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_SET_MDWE, 0L, 0L, 7L, 0L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_SET_MDWE, 0L, 0L, 0L, 7L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_GET_MDWE, 7L, 0L, 0L, 0L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_GET_MDWE, 0L, 7L, 0L, 0L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_GET_MDWE, 0L, 0L, 7L, 0L), 0); + EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_GET_MDWE, 0L, 0L, 0L, 7L), 0); + EXPECT_EQ(errno, EINVAL); } FIXTURE(mdwe) -- cgit v1.2.3 From 0da668333fb07805c2836d5d50e26eda915b24a1 Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Mon, 28 Aug 2023 17:08:56 +0200 Subject: mm: make PR_MDWE_REFUSE_EXEC_GAIN an unsigned long Defining a prctl flag as an int is a footgun because on a 64 bit machine and with a variadic implementation of prctl (like in musl and glibc), when used directly as a prctl argument, it can get casted to long with 
garbage upper bits which would result in unexpected behaviors. This patch changes the constant to an unsigned long to eliminate that possibility. This does not break UAPI. I think that a stable backport would be "nice to have": to reduce the chances that users build binaries that could end up with garbage bits in their MDWE prctl arguments. We are not aware of anyone having yet encountered this corner case with MDWE prctls but a backport would reduce the likelihood it happens, since this sort of issue has happened with other prctls. But if this is perceived as a backporting burden, I suppose we could also live without a stable backport. Link: https://lkml.kernel.org/r/20230828150858.393570-5-revest@chromium.org Fixes: b507808ebce2 ("mm: implement memory-deny-write-execute as a prctl") Signed-off-by: Florent Revest Suggested-by: Alexey Izbyshev Reviewed-by: David Hildenbrand Reviewed-by: Kees Cook Acked-by: Catalin Marinas Cc: Anshuman Khandual Cc: Ayush Jain Cc: Greg Thelen Cc: Joey Gouly Cc: KP Singh Cc: Mark Brown Cc: Michal Hocko Cc: Peter Xu Cc: Ryan Roberts Cc: Szabolcs Nagy Cc: Topi Miettinen Cc: Signed-off-by: Andrew Morton --- include/uapi/linux/prctl.h | 2 +- tools/include/uapi/linux/prctl.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 3c36aeade991..9a85c69782bd 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -283,7 +283,7 @@ struct prctl_mm_map { /* Memory deny write / execute */ #define PR_SET_MDWE 65 -# define PR_MDWE_REFUSE_EXEC_GAIN 1 +# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0) #define PR_GET_MDWE 66 diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index 3c36aeade991..9a85c69782bd 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -283,7 +283,7 @@ struct prctl_mm_map { /* Memory deny write / execute */ #define PR_SET_MDWE 65 -# define PR_MDWE_REFUSE_EXEC_GAIN 1 +# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0) #define PR_GET_MDWE 66 -- cgit v1.2.3 From 24e41bf8a6b424c76c5902fb999e9eca61bdf83d Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Mon, 28 Aug 2023 17:08:57 +0200 Subject: mm: add a NO_INHERIT flag to the PR_SET_MDWE prctl This extends the current PR_SET_MDWE prctl arg with a bit to indicate that the process doesn't want MDWE protection to propagate to children. To implement this no-inherit mode, the tag in current->mm->flags must be absent from MMF_INIT_MASK. This means that the encoding for "MDWE but without inherit" is different in the prctl than in the mm flags. This leads to a bit of bit-mangling in the prctl implementation.
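From userspace, the resulting semantics look roughly like the sketch below (my illustration, assuming the PR_MDWE_NO_INHERIT constant added by this patch); note that, as with the existing flag, the bits can be queried back but never relaxed once set:

#include <sys/prctl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long bits = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT;

	if (prctl(PR_SET_MDWE, bits, 0L, 0L, 0L))
		perror("PR_SET_MDWE");

	/* PR_GET_MDWE reports exactly the bits that were set. */
	printf("mdwe bits: %d\n", prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L));

	/* Dropping either bit afterwards is refused with EPERM. */
	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L))
		printf("relaxing the flags failed: %s\n", strerror(errno));
	return 0;
}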
Link: https://lkml.kernel.org/r/20230828150858.393570-6-revest@chromium.org Signed-off-by: Florent Revest Reviewed-by: Kees Cook Reviewed-by: Catalin Marinas Cc: Alexey Izbyshev Cc: Anshuman Khandual Cc: Ayush Jain Cc: David Hildenbrand Cc: Greg Thelen Cc: Joey Gouly Cc: KP Singh Cc: Mark Brown Cc: Michal Hocko Cc: Peter Xu Cc: Ryan Roberts Cc: Szabolcs Nagy Cc: Topi Miettinen Signed-off-by: Andrew Morton --- include/linux/sched/coredump.h | 10 ++++++++++ include/uapi/linux/prctl.h | 1 + kernel/fork.c | 2 +- kernel/sys.c | 32 ++++++++++++++++++++++++++------ tools/include/uapi/linux/prctl.h | 1 + 5 files changed, 39 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 0ee96ea7a0e9..1b37fa8fc723 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -91,4 +91,14 @@ static inline int get_dumpable(struct mm_struct *mm) MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK) #define MMF_VM_MERGE_ANY 29 +#define MMF_HAS_MDWE_NO_INHERIT 30 + +static inline unsigned long mmf_init_flags(unsigned long flags) +{ + if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT)) + flags &= ~((1UL << MMF_HAS_MDWE) | + (1UL << MMF_HAS_MDWE_NO_INHERIT)); + return flags & MMF_INIT_MASK; +} + #endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 9a85c69782bd..370ed14b1ae0 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -284,6 +284,7 @@ struct prctl_mm_map { /* Memory deny write / execute */ #define PR_SET_MDWE 65 # define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0) +# define PR_MDWE_NO_INHERIT (1UL << 1) #define PR_GET_MDWE 66 diff --git a/kernel/fork.c b/kernel/fork.c index 1779183a7cb3..e45a4457ba83 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1288,7 +1288,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, hugetlb_count_init(mm); if (current->mm) { - mm->flags = current->mm->flags & MMF_INIT_MASK; + mm->flags = mmf_init_flags(current->mm->flags); mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; } else { mm->flags = default_dump_filter; diff --git a/kernel/sys.c b/kernel/sys.c index 2410e3999ebe..4a8073c1b255 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2368,19 +2368,41 @@ static int prctl_set_vma(unsigned long opt, unsigned long start, } #endif /* CONFIG_ANON_VMA_NAME */ +static inline unsigned long get_current_mdwe(void) +{ + unsigned long ret = 0; + + if (test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) + ret |= PR_MDWE_REFUSE_EXEC_GAIN; + if (test_bit(MMF_HAS_MDWE_NO_INHERIT, ¤t->mm->flags)) + ret |= PR_MDWE_NO_INHERIT; + + return ret; +} + static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3, unsigned long arg4, unsigned long arg5) { + unsigned long current_bits; + if (arg3 || arg4 || arg5) return -EINVAL; - if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN)) + if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT)) + return -EINVAL; + + /* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */ + if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN)) return -EINVAL; + current_bits = get_current_mdwe(); + if (current_bits && current_bits != bits) + return -EPERM; /* Cannot unset the flags */ + + if (bits & PR_MDWE_NO_INHERIT) + set_bit(MMF_HAS_MDWE_NO_INHERIT, ¤t->mm->flags); if (bits & PR_MDWE_REFUSE_EXEC_GAIN) set_bit(MMF_HAS_MDWE, ¤t->mm->flags); - else if (test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) - return -EPERM; /* Cannot unset the flag */ return 0; } @@ -2390,9 +2412,7 @@ static 
inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3, { if (arg2 || arg3 || arg4 || arg5) return -EINVAL; - - return test_bit(MMF_HAS_MDWE, ¤t->mm->flags) ? - PR_MDWE_REFUSE_EXEC_GAIN : 0; + return get_current_mdwe(); } static int prctl_get_auxv(void __user *addr, unsigned long len) diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index 9a85c69782bd..370ed14b1ae0 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -284,6 +284,7 @@ struct prctl_mm_map { /* Memory deny write / execute */ #define PR_SET_MDWE 65 # define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0) +# define PR_MDWE_NO_INHERIT (1UL << 1) #define PR_GET_MDWE 66 -- cgit v1.2.3 From 2dc539ac4d2f556105ea7e2a37d23379a79a47eb Mon Sep 17 00:00:00 2001 From: Florent Revest Date: Mon, 28 Aug 2023 17:08:58 +0200 Subject: kselftest: vm: add tests for no-inherit memory-deny-write-execute Add some tests to cover the new PR_MDWE_NO_INHERIT flag of the PR_SET_MDWE prctl. Check that: - it can't be set without PR_SET_MDWE - MDWE flags can't be unset - when set, PR_SET_MDWE doesn't propagate to children Link: https://lkml.kernel.org/r/20230828150858.393570-7-revest@chromium.org Signed-off-by: Florent Revest Acked-by: Catalin Marinas Reviewed-by: Kees Cook Cc: Alexey Izbyshev Cc: Anshuman Khandual Cc: Ayush Jain Cc: David Hildenbrand Cc: Greg Thelen Cc: Joey Gouly Cc: KP Singh Cc: Mark Brown Cc: Michal Hocko Cc: Peter Xu Cc: Ryan Roberts Cc: Szabolcs Nagy Cc: Topi Miettinen Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mdwe_test.c | 114 +++++++++++++++++++++++++++++++-- 1 file changed, 108 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c index 1b84cf8e1bbe..200bedcdc32e 100644 --- a/tools/testing/selftests/mm/mdwe_test.c +++ b/tools/testing/selftests/mm/mdwe_test.c @@ -22,6 +22,9 @@ TEST(prctl_flags) { + EXPECT_LT(prctl(PR_SET_MDWE, PR_MDWE_NO_INHERIT, 0L, 0L, 7L), 0); + EXPECT_EQ(errno, EINVAL); + EXPECT_LT(prctl(PR_SET_MDWE, 7L, 0L, 0L, 0L), 0); EXPECT_EQ(errno, EINVAL); EXPECT_LT(prctl(PR_SET_MDWE, 0L, 7L, 0L, 0L), 0); @@ -41,6 +44,84 @@ TEST(prctl_flags) EXPECT_EQ(errno, EINVAL); } +FIXTURE(consecutive_prctl_flags) {}; +FIXTURE_SETUP(consecutive_prctl_flags) {} +FIXTURE_TEARDOWN(consecutive_prctl_flags) {} + +FIXTURE_VARIANT(consecutive_prctl_flags) +{ + unsigned long first_flags; + unsigned long second_flags; + bool should_work; +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, can_keep_no_flags) +{ + .first_flags = 0, + .second_flags = 0, + .should_work = true, +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, can_keep_exec_gain) +{ + .first_flags = PR_MDWE_REFUSE_EXEC_GAIN, + .second_flags = PR_MDWE_REFUSE_EXEC_GAIN, + .should_work = true, +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, can_keep_both_flags) +{ + .first_flags = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, + .second_flags = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, + .should_work = true, +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, cant_disable_mdwe) +{ + .first_flags = PR_MDWE_REFUSE_EXEC_GAIN, + .second_flags = 0, + .should_work = false, +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, cant_disable_mdwe_no_inherit) +{ + .first_flags = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, + .second_flags = 0, + .should_work = false, +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, cant_disable_no_inherit) +{ + .first_flags = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, + 
.second_flags = PR_MDWE_REFUSE_EXEC_GAIN, + .should_work = false, +}; + +FIXTURE_VARIANT_ADD(consecutive_prctl_flags, cant_enable_no_inherit) +{ + .first_flags = PR_MDWE_REFUSE_EXEC_GAIN, + .second_flags = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, + .should_work = false, +}; + +TEST_F(consecutive_prctl_flags, two_prctls) +{ + int ret; + + EXPECT_EQ(prctl(PR_SET_MDWE, variant->first_flags, 0L, 0L, 0L), 0); + + ret = prctl(PR_SET_MDWE, variant->second_flags, 0L, 0L, 0L); + if (variant->should_work) { + EXPECT_EQ(ret, 0); + + ret = prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L); + ASSERT_EQ(ret, variant->second_flags); + } else { + EXPECT_NE(ret, 0); + ASSERT_EQ(errno, EPERM); + } +} + FIXTURE(mdwe) { void *p; @@ -53,28 +134,45 @@ FIXTURE_VARIANT(mdwe) { bool enabled; bool forked; + bool inherit; }; FIXTURE_VARIANT_ADD(mdwe, stock) { .enabled = false, .forked = false, + .inherit = false, }; FIXTURE_VARIANT_ADD(mdwe, enabled) { .enabled = true, .forked = false, + .inherit = true, }; -FIXTURE_VARIANT_ADD(mdwe, forked) +FIXTURE_VARIANT_ADD(mdwe, inherited) { .enabled = true, .forked = true, + .inherit = true, }; +FIXTURE_VARIANT_ADD(mdwe, not_inherited) +{ + .enabled = true, + .forked = true, + .inherit = false, +}; + +static bool executable_map_should_fail(const FIXTURE_VARIANT(mdwe) *variant) +{ + return variant->enabled && (!variant->forked || variant->inherit); +} + FIXTURE_SETUP(mdwe) { + unsigned long mdwe_flags; int ret, status; self->p = NULL; @@ -84,13 +182,17 @@ FIXTURE_SETUP(mdwe) if (!variant->enabled) return; - ret = prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L); + mdwe_flags = PR_MDWE_REFUSE_EXEC_GAIN; + if (!variant->inherit) + mdwe_flags |= PR_MDWE_NO_INHERIT; + + ret = prctl(PR_SET_MDWE, mdwe_flags, 0L, 0L, 0L); ASSERT_EQ(ret, 0) { TH_LOG("PR_SET_MDWE failed or unsupported"); } ret = prctl(PR_GET_MDWE, 0L, 0L, 0L, 0L); - ASSERT_EQ(ret, 1); + ASSERT_EQ(ret, mdwe_flags); if (variant->forked) { self->pid = fork(); @@ -121,7 +223,7 @@ TEST_F(mdwe, mmap_READ_EXEC) TEST_F(mdwe, mmap_WRITE_EXEC) { self->p = mmap(NULL, self->size, PROT_WRITE | PROT_EXEC, self->flags, 0, 0); - if (variant->enabled) { + if (executable_map_should_fail(variant)) { EXPECT_EQ(self->p, MAP_FAILED); } else { EXPECT_NE(self->p, MAP_FAILED); @@ -147,7 +249,7 @@ TEST_F(mdwe, mprotect_add_EXEC) ASSERT_NE(self->p, MAP_FAILED); ret = mprotect(self->p, self->size, PROT_READ | PROT_EXEC); - if (variant->enabled) { + if (executable_map_should_fail(variant)) { EXPECT_LT(ret, 0); } else { EXPECT_EQ(ret, 0); @@ -162,7 +264,7 @@ TEST_F(mdwe, mprotect_WRITE_EXEC) ASSERT_NE(self->p, MAP_FAILED); ret = mprotect(self->p, self->size, PROT_WRITE | PROT_EXEC); - if (variant->enabled) { + if (executable_map_should_fail(variant)) { EXPECT_LT(ret, 0); } else { EXPECT_EQ(ret, 0); -- cgit v1.2.3 From 0374af1da077573b2bea8ff70258d3537c5a1e79 Mon Sep 17 00:00:00 2001 From: Stefan Roesch Date: Fri, 22 Sep 2023 14:11:41 -0700 Subject: mm/ksm: test case for prctl fork/exec workflow This adds a new test case to the ksm functional tests to make sure that the KSM setting is inherited by the child process when doing a fork/exec. 
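The flow being tested, as a stand-alone sketch (my illustration; the prctl values are those from linux/prctl.h, and the "--check" re-exec argument is a hypothetical stand-in for the child mode the actual test implements):

#include <sys/prctl.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67
#define PR_GET_MEMORY_MERGE 68
#endif

int main(int argc, char **argv)
{
	/* Post-exec child: report whether the KSM flag survived. */
	if (argc > 1 && !strcmp(argv[1], "--check"))
		return prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0) == 1 ? 0 : 1;

	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("PR_SET_MEMORY_MERGE");

	if (fork() == 0) {
		char *args[] = { "/proc/self/exe", "--check", NULL };

		execv(args[0], args);	/* the flag should survive the exec */
		_exit(1);
	}

	int status;

	wait(&status);
	printf("KSM flag after fork+exec: %s\n",
	       WIFEXITED(status) && !WEXITSTATUS(status) ? "inherited" : "lost");
	return 0;
}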
Link: https://lkml.kernel.org/r/20230922211141.320789-3-shr@devkernel.io Signed-off-by: Stefan Roesch Reviewed-by: David Hildenbrand Cc: Carl Klemm Cc: Johannes Weiner Cc: Rik van Riel Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/ksm_functional_tests.c | 66 ++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index 901e950f9138..fbff0dd09191 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -26,6 +26,7 @@ #define KiB 1024u #define MiB (1024 * KiB) +#define FORK_EXEC_CHILD_PRG_NAME "ksm_fork_exec_child" static int mem_fd; static int ksm_fd; @@ -479,6 +480,64 @@ static void test_prctl_fork(void) ksft_test_result_pass("PR_SET_MEMORY_MERGE value is inherited\n"); } +static int ksm_fork_exec_child(void) +{ + /* Test if KSM is enabled for the process. */ + return prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0) == 1; +} + +static void test_prctl_fork_exec(void) +{ + int ret, status; + pid_t child_pid; + + ksft_print_msg("[RUN] %s\n", __func__); + + ret = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0); + if (ret < 0 && errno == EINVAL) { + ksft_test_result_skip("PR_SET_MEMORY_MERGE not supported\n"); + return; + } else if (ret) { + ksft_test_result_fail("PR_SET_MEMORY_MERGE=1 failed\n"); + return; + } + + child_pid = fork(); + if (child_pid == -1) { + ksft_test_result_skip("fork() failed\n"); + return; + } else if (child_pid == 0) { + char *prg_name = "./ksm_functional_tests"; + char *argv_for_program[] = { prg_name, FORK_EXEC_CHILD_PRG_NAME }; + + execv(prg_name, argv_for_program); + return; + } + + if (waitpid(child_pid, &status, 0) > 0) { + if (WIFEXITED(status)) { + status = WEXITSTATUS(status); + if (status) { + ksft_test_result_fail("KSM not enabled\n"); + return; + } + } else { + ksft_test_result_fail("program didn't terminate normally\n"); + return; + } + } else { + ksft_test_result_fail("waitpid() failed\n"); + return; + } + + if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0)) { + ksft_test_result_fail("PR_SET_MEMORY_MERGE=0 failed\n"); + return; + } + + ksft_test_result_pass("PR_SET_MEMORY_MERGE value is inherited\n"); +} + static void test_prctl_unmerge(void) { const unsigned int size = 2 * MiB; @@ -536,9 +595,13 @@ unmap: int main(int argc, char **argv) { - unsigned int tests = 7; + unsigned int tests = 8; int err; + if (argc > 1 && !strcmp(argv[1], FORK_EXEC_CHILD_PRG_NAME)) { + exit(ksm_fork_exec_child() == 1 ? 0 : 1); + } + #ifdef __NR_userfaultfd tests++; #endif @@ -576,6 +639,7 @@ int main(int argc, char **argv) test_prctl(); test_prctl_fork(); + test_prctl_fork_exec(); test_prctl_unmerge(); err = ksft_get_fail_cnt(); -- cgit v1.2.3 From fc7f04dc23db50206bee7891516ed4726c3f64cf Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 11 Jul 2023 17:13:34 +0800 Subject: selftests/clone3: Fix broken test under !CONFIG_TIME_NS When executing the following command to test clone3 under !CONFIG_TIME_NS: # make headers && cd tools/testing/selftests/clone3 && make && ./clone3 we can see the following error info: # [7538] Trying clone3() with flags 0x80 (size 0) # Invalid argument - Failed to create new process # [7538] clone3() with flags says: -22 expected 0 not ok 18 [7538] Result (-22) is different than expected (0) ...
# Totals: pass:18 fail:1 xfail:0 xpass:0 skip:0 error:0 This is because if CONFIG_TIME_NS is not set, but the flag CLONE_NEWTIME (0x80) is used to clone a time namespace, it will return -EINVAL in copy_time_ns(). If the kernel does not support CONFIG_TIME_NS, /proc/self/ns/time will not exist, and then we should skip the clone3() test with CLONE_NEWTIME. With this patch under !CONFIG_TIME_NS: # make headers && cd tools/testing/selftests/clone3 && make && ./clone3 ... # Time namespaces are not supported ok 18 # SKIP Skipping clone3() with CLONE_NEWTIME ... # Totals: pass:18 fail:0 xfail:0 xpass:0 skip:1 error:0 Link: https://lkml.kernel.org/r/1689066814-13295-1-git-send-email-yangtiezhu@loongson.cn Fixes: 515bddf0ec41 ("selftests/clone3: test clone3 with CLONE_NEWTIME") Signed-off-by: Tiezhu Yang Suggested-by: Thomas Gleixner Cc: Christian Brauner Cc: Shuah Khan Cc: Signed-off-by: Andrew Morton --- tools/testing/selftests/clone3/clone3.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c index e60cf4da8fb0..1c61e3c022cb 100644 --- a/tools/testing/selftests/clone3/clone3.c +++ b/tools/testing/selftests/clone3/clone3.c @@ -196,7 +196,12 @@ int main(int argc, char *argv[]) CLONE3_ARGS_NO_TEST); /* Do a clone3() in a new time namespace */ - test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST); + if (access("/proc/self/ns/time", F_OK) == 0) { + test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST); + } else { + ksft_print_msg("Time namespaces are not supported\n"); + ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n"); + } /* Do a clone3() with exit signal (SIGCHLD) in flags */ test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST); -- cgit v1.2.3 From b58aa0f4fee61040bdb7557bf66822e929342ac5 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Mon, 21 Aug 2023 19:15:16 +0500 Subject: tools headers UAPI: update linux/fs.h with the kernel sources MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A new IOCTL and macros have been added in the kernel sources. Update the tools header file as well. Link: https://lkml.kernel.org/r/20230821141518.870589-5-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Alex Sierra Cc: Al Viro Cc: Andrei Vagin Cc: Axel Rasmussen Cc: Christian Brauner Cc: Cyrill Gorcunov Cc: Dan Williams Cc: David Hildenbrand Cc: Greg Kroah-Hartman Cc: Gustavo A. R. Silva Cc: "Liam R. Howlett" Cc: Matthew Wilcox Cc: Michal Miroslaw Cc: Michał Mirosław Cc: Mike Rapoport (IBM) Cc: Nadav Amit Cc: Pasha Tatashin Cc: Paul Gofman Cc: Peter Xu Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Yang Shi Cc: Yun Zhou Signed-off-by: Andrew Morton --- tools/include/uapi/linux/fs.h | 59 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) (limited to 'tools') diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h index b7b56871029c..da43810b7485 100644 --- a/tools/include/uapi/linux/fs.h +++ b/tools/include/uapi/linux/fs.h @@ -305,4 +305,63 @@ typedef int __bitwise __kernel_rwf_t; #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ RWF_APPEND) +/* Pagemap ioctl */ +#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg) + +/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */ +#define PAGE_IS_WPALLOWED (1 << 0) +#define PAGE_IS_WRITTEN (1 << 1) +#define PAGE_IS_FILE (1 << 2) +#define PAGE_IS_PRESENT (1 << 3) +#define PAGE_IS_SWAPPED (1 << 4) +#define PAGE_IS_PFNZERO (1 << 5) +#define PAGE_IS_HUGE (1 << 6) + +/* + * struct page_region - Page region with flags + * @start: Start of the region + * @end: End of the region (exclusive) + * @categories: PAGE_IS_* category bitmask for the region + */ +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +/* Flags for PAGEMAP_SCAN ioctl */ +#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */ +#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */ + +/* + * struct pm_scan_arg - Pagemap ioctl argument + * @size: Size of the structure + * @flags: Flags for the IOCTL + * @start: Starting address of the region + * @end: Ending address of the region + * @walk_end Address where the scan stopped (written by kernel). + * walk_end == end (address tags cleared) informs that the scan completed on entire range. + * @vec: Address of page_region struct array for output + * @vec_len: Length of the page_region struct array + * @max_pages: Optional limit for number of returned pages (0 = disabled) + * @category_inverted: PAGE_IS_* categories which values match if 0 instead of 1 + * @category_mask: Skip pages for which any category doesn't match + * @category_anyof_mask: Skip pages for which no category matches + * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned + */ +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + #endif /* _UAPI_LINUX_FS_H */ -- cgit v1.2.3 From 46fd75d4a3c94dfa5dfcf113881c0c704036b2b2 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Mon, 21 Aug 2023 19:15:18 +0500 Subject: selftests: mm: add pagemap ioctl tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add pagemap ioctl tests. Add several different types of tests to judge the correctness of the interface. Link: https://lkml.kernel.org/r/20230821141518.870589-7-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Alex Sierra Cc: Al Viro Cc: Andrei Vagin Cc: Axel Rasmussen Cc: Christian Brauner Cc: Cyrill Gorcunov Cc: Dan Williams Cc: David Hildenbrand Cc: Greg Kroah-Hartman Cc: Gustavo A. R. Silva Cc: "Liam R.
Howlett" Cc: Matthew Wilcox Cc: Michal Miroslaw Cc: Michał Mirosław Cc: Mike Rapoport (IBM) Cc: Nadav Amit Cc: Pasha Tatashin Cc: Paul Gofman Cc: Peter Xu Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Yang Shi Cc: Yun Zhou Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 2 + tools/testing/selftests/mm/Makefile | 3 +- tools/testing/selftests/mm/config | 1 + tools/testing/selftests/mm/pagemap_ioctl.c | 1660 ++++++++++++++++++++++++++++ tools/testing/selftests/mm/run_vmtests.sh | 4 + 5 files changed, 1669 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/mm/pagemap_ioctl.c (limited to 'tools') diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index cdc9ce4426b9..cc920c79ff1c 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -18,6 +18,8 @@ mremap_dontunmap mremap_test on-fault-limit transhuge-stress +pagemap_ioctl +*.tmp* protection_keys protection_keys_32 protection_keys_64 diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 6a9fc5693145..2a89989afafc 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -33,7 +33,7 @@ endif MAKEFLAGS += --no-builtin-rules CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) -LDLIBS = -lrt -lpthread +LDLIBS = -lrt -lpthread -lm TEST_GEN_FILES = cow TEST_GEN_FILES += compaction_test @@ -60,6 +60,7 @@ TEST_GEN_FILES += mrelease_test TEST_GEN_FILES += mremap_dontunmap TEST_GEN_FILES += mremap_test TEST_GEN_FILES += on-fault-limit +TEST_GEN_PROGS += pagemap_ioctl TEST_GEN_FILES += thuge-gen TEST_GEN_FILES += transhuge-stress TEST_GEN_FILES += uffd-stress diff --git a/tools/testing/selftests/mm/config b/tools/testing/selftests/mm/config index be087c4bc396..4309916f629e 100644 --- a/tools/testing/selftests/mm/config +++ b/tools/testing/selftests/mm/config @@ -1,5 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_USERFAULTFD=y +CONFIG_PTE_MARKER_UFFD_WP=y CONFIG_TEST_VMALLOC=m CONFIG_DEVICE_PRIVATE=y CONFIG_TEST_HMM=m diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c new file mode 100644 index 000000000000..0161fb49fc6e --- /dev/null +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -0,0 +1,1660 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "vm_util.h" +#include "../kselftest.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PAGEMAP_BITS_ALL (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \ + PAGE_IS_FILE | PAGE_IS_PRESENT | \ + PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \ + PAGE_IS_HUGE) +#define PAGEMAP_NON_WRITTEN_BITS (PAGE_IS_WPALLOWED | PAGE_IS_FILE | \ + PAGE_IS_PRESENT | PAGE_IS_SWAPPED | \ + PAGE_IS_PFNZERO | PAGE_IS_HUGE) + +#define TEST_ITERATIONS 100 +#define PAGEMAP "/proc/self/pagemap" +int pagemap_fd; +int uffd; +int page_size; +int hpage_size; + +#define LEN(region) ((region.end - region.start)/page_size) + +static long pagemap_ioctl(void *start, int len, void *vec, int vec_len, int flag, + int max_pages, long required_mask, long anyof_mask, long excluded_mask, + long return_mask) +{ + struct pm_scan_arg arg; + + arg.start = (uintptr_t)start; + arg.end = (uintptr_t)(start + len); + arg.vec = (uintptr_t)vec; + arg.vec_len = vec_len; + arg.flags = flag; + arg.size = sizeof(struct pm_scan_arg); + arg.max_pages = 
max_pages; + arg.category_mask = required_mask; + arg.category_anyof_mask = anyof_mask; + arg.category_inverted = excluded_mask; + arg.return_mask = return_mask; + + return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); +} + +static long pagemap_ioc(void *start, int len, void *vec, int vec_len, int flag, + int max_pages, long required_mask, long anyof_mask, long excluded_mask, + long return_mask, long *walk_end) +{ + struct pm_scan_arg arg; + int ret; + + arg.start = (uintptr_t)start; + arg.end = (uintptr_t)(start + len); + arg.vec = (uintptr_t)vec; + arg.vec_len = vec_len; + arg.flags = flag; + arg.size = sizeof(struct pm_scan_arg); + arg.max_pages = max_pages; + arg.category_mask = required_mask; + arg.category_anyof_mask = anyof_mask; + arg.category_inverted = excluded_mask; + arg.return_mask = return_mask; + + ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); + + if (walk_end) + *walk_end = arg.walk_end; + + return ret; +} + + +int init_uffd(void) +{ + struct uffdio_api uffdio_api; + + uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY); + if (uffd == -1) + ksft_exit_fail_msg("uffd syscall failed\n"); + + uffdio_api.api = UFFD_API; + uffdio_api.features = UFFD_FEATURE_WP_UNPOPULATED | UFFD_FEATURE_WP_ASYNC | + UFFD_FEATURE_WP_HUGETLBFS_SHMEM; + if (ioctl(uffd, UFFDIO_API, &uffdio_api)) + ksft_exit_fail_msg("UFFDIO_API\n"); + + if (!(uffdio_api.api & UFFDIO_REGISTER_MODE_WP) || + !(uffdio_api.features & UFFD_FEATURE_WP_UNPOPULATED) || + !(uffdio_api.features & UFFD_FEATURE_WP_ASYNC) || + !(uffdio_api.features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM)) + ksft_exit_fail_msg("UFFDIO_API error %llu\n", uffdio_api.api); + + return 0; +} + +int wp_init(void *lpBaseAddress, int dwRegionSize) +{ + struct uffdio_register uffdio_register; + struct uffdio_writeprotect wp; + + uffdio_register.range.start = (unsigned long)lpBaseAddress; + uffdio_register.range.len = dwRegionSize; + uffdio_register.mode = UFFDIO_REGISTER_MODE_WP; + if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) + ksft_exit_fail_msg("ioctl(UFFDIO_REGISTER) %d %s\n", errno, strerror(errno)); + + if (!(uffdio_register.ioctls & UFFDIO_WRITEPROTECT)) + ksft_exit_fail_msg("ioctl set is incorrect\n"); + + wp.range.start = (unsigned long)lpBaseAddress; + wp.range.len = dwRegionSize; + wp.mode = UFFDIO_WRITEPROTECT_MODE_WP; + + if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp)) + ksft_exit_fail_msg("ioctl(UFFDIO_WRITEPROTECT)\n"); + + return 0; +} + +int wp_free(void *lpBaseAddress, int dwRegionSize) +{ + struct uffdio_register uffdio_register; + + uffdio_register.range.start = (unsigned long)lpBaseAddress; + uffdio_register.range.len = dwRegionSize; + uffdio_register.mode = UFFDIO_REGISTER_MODE_WP; + if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) + ksft_exit_fail_msg("ioctl unregister failure\n"); + return 0; +} + +int wp_addr_range(void *lpBaseAddress, int dwRegionSize) +{ + if (pagemap_ioctl(lpBaseAddress, dwRegionSize, NULL, 0, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0) + ksft_exit_fail_msg("error %d %d %s\n", 1, errno, strerror(errno)); + + return 0; +} + +void *gethugetlb_mem(int size, int *shmid) +{ + char *mem; + + if (shmid) { + *shmid = shmget(2, size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W); + if (*shmid < 0) + return NULL; + + mem = shmat(*shmid, 0, 0); + if (mem == (char *)-1) { + shmctl(*shmid, IPC_RMID, NULL); + ksft_exit_fail_msg("Shared memory attach failure\n"); + } + } else { + mem = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_HUGETLB 
| MAP_PRIVATE, -1, 0); + if (mem == MAP_FAILED) + return NULL; + } + + return mem; +} + +int userfaultfd_tests(void) +{ + int mem_size, vec_size, written, num_pages = 16; + char *mem, *vec; + + mem_size = num_pages * page_size; + mem = mmap(NULL, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(mem, mem_size); + + /* Change protection of pages differently */ + mprotect(mem, mem_size/8, PROT_READ|PROT_WRITE); + mprotect(mem + 1 * mem_size/8, mem_size/8, PROT_READ); + mprotect(mem + 2 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE); + mprotect(mem + 3 * mem_size/8, mem_size/8, PROT_READ); + mprotect(mem + 4 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE); + mprotect(mem + 5 * mem_size/8, mem_size/8, PROT_NONE); + mprotect(mem + 6 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE); + mprotect(mem + 7 * mem_size/8, mem_size/8, PROT_READ); + + wp_addr_range(mem + (mem_size/16), mem_size - 2 * (mem_size/8)); + wp_addr_range(mem, mem_size); + + vec_size = mem_size/page_size; + vec = malloc(sizeof(struct page_region) * vec_size); + + written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", __func__); + + wp_free(mem, mem_size); + munmap(mem, mem_size); + free(vec); + return 0; +} + +int get_reads(struct page_region *vec, int vec_size) +{ + int i, sum = 0; + + for (i = 0; i < vec_size; i++) + sum += LEN(vec[i]); + + return sum; +} + +int sanity_tests_sd(void) +{ + int mem_size, vec_size, ret, ret2, ret3, i, num_pages = 1000, total_pages = 0; + int total_writes, total_reads, reads, count; + struct page_region *vec, *vec2; + char *mem, *m[2]; + long walk_end; + + vec_size = num_pages/2; + mem_size = num_pages * page_size; + + vec = malloc(sizeof(struct page_region) * vec_size); + if (!vec) + ksft_exit_fail_msg("error nomem\n"); + + vec2 = malloc(sizeof(struct page_region) * vec_size); + if (!vec2) + ksft_exit_fail_msg("error nomem\n"); + + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + /* 1. 
wrong operation */ + ksft_test_result(pagemap_ioctl(mem, 0, vec, vec_size, 0, + 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0, + "%s Zero range size is valid\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, NULL, vec_size, 0, + 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) < 0, + "%s output buffer must be specified with size\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, 0, 0, + 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0, + "%s output buffer can be 0\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, 0, 0, 0, + 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0, + "%s output buffer can be 0\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, -1, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0, + "%s wrong flag specified\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC | 0xFF, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0, + "%s flag has extra bits specified\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, + 0, 0, 0, 0, PAGE_IS_WRITTEN) >= 0, + "%s no selection mask is specified\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, + 0, PAGE_IS_WRITTEN, PAGE_IS_WRITTEN, 0, 0) == 0, + "%s no return mask is specified\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, + 0, PAGE_IS_WRITTEN, 0, 0, 0x1000) < 0, + "%s wrong return mask specified\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, 0xFFF, PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN) < 0, + "%s mixture of correct and wrong flag\n", __func__); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, 0, 0, PAGEMAP_BITS_ALL, PAGE_IS_WRITTEN) >= 0, + "%s PAGEMAP_BITS_ALL can be specified with PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", + __func__); + + /* 2. Clear area with larger vec size */ + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + ksft_test_result(ret >= 0, "%s Clear area with larger vec size\n", __func__); + + /* 3. Repeated pattern of written and non-written pages */ + for (i = 0; i < mem_size; i += 2 * page_size) + mem[i]++; + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, + 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == mem_size/(page_size * 2), + "%s Repeated pattern of written and non-written pages\n", __func__); + + /* 4. 
Repeated pattern of written and non-written pages in parts */ + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + num_pages/2 - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ret2 = pagemap_ioctl(mem, mem_size, vec, 2, 0, 0, PAGE_IS_WRITTEN, 0, 0, + PAGE_IS_WRITTEN); + if (ret2 < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + + ret3 = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret3 < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret3, errno, strerror(errno)); + + ksft_test_result((ret + ret3) == num_pages/2 && ret2 == 2, + "%s Repeated pattern of written and non-written pages in parts %d %d %d\n", + __func__, ret, ret3, ret2); + + /* 5. Repeated pattern of written and non-written pages max_pages */ + for (i = 0; i < mem_size; i += 2 * page_size) + mem[i]++; + mem[(mem_size/page_size - 1) * page_size]++; + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + num_pages/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ret2 = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret2 < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + + ksft_test_result(ret == num_pages/2 && ret2 == 1, + "%s Repeated pattern of written and non-written pages max_pages\n", + __func__); + + /* 6. only get 2 dirty pages and clear them as well */ + vec_size = mem_size/page_size; + memset(mem, -1, mem_size); + + /* get and clear second and third pages */ + ret = pagemap_ioctl(mem + page_size, 2 * page_size, vec, 1, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ret2 = pagemap_ioctl(mem, mem_size, vec2, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret2 < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) == 2 && + vec[0].start == (uintptr_t)(mem + page_size) && + ret2 == 2 && LEN(vec2[0]) == 1 && vec2[0].start == (uintptr_t)mem && + LEN(vec2[1]) == vec_size - 3 && + vec2[1].start == (uintptr_t)(mem + 3 * page_size), + "%s only get 2 written pages and clear them as well\n", __func__); + + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 7. 
Two regions */ + m[0] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (m[0] == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + m[1] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (m[1] == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(m[0], mem_size); + wp_init(m[1], mem_size); + wp_addr_range(m[0], mem_size); + wp_addr_range(m[1], mem_size); + + memset(m[0], 'a', mem_size); + memset(m[1], 'b', mem_size); + + wp_addr_range(m[0], mem_size); + + ret = pagemap_ioctl(m[1], mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, + PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) == mem_size/page_size, + "%s Two regions\n", __func__); + + wp_free(m[0], mem_size); + wp_free(m[1], mem_size); + munmap(m[0], mem_size); + munmap(m[1], mem_size); + + free(vec); + free(vec2); + + /* 8. Smaller vec */ + mem_size = 1050 * page_size; + vec_size = mem_size/(page_size*2); + + vec = malloc(sizeof(struct page_region) * vec_size); + if (!vec) + ksft_exit_fail_msg("error nomem\n"); + + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + for (i = 0; i < mem_size/page_size; i += 2) + mem[i * page_size]++; + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + total_pages += ret; + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + total_pages += ret; + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + total_pages += ret; + + ksft_test_result(total_pages == mem_size/(page_size*2), "%s Smaller max_pages\n", __func__); + + free(vec); + wp_free(mem, mem_size); + munmap(mem, mem_size); + total_pages = 0; + + /* 9. 
Smaller vec */ + mem_size = 10000 * page_size; + vec_size = 50; + + vec = malloc(sizeof(struct page_region) * vec_size); + if (!vec) + ksft_exit_fail_msg("error nomem\n"); + + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + for (count = 0; count < TEST_ITERATIONS; count++) { + total_writes = total_reads = 0; + walk_end = (long)mem; + + for (i = 0; i < mem_size; i += page_size) { + if (rand() % 2) { + mem[i]++; + total_writes++; + } + } + + while (total_reads < total_writes) { + ret = pagemap_ioc((void *)walk_end, mem_size-(walk_end - (long)mem), vec, + vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + if (ret > vec_size) + break; + + reads = get_reads(vec, ret); + total_reads += reads; + } + + if (total_reads != total_writes) + break; + } + + ksft_test_result(count == TEST_ITERATIONS, "Smaller vec\n"); + + free(vec); + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 10. Walk_end tester */ + vec_size = 1000; + mem_size = vec_size * page_size; + + vec = malloc(sizeof(struct page_region) * vec_size); + if (!vec) + ksft_exit_fail_msg("error nomem\n"); + + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + memset(mem, 0, mem_size); + + ret = pagemap_ioc(mem, 0, vec, vec_size, 0, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 0 && walk_end == (long)mem, + "Walk_end: Same start and end address\n"); + + ret = pagemap_ioc(mem, 0, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 0 && walk_end == (long)mem, + "Walk_end: Same start and end with WP\n"); + + ret = pagemap_ioc(mem, 0, vec, 0, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 0 && walk_end == (long)mem, + "Walk_end: Same start and end with 0 output buffer\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), + "Walk_end: Big vec\n"); + + ret = pagemap_ioc(mem, mem_size, vec, 1, 0, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), + "Walk_end: vec of minimum length\n"); + + ret = pagemap_ioc(mem, mem_size, vec, 1, 0, + vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), + "Walk_end: Max pages specified\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + vec_size/2, 
PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size/2), + "Walk_end: Half max pages\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size), + "Walk_end: 1 max page\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + -1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), + "Walk_end: max pages\n"); + + wp_addr_range(mem, mem_size); + for (i = 0; i < mem_size; i += 2 * page_size) + mem[i]++; + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + "Walk_end sparse: Big vec\n"); + + ret = pagemap_ioc(mem, mem_size, vec, 1, 0, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), + "Walk_end sparse: vec of minimum length\n"); + + ret = pagemap_ioc(mem, mem_size, vec, 1, 0, + vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), + "Walk_end sparse: Max pages specified\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size/2, 0, + vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + "Walk_end sparse: Max pages specified\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + "Walk_end sparse: Max pages specified\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + "Walk_endsparse : Half max pages\n"); + + ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, + 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), + "Walk_end: 1 max page\n"); + + free(vec); + wp_free(mem, mem_size); + munmap(mem, mem_size); + + return 0; +} + +int base_tests(char *prefix, char *mem, int mem_size, int skip) +{ + int vec_size, written; + struct page_region *vec, *vec2; + + if (skip) { + ksft_test_result_skip("%s all new pages must not be written (dirty)\n", prefix); + ksft_test_result_skip("%s all pages must be written (dirty)\n", prefix); + ksft_test_result_skip("%s all pages 
dirty other than first and the last one\n", + prefix); + ksft_test_result_skip("%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix); + ksft_test_result_skip("%s only middle page dirty\n", prefix); + ksft_test_result_skip("%s only two middle pages dirty\n", prefix); + return 0; + } + + vec_size = mem_size/page_size; + vec = malloc(sizeof(struct page_region) * vec_size); + vec2 = malloc(sizeof(struct page_region) * vec_size); + + /* 1. all new pages must be not be written (dirty) */ + written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", prefix); + + /* 2. all pages must be written */ + memset(mem, -1, mem_size); + + written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, + PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 1 && LEN(vec[0]) == mem_size/page_size, + "%s all pages must be written (dirty)\n", prefix); + + /* 3. all pages dirty other than first and the last one */ + written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + memset(mem + page_size, 0, mem_size - (2 * page_size)); + + written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 1 && LEN(vec[0]) >= vec_size - 2 && LEN(vec[0]) <= vec_size, + "%s all pages dirty other than first and the last one\n", prefix); + + written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 0, + "%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix); + + /* 4. only middle page dirty */ + written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + mem[vec_size/2 * page_size]++; + + written = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, + 0, 0, PAGE_IS_WRITTEN); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 1 && LEN(vec[0]) >= 1, + "%s only middle page dirty\n", prefix); + + /* 5. 
only two middle pages dirty and walk over only middle pages */ + written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + mem[vec_size/2 * page_size]++; + mem[(vec_size/2 + 1) * page_size]++; + + written = pagemap_ioctl(&mem[vec_size/2 * page_size], 2 * page_size, vec, 1, 0, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written == 1 && vec[0].start == (uintptr_t)(&mem[vec_size/2 * page_size]) + && LEN(vec[0]) == 2, + "%s only two middle pages dirty\n", prefix); + + free(vec); + free(vec2); + return 0; +} + +void *gethugepage(int map_size) +{ + int ret; + char *map; + + map = memalign(hpage_size, map_size); + if (!map) + ksft_exit_fail_msg("memalign failed %d %s\n", errno, strerror(errno)); + + ret = madvise(map, map_size, MADV_HUGEPAGE); + if (ret) + return NULL; + + memset(map, 0, map_size); + + return map; +} + +int hpage_unit_tests(void) +{ + char *map; + int ret, ret2; + size_t num_pages = 10; + int map_size = hpage_size * num_pages; + int vec_size = map_size/page_size; + struct page_region *vec, *vec2; + + vec = malloc(sizeof(struct page_region) * vec_size); + vec2 = malloc(sizeof(struct page_region) * vec_size); + if (!vec || !vec2) + ksft_exit_fail_msg("malloc failed\n"); + + map = gethugepage(map_size); + if (map) { + wp_init(map, map_size); + wp_addr_range(map, map_size); + + /* 1. all new huge page must not be written (dirty) */ + ret = pagemap_ioctl(map, map_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 0, "%s all new huge page must not be written (dirty)\n", + __func__); + + /* 2. all the huge page must not be written */ + ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 0, "%s all the huge page must not be written\n", __func__); + + /* 3. all the huge page must be written and clear dirty as well */ + memset(map, -1, map_size); + ret = pagemap_ioctl(map, map_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && vec[0].start == (uintptr_t)map && + LEN(vec[0]) == vec_size && vec[0].categories == PAGE_IS_WRITTEN, + "%s all the huge page must be written and clear\n", __func__); + + /* 4. 
only middle page written */ + wp_free(map, map_size); + free(map); + map = gethugepage(map_size); + wp_init(map, map_size); + wp_addr_range(map, map_size); + map[vec_size/2 * page_size]++; + + ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) > 0, + "%s only middle page written\n", __func__); + + wp_free(map, map_size); + free(map); + } else { + ksft_test_result_skip("%s all new huge page must be written\n", __func__); + ksft_test_result_skip("%s all the huge page must not be written\n", __func__); + ksft_test_result_skip("%s all the huge page must be written and clear\n", __func__); + ksft_test_result_skip("%s only middle page written\n", __func__); + } + + /* 5. clear first half of huge page */ + map = gethugepage(map_size); + if (map) { + wp_init(map, map_size); + wp_addr_range(map, map_size); + + memset(map, 0, map_size); + + wp_addr_range(map, map_size/2); + + ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 && + vec[0].start == (uintptr_t)(map + map_size/2), + "%s clear first half of huge page\n", __func__); + wp_free(map, map_size); + free(map); + } else { + ksft_test_result_skip("%s clear first half of huge page\n", __func__); + } + + /* 6. clear first half of huge page with limited buffer */ + map = gethugepage(map_size); + if (map) { + wp_init(map, map_size); + wp_addr_range(map, map_size); + + memset(map, 0, map_size); + + ret = pagemap_ioctl(map, map_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 && + vec[0].start == (uintptr_t)(map + map_size/2), + "%s clear first half of huge page with limited buffer\n", + __func__); + wp_free(map, map_size); + free(map); + } else { + ksft_test_result_skip("%s clear first half of huge page with limited buffer\n", + __func__); + } + + /* 7. clear second half of huge page */ + map = gethugepage(map_size); + if (map) { + wp_init(map, map_size); + wp_addr_range(map, map_size); + + memset(map, -1, map_size); + + ret = pagemap_ioctl(map + map_size/2, map_size/2, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, vec_size/2, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2, + "%s clear second half huge page\n", __func__); + wp_free(map, map_size); + free(map); + } else { + ksft_test_result_skip("%s clear second half huge page\n", __func__); + } + + /* 8. 
get half huge page */ + map = gethugepage(map_size); + if (map) { + wp_init(map, map_size); + wp_addr_range(map, map_size); + + memset(map, -1, map_size); + usleep(100); + + ret = pagemap_ioctl(map, map_size, vec, 1, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + hpage_size/(2*page_size), PAGE_IS_WRITTEN, 0, 0, + PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec[0]) == hpage_size/(2*page_size), + "%s get half huge page\n", __func__); + + ret2 = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); + if (ret2 < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + + ksft_test_result(ret2 == 1 && LEN(vec[0]) == (map_size - hpage_size/2)/page_size, + "%s get half huge page\n", __func__); + + wp_free(map, map_size); + free(map); + } else { + ksft_test_result_skip("%s get half huge page\n", __func__); + ksft_test_result_skip("%s get half huge page\n", __func__); + } + + free(vec); + free(vec2); + return 0; +} + +int unmapped_region_tests(void) +{ + void *start = (void *)0x10000000; + int written, len = 0x00040000; + int vec_size = len / page_size; + struct page_region *vec = malloc(sizeof(struct page_region) * vec_size); + + /* 1. Get written pages */ + written = pagemap_ioctl(start, len, vec, vec_size, 0, 0, + PAGEMAP_NON_WRITTEN_BITS, 0, 0, PAGEMAP_NON_WRITTEN_BITS); + if (written < 0) + ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); + + ksft_test_result(written >= 0, "%s Get status of pages\n", __func__); + + free(vec); + return 0; +} + +static void test_simple(void) +{ + int i; + char *map; + struct page_region vec; + + map = aligned_alloc(page_size, page_size); + if (!map) + ksft_exit_fail_msg("aligned_alloc failed\n"); + + wp_init(map, page_size); + wp_addr_range(map, page_size); + + for (i = 0 ; i < TEST_ITERATIONS; i++) { + if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 1) { + ksft_print_msg("written bit was 1, but should be 0 (i=%d)\n", i); + break; + } + + wp_addr_range(map, page_size); + /* Write something to the page to get the written bit enabled on the page */ + map[0]++; + + if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0) { + ksft_print_msg("written bit was 0, but should be 1 (i=%d)\n", i); + break; + } + + wp_addr_range(map, page_size); + } + wp_free(map, page_size); + free(map); + + ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__); +} + +int sanity_tests(void) +{ + int mem_size, vec_size, ret, fd, i, buf_size; + struct page_region *vec; + char *mem, *fmem; + struct stat sbuf; + char *tmp_buf; + + /* 1. 
wrong operation */ + mem_size = 10 * page_size; + vec_size = mem_size / page_size; + + vec = malloc(sizeof(struct page_region) * vec_size); + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED || vec == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, + 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0, + "%s WP op can be specified with !PAGE_IS_WRITTEN\n", __func__); + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0, + "%s required_mask specified\n", __func__); + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL) >= 0, + "%s anyof_mask specified\n", __func__); + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + 0, 0, PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL) >= 0, + "%s excluded_mask specified\n", __func__); + ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL, 0, + PAGEMAP_BITS_ALL) >= 0, + "%s required_mask and anyof_mask specified\n", __func__); + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 2. Get sd and present pages with anyof_mask */ + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + memset(mem, 0, mem_size); + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL); + ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && + (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) == + (PAGE_IS_WRITTEN | PAGE_IS_PRESENT), + "%s Get sd and present pages with anyof_mask\n", __func__); + + /* 3. Get sd and present pages with required_mask */ + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL); + ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && + (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) == + (PAGE_IS_WRITTEN | PAGE_IS_PRESENT), + "%s Get all the pages with required_mask\n", __func__); + + /* 4. Get sd and present pages with required_mask and anyof_mask */ + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, PAGE_IS_PRESENT, 0, PAGEMAP_BITS_ALL); + ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && + (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) == + (PAGE_IS_WRITTEN | PAGE_IS_PRESENT), + "%s Get sd and present pages with required_mask and anyof_mask\n", + __func__); + + /* 5. Don't get sd pages */ + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN, PAGEMAP_BITS_ALL); + ksft_test_result(ret == 0, "%s Don't get sd pages\n", __func__); + + /* 6. Don't get present pages */ + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, + PAGE_IS_PRESENT, 0, PAGE_IS_PRESENT, PAGEMAP_BITS_ALL); + ksft_test_result(ret == 0, "%s Don't get present pages\n", __func__); + + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 8. 
Find written present pages with return mask */ + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + memset(mem, 0, mem_size); + + ret = pagemap_ioctl(mem, mem_size, vec, vec_size, + PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, + 0, PAGEMAP_BITS_ALL, 0, PAGE_IS_WRITTEN); + ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && + vec[0].categories == PAGE_IS_WRITTEN, + "%s Find written present pages with return mask\n", __func__); + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 9. Memory mapped file */ + fd = open(__FILE__, O_RDONLY); + if (fd < 0) + ksft_exit_fail_msg("%s Memory mapped file\n", __func__); + + ret = stat(__FILE__, &sbuf); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + fmem = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (fmem == MAP_FAILED) + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); + + tmp_buf = malloc(sbuf.st_size); + memcpy(tmp_buf, fmem, sbuf.st_size); + + ret = pagemap_ioctl(fmem, sbuf.st_size, vec, vec_size, 0, 0, + 0, PAGEMAP_NON_WRITTEN_BITS, 0, PAGEMAP_NON_WRITTEN_BITS); + + ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem && + LEN(vec[0]) == ceilf((float)sbuf.st_size/page_size) && + (vec[0].categories & PAGE_IS_FILE), + "%s Memory mapped file\n", __func__); + + munmap(fmem, sbuf.st_size); + close(fd); + + /* 10. Create and read/write to a memory mapped file */ + buf_size = page_size * 10; + + fd = open(__FILE__".tmp2", O_RDWR | O_CREAT, 0666); + if (fd < 0) + ksft_exit_fail_msg("Read/write to memory: %s\n", + strerror(errno)); + + for (i = 0; i < buf_size; i++) + if (write(fd, "c", 1) < 0) + ksft_exit_fail_msg("Create and read/write to a memory mapped file\n"); + + fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fmem == MAP_FAILED) + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); + + wp_init(fmem, buf_size); + wp_addr_range(fmem, buf_size); + + for (i = 0; i < buf_size; i++) + fmem[i] = 'z'; + + msync(fmem, buf_size, MS_SYNC); + + ret = pagemap_ioctl(fmem, buf_size, vec, vec_size, 0, 0, + PAGE_IS_WRITTEN, PAGE_IS_PRESENT | PAGE_IS_SWAPPED | PAGE_IS_FILE, 0, + PAGEMAP_BITS_ALL); + + ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem && + LEN(vec[0]) == (buf_size/page_size) && + (vec[0].categories & PAGE_IS_WRITTEN), + "%s Read/write to memory\n", __func__); + + wp_free(fmem, buf_size); + munmap(fmem, buf_size); + close(fd); + + free(vec); + return 0; +} + +int mprotect_tests(void) +{ + int ret; + char *mem, *mem2; + struct page_region vec; + int pagemap_fd = open("/proc/self/pagemap", O_RDONLY); + + if (pagemap_fd < 0) { + fprintf(stderr, "open() failed\n"); + exit(1); + } + + /* 1. Map two pages */ + mem = mmap(0, 2 * page_size, PROT_READ|PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + wp_init(mem, 2 * page_size); + wp_addr_range(mem, 2 * page_size); + + /* Populate both pages. */ + memset(mem, 1, 2 * page_size); + + ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN, + 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec) == 2, "%s Both pages written\n", __func__); + + /* 2.
Start tracking */ + wp_addr_range(mem, 2 * page_size); + + ksft_test_result(pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, + PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0, + "%s Both pages are not written (dirty)\n", __func__); + + /* 3. Remap the second page */ + mem2 = mmap(mem + page_size, page_size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0); + if (mem2 == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + wp_init(mem2, page_size); + wp_addr_range(mem2, page_size); + + /* Protect + unprotect. */ + mprotect(mem, page_size, PROT_NONE); + mprotect(mem, 2 * page_size, PROT_READ); + mprotect(mem, 2 * page_size, PROT_READ|PROT_WRITE); + + /* Modify both pages. */ + memset(mem, 2, 2 * page_size); + + /* Protect + unprotect. */ + mprotect(mem, page_size, PROT_NONE); + mprotect(mem, page_size, PROT_READ); + mprotect(mem, page_size, PROT_READ|PROT_WRITE); + + ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN, + 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec) == 2, + "%s Both pages written after remap and mprotect\n", __func__); + + /* 4. Clear and make the pages written */ + wp_addr_range(mem, 2 * page_size); + + memset(mem, 'A', 2 * page_size); + + ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN, + 0, 0, PAGE_IS_WRITTEN); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec) == 2, + "%s Clear and make the pages written\n", __func__); + + wp_free(mem, 2 * page_size); + munmap(mem, 2 * page_size); + return 0; +} + +/* transact test */ +static const unsigned int nthreads = 6, pages_per_thread = 32, access_per_thread = 8; +static pthread_barrier_t start_barrier, end_barrier; +static unsigned int extra_thread_faults; +static unsigned int iter_count = 1000; +static volatile int finish; + +static ssize_t get_dirty_pages_reset(char *mem, unsigned int count, + int reset, int page_size) +{ + struct pm_scan_arg arg = {0}; + struct page_region rgns[256]; + int i, j, cnt, ret; + + arg.size = sizeof(struct pm_scan_arg); + arg.start = (uintptr_t)mem; + arg.max_pages = count; + arg.end = (uintptr_t)(mem + count * page_size); + arg.vec = (uintptr_t)rgns; + arg.vec_len = sizeof(rgns) / sizeof(*rgns); + if (reset) + arg.flags |= PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC; + arg.category_mask = PAGE_IS_WRITTEN; + arg.return_mask = PAGE_IS_WRITTEN; + + ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); + if (ret < 0) + ksft_exit_fail_msg("ioctl failed\n"); + + cnt = 0; + for (i = 0; i < ret; ++i) { + if (rgns[i].categories != PAGE_IS_WRITTEN) + ksft_exit_fail_msg("wrong flags\n"); + + for (j = 0; j < LEN(rgns[i]); ++j) + cnt++; + } + + return cnt; +} + +void *thread_proc(void *mem) +{ + int *m = mem; + long curr_faults, faults; + struct rusage r; + unsigned int i; + int ret; + + if (getrusage(RUSAGE_THREAD, &r)) + ksft_exit_fail_msg("getrusage\n"); + + curr_faults = r.ru_minflt; + + while (!finish) { + ret = pthread_barrier_wait(&start_barrier); + if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) + ksft_exit_fail_msg("pthread_barrier_wait\n"); + + for (i = 0; i < access_per_thread; ++i) + __atomic_add_fetch(m + i * (0x1000 / sizeof(*m)), 1, __ATOMIC_SEQ_CST); + + ret = pthread_barrier_wait(&end_barrier); + if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) + ksft_exit_fail_msg("pthread_barrier_wait\n"); + + if (getrusage(RUSAGE_THREAD, &r)) + ksft_exit_fail_msg("getrusage\n"); + + 
faults = r.ru_minflt - curr_faults; + if (faults < access_per_thread) + ksft_exit_fail_msg("faults < access_per_thread\n"); + + __atomic_add_fetch(&extra_thread_faults, faults - access_per_thread, + __ATOMIC_SEQ_CST); + curr_faults = r.ru_minflt; + } + + return NULL; +} + +static void transact_test(int page_size) +{ + unsigned int i, count, extra_pages; + pthread_t th; + char *mem; + int ret, c; + + if (pthread_barrier_init(&start_barrier, NULL, nthreads + 1)) + ksft_exit_fail_msg("pthread_barrier_init\n"); + + if (pthread_barrier_init(&end_barrier, NULL, nthreads + 1)) + ksft_exit_fail_msg("pthread_barrier_init\n"); + + mem = mmap(NULL, 0x1000 * nthreads * pages_per_thread, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("Error mmap %s.\n", strerror(errno)); + + wp_init(mem, 0x1000 * nthreads * pages_per_thread); + wp_addr_range(mem, 0x1000 * nthreads * pages_per_thread); + + memset(mem, 0, 0x1000 * nthreads * pages_per_thread); + + count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); + ksft_test_result(count > 0, "%s count %d\n", __func__, count); + count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); + ksft_test_result(count == 0, "%s count %d\n", __func__, count); + + finish = 0; + for (i = 0; i < nthreads; ++i) + pthread_create(&th, NULL, thread_proc, mem + 0x1000 * i * pages_per_thread); + + extra_pages = 0; + for (i = 0; i < iter_count; ++i) { + count = 0; + + ret = pthread_barrier_wait(&start_barrier); + if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) + ksft_exit_fail_msg("pthread_barrier_wait\n"); + + count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, + page_size); + + ret = pthread_barrier_wait(&end_barrier); + if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) + ksft_exit_fail_msg("pthread_barrier_wait\n"); + + if (count > nthreads * access_per_thread) + ksft_exit_fail_msg("Too big count %d expected %d, iter %d\n", + count, nthreads * access_per_thread, i); + + c = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); + count += c; + + if (c > nthreads * access_per_thread) { + ksft_test_result_fail(" %s count > nthreads\n", __func__); + return; + } + + if (count != nthreads * access_per_thread) { + /* + * The purpose of the test is to make sure that no page updates are lost + * when the page updates and read-resetting soft dirty flags are performed + * in parallel. However, it is possible that the application will get the + * soft dirty flags twice on two consecutive read-resets. This seems + * unavoidable as the soft dirty flag is handled in software through page faults + * in the kernel. While updating the flags is supposed to be synchronized + * between page fault handling and read-reset, it is possible that + * read-reset happens after the page fault PTE update but before the application + * re-executes the write instruction. So read-reset gets the flag, clears write + * access, and the application gets a page fault again for the same write.
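+ * Such double-counted flags are therefore tolerated here: they are tallied + * into extra_pages below and reported at the end, and only a lower-than- + * expected count (a genuinely lost update) fails the test.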
+ */ + if (count < nthreads * access_per_thread) { + ksft_test_result_fail("Lost update, iter %d, %d vs %d.\n", i, count, + nthreads * access_per_thread); + return; + } + + extra_pages += count - nthreads * access_per_thread; + } + } + + pthread_barrier_wait(&start_barrier); + finish = 1; + pthread_barrier_wait(&end_barrier); + + ksft_test_result_pass("%s Extra pages %u (%.1lf%%), extra thread faults %d.\n", __func__, + extra_pages, + 100.0 * extra_pages / (iter_count * nthreads * access_per_thread), + extra_thread_faults); +} + +int main(void) +{ + int mem_size, shmid, buf_size, fd, i, ret; + char *mem, *map, *fmem; + struct stat sbuf; + + ksft_print_header(); + ksft_set_plan(115); + + page_size = getpagesize(); + hpage_size = read_pmd_pagesize(); + + pagemap_fd = open(PAGEMAP, O_RDONLY); + if (pagemap_fd < 0) + return -EINVAL; + + if (init_uffd()) + ksft_exit_fail_msg("uffd init failed\n"); + + /* 1. Sanity testing */ + sanity_tests_sd(); + + /* 2. Normal page testing */ + mem_size = 10 * page_size; + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + base_tests("Page testing:", mem, mem_size, 0); + + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 3. Large page testing */ + mem_size = 512 * 10 * page_size; + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + base_tests("Large Page testing:", mem, mem_size, 0); + + wp_free(mem, mem_size); + munmap(mem, mem_size); + + /* 4. Huge page testing */ + map = gethugepage(hpage_size); + if (map) { + wp_init(map, hpage_size); + wp_addr_range(map, hpage_size); + base_tests("Huge page testing:", map, hpage_size, 0); + wp_free(map, hpage_size); + free(map); + } else { + base_tests("Huge page testing:", NULL, 0, 1); + } + + /* 5. SHM Hugetlb page testing */ + mem_size = 2*1024*1024; + mem = gethugetlb_mem(mem_size, &shmid); + if (mem) { + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + base_tests("Hugetlb shmem testing:", mem, mem_size, 0); + + wp_free(mem, mem_size); + shmctl(shmid, IPC_RMID, NULL); + } else { + base_tests("Hugetlb shmem testing:", NULL, 0, 1); + } + + /* 6. Hugetlb page testing */ + mem = gethugetlb_mem(mem_size, NULL); + if (mem) { + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + base_tests("Hugetlb mem testing:", mem, mem_size, 0); + + wp_free(mem, mem_size); + } else { + base_tests("Hugetlb mem testing:", NULL, 0, 1); + } + + /* 7. File Hugetlb testing */ + mem_size = 2*1024*1024; + fd = memfd_create("uffd-test", MFD_HUGETLB | MFD_NOEXEC_SEAL); + mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (mem) { + wp_init(mem, mem_size); + wp_addr_range(mem, mem_size); + + base_tests("Hugetlb shmem testing:", mem, mem_size, 0); + + wp_free(mem, mem_size); + shmctl(shmid, IPC_RMID, NULL); + } else { + base_tests("Hugetlb shmem testing:", NULL, 0, 1); + } + close(fd); + + /* 8. 
File memory testing */ + buf_size = page_size * 10; + + fd = open(__FILE__".tmp0", O_RDWR | O_CREAT, 0777); + if (fd < 0) + ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n", + strerror(errno)); + + for (i = 0; i < buf_size; i++) + if (write(fd, "c", 1) < 0) + ksft_exit_fail_msg("Create and read/write to a memory mapped file\n"); + + ret = stat(__FILE__".tmp0", &sbuf); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + fmem = mmap(NULL, sbuf.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fmem == MAP_FAILED) + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); + + wp_init(fmem, sbuf.st_size); + wp_addr_range(fmem, sbuf.st_size); + + base_tests("File memory testing:", fmem, sbuf.st_size, 0); + + wp_free(fmem, sbuf.st_size); + munmap(fmem, sbuf.st_size); + close(fd); + + /* 9. File memory testing */ + buf_size = page_size * 10; + + fd = memfd_create(__FILE__".tmp00", MFD_NOEXEC_SEAL); + if (fd < 0) + ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n", + strerror(errno)); + + if (ftruncate(fd, buf_size)) + ksft_exit_fail_msg("Error ftruncate\n"); + + for (i = 0; i < buf_size; i++) + if (write(fd, "c", 1) < 0) + ksft_exit_fail_msg("Create and read/write to a memory mapped file\n"); + + fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fmem == MAP_FAILED) + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); + + wp_init(fmem, buf_size); + wp_addr_range(fmem, buf_size); + + base_tests("File anonymous memory testing:", fmem, buf_size, 0); + + wp_free(fmem, buf_size); + munmap(fmem, buf_size); + close(fd); + + /* 10. Huge page tests */ + hpage_unit_tests(); + + /* 11. Iterative test */ + test_simple(); + + /* 12. Mprotect test */ + mprotect_tests(); + + /* 13. Transact test */ + transact_test(page_size); + + /* 14. Sanity testing */ + sanity_tests(); + + /* 15. Unmapped address test */ + unmapped_region_tests(); + + /* 16. Userfaultfd tests */ + userfaultfd_tests(); + + close(pagemap_fd); + return ksft_exit_pass(); +} diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 3e2bc818d566..45941ad5f658 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -56,6 +56,8 @@ separated by spaces: memory protection key tests - soft_dirty test soft dirty page bit semantics +- pagemap + test pagemap_scan IOCTL - cow test copy-on-write semantics - thp @@ -342,6 +344,8 @@ then CATEGORY="soft_dirty" run_test ./soft-dirty fi +CATEGORY="pagemap" run_test ./pagemap_ioctl + # COW tests CATEGORY="cow" run_test ./cow -- cgit v1.2.3 From 7771dcf019dde5998380c40deec0b42f5df9d9a3 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Fri, 29 Sep 2023 16:13:59 -0400 Subject: radix tree test suite: fix allocation calculation in kmem_cache_alloc_bulk() The bulk allocation is iterating through an array and allocating enough memory for the entire bulk allocation instead of a single array entry. Only allocate an array element of the size set in the kmem_cache. Link: https://lkml.kernel.org/r/20230929201359.2857583-1-Liam.Howlett@oracle.com Fixes: cc86e0c2f306 ("radix tree test suite: add support for slab bulk APIs") Signed-off-by: Liam R.
Howlett Reported-by: Christophe JAILLET Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- tools/testing/radix-tree/linux.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index d587a558997f..61fe2601cb3a 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -165,9 +165,9 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, for (i = 0; i < size; i++) { if (cachep->align) { posix_memalign(&p[i], cachep->align, - cachep->size * size); + cachep->size); } else { - p[i] = malloc(cachep->size * size); + p[i] = malloc(cachep->size); } if (cachep->ctor) cachep->ctor(p[i]); -- cgit v1.2.3 From c8b90731427814b1e265c3c2bc01c38406963c32 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 5 Oct 2023 09:39:21 -0700 Subject: selftests/mm: export get_free_hugepages() Patch series "New selftest for mm", v2. This is a simple test case that reproduces an mm problem[1], where a page fault races with madvise(), and it is not trivial to reproduce and debug. This test-case aims to avoid such race problems from happening again, impacting workloads that leverages external allocators, such as tcmalloc, jemalloc, etc. [1] https://lore.kernel.org/all/20231001005659.2185316-1-riel@surriel.com/#r This patch (of 2): get_free_hugepages() is helpful for other hugepage tests. Export it to the common file (vm_util.c) to be reused. Link: https://lkml.kernel.org/r/20231005163922.87568-1-leitao@debian.org Link: https://lkml.kernel.org/r/20231005163922.87568-2-leitao@debian.org Signed-off-by: Breno Leitao Reviewed-by: Rik van Riel Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/hugetlb-madvise.c | 19 ------------------- tools/testing/selftests/mm/vm_util.c | 19 +++++++++++++++++++ tools/testing/selftests/mm/vm_util.h | 1 + 3 files changed, 20 insertions(+), 19 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/mm/hugetlb-madvise.c b/tools/testing/selftests/mm/hugetlb-madvise.c index d55322df4b73..f32d99565c5e 100644 --- a/tools/testing/selftests/mm/hugetlb-madvise.c +++ b/tools/testing/selftests/mm/hugetlb-madvise.c @@ -36,25 +36,6 @@ unsigned long huge_page_size; unsigned long base_page_size; -unsigned long get_free_hugepages(void) -{ - unsigned long fhp = 0; - char *line = NULL; - size_t linelen = 0; - FILE *f = fopen("/proc/meminfo", "r"); - - if (!f) - return fhp; - while (getline(&line, &linelen, f) > 0) { - if (sscanf(line, "HugePages_Free: %lu", &fhp) == 1) - break; - } - - free(line); - fclose(f); - return fhp; -} - void write_fault_pages(void *addr, unsigned long nr_pages) { unsigned long i; diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c index 558c9cd8901c..3082b40492dd 100644 --- a/tools/testing/selftests/mm/vm_util.c +++ b/tools/testing/selftests/mm/vm_util.c @@ -269,3 +269,22 @@ int uffd_unregister(int uffd, void *addr, uint64_t len) return ret; } + +unsigned long get_free_hugepages(void) +{ + unsigned long fhp = 0; + char *line = NULL; + size_t linelen = 0; + FILE *f = fopen("/proc/meminfo", "r"); + + if (!f) + return fhp; + while (getline(&line, &linelen, f) > 0) { + if (sscanf(line, "HugePages_Free: %lu", &fhp) == 1) + break; + } + + free(line); + fclose(f); + return fhp; +} diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index c7fa61f0dff8..c02990bbd56f 100644 --- 
a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -51,6 +51,7 @@ int uffd_register(int uffd, void *addr, uint64_t len, int uffd_unregister(int uffd, void *addr, uint64_t len); int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len, bool miss, bool wp, bool minor, uint64_t *ioctls); +unsigned long get_free_hugepages(void); /* * On ppc64 this will only work with radix 2M hugepage size -- cgit v1.2.3 From 116d57303a051bb2c7939a5026e441d8a7845db2 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 5 Oct 2023 09:39:22 -0700 Subject: selftests/mm: add a new test for madv and hugetlb Create a selftest that exercises the race between page faults and madvise(MADV_DONTNEED) in the same huge page. Do it by running two threads that touch the huge page and madvise(MADV_DONTNEED) at the same time. If a SIGBUS arrives during the page fault, the test should fail, since we hit the bug. The test doesn't have a signal handler, and if it fails, it fails like the following: ---------------------------------- running ./hugetlb_fault_after_madv ---------------------------------- ./run_vmtests.sh: line 186: 595563 Bus error (core dumped) "$@" [FAIL] This selftest goes together with the fix of the bug[1] itself. [1] https://lore.kernel.org/all/20231001005659.2185316-1-riel@surriel.com/#r Link: https://lkml.kernel.org/r/20231005163922.87568-3-leitao@debian.org Signed-off-by: Breno Leitao Reviewed-by: Rik van Riel Tested-by: Rik van Riel Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 1 + .../selftests/mm/hugetlb_fault_after_madv.c | 73 ++++++++++++++++++++++ tools/testing/selftests/mm/run_vmtests.sh | 4 ++ 3 files changed, 78 insertions(+) create mode 100644 tools/testing/selftests/mm/hugetlb_fault_after_madv.c (limited to 'tools') diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 2a89989afafc..78dfec8bc676 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -69,6 +69,7 @@ TEST_GEN_FILES += split_huge_page_test TEST_GEN_FILES += ksm_tests TEST_GEN_FILES += ksm_functional_tests TEST_GEN_FILES += mdwe_test +TEST_GEN_FILES += hugetlb_fault_after_madv ifneq ($(ARCH),arm64) TEST_GEN_PROGS += soft-dirty diff --git a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c new file mode 100644 index 000000000000..73b81c632366 --- /dev/null +++ b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/mman.h> +#include <sys/types.h> + +#include "vm_util.h" +#include "../kselftest.h" + +#define MMAP_SIZE (1 << 21) +#define INLOOP_ITER 100 + +char *huge_ptr; + +/* Touch the memory while it is being madvised() */ +void *touch(void *unused) +{ + char *ptr = (char *)huge_ptr; + + for (int i = 0; i < INLOOP_ITER; i++) + ptr[0] = '.'; + + return NULL; +} + +void *madv(void *unused) +{ + usleep(rand() % 10); + + for (int i = 0; i < INLOOP_ITER; i++) + madvise(huge_ptr, MMAP_SIZE, MADV_DONTNEED); + + return NULL; +} + +int main(void) +{ + unsigned long free_hugepages; + pthread_t thread1, thread2; + /* + * On kernel 6.4, we are able to reproduce the problem with ~1000 + * iterations + */ + int max = 10000; + + srand(getpid()); + + free_hugepages = get_free_hugepages(); + if (free_hugepages != 1) { + ksft_exit_skip("This test needs one and only one page to execute.
Got %lu\n", + free_hugepages); + } + + while (max--) { + huge_ptr = mmap(NULL, MMAP_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, + -1, 0); + + if ((unsigned long)huge_ptr == -1) + ksft_exit_skip("Failed to allocate huge page\n"); + + pthread_create(&thread1, NULL, madv, NULL); + pthread_create(&thread2, NULL, touch, NULL); + + pthread_join(thread1, NULL); + pthread_join(thread2, NULL); + munmap(huge_ptr, MMAP_SIZE); + } + + return KSFT_PASS; +} diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 45941ad5f658..bf4c4cd46600 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -223,6 +223,10 @@ CATEGORY="hugetlb" run_test ./hugepage-mremap CATEGORY="hugetlb" run_test ./hugepage-vmemmap CATEGORY="hugetlb" run_test ./hugetlb-madvise +# For this test, we need exactly one huge page +echo 1 > /proc/sys/vm/nr_hugepages +CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv + if test_selected "hugetlb"; then echo "NOTE: These hugetlb tests provide minimal coverage. Use" echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" -- cgit v1.2.3 From c0dddb7aa5f85e6150a1e3230e01d5ba286eded9 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Fri, 6 Oct 2023 11:46:29 -0700 Subject: selftests: add a selftest to verify hugetlb usage in memcg This patch adds a new kselftest to demonstrate and verify the new hugetlb memcg accounting behavior. Link: https://lkml.kernel.org/r/20231006184629.155543-5-nphamcs@gmail.com Signed-off-by: Nhat Pham Cc: Frank van der Linden Cc: Johannes Weiner Cc: Michal Hocko Cc: Mike Kravetz Cc: Muchun Song Cc: Rik van Riel Cc: Roman Gushchin Cc: Shakeel Butt Cc: Shuah Khan Cc: Tejun Heo Cc: Yosry Ahmed Cc: Zefan Li Signed-off-by: Andrew Morton --- MAINTAINERS | 2 + tools/testing/selftests/cgroup/.gitignore | 1 + tools/testing/selftests/cgroup/Makefile | 2 + .../testing/selftests/cgroup/test_hugetlb_memcg.c | 234 +++++++++++++++++++++ 4 files changed, 239 insertions(+) create mode 100644 tools/testing/selftests/cgroup/test_hugetlb_memcg.c (limited to 'tools') diff --git a/MAINTAINERS b/MAINTAINERS index 68ff5b62a1d2..7fd72948d10e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5269,6 +5269,7 @@ S: Maintained F: mm/memcontrol.c F: mm/swap_cgroup.c F: tools/testing/selftests/cgroup/memcg_protection.m +F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c F: tools/testing/selftests/cgroup/test_kmem.c F: tools/testing/selftests/cgroup/test_memcontrol.c @@ -9652,6 +9653,7 @@ F: include/linux/hugetlb.h F: mm/hugetlb.c F: mm/hugetlb_vmemmap.c F: mm/hugetlb_vmemmap.h +F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c HVA ST MEDIA DRIVER M: Jean-Christophe Trotin diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore index af8c3f30b9c1..2732e0b29271 100644 --- a/tools/testing/selftests/cgroup/.gitignore +++ b/tools/testing/selftests/cgroup/.gitignore @@ -7,4 +7,5 @@ test_kill test_cpu test_cpuset test_zswap +test_hugetlb_memcg wait_inotify diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile index c27f05f6ce9b..00b441928909 100644 --- a/tools/testing/selftests/cgroup/Makefile +++ b/tools/testing/selftests/cgroup/Makefile @@ -14,6 +14,7 @@ TEST_GEN_PROGS += test_kill TEST_GEN_PROGS += test_cpu TEST_GEN_PROGS += test_cpuset TEST_GEN_PROGS += test_zswap +TEST_GEN_PROGS += test_hugetlb_memcg LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h @@
-27,3 +28,4 @@ $(OUTPUT)/test_kill: cgroup_util.c $(OUTPUT)/test_cpu: cgroup_util.c $(OUTPUT)/test_cpuset: cgroup_util.c $(OUTPUT)/test_zswap: cgroup_util.c +$(OUTPUT)/test_hugetlb_memcg: cgroup_util.c diff --git a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c new file mode 100644 index 000000000000..f0fefeb4cc24 --- /dev/null +++ b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE + +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <unistd.h> +#include "../kselftest.h" +#include "cgroup_util.h" + +#define ADDR ((void *)(0x0UL)) +#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB) +/* mapping 8 MBs == 4 hugepages */ +#define LENGTH (8UL*1024*1024) +#define PROTECTION (PROT_READ | PROT_WRITE) + +/* borrowed from mm/hmm-tests.c */ +static long get_hugepage_size(void) +{ + int fd; + char buf[2048]; + int len; + char *p, *q, *path = "/proc/meminfo", *tag = "Hugepagesize:"; + long val; + + fd = open(path, O_RDONLY); + if (fd < 0) { + /* Error opening the file */ + return -1; + } + + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len < 0) { + /* Error in reading the file */ + return -1; + } + if (len == sizeof(buf)) { + /* Error file is too large */ + return -1; + } + buf[len] = '\0'; + + /* Search for a tag if provided */ + if (tag) { + p = strstr(buf, tag); + if (!p) + return -1; /* looks like the line we want isn't there */ + p += strlen(tag); + } else + p = buf; + + val = strtol(p, &q, 0); + if (*q != ' ') { + /* Error parsing the file */ + return -1; + } + + return val; +} + +static int set_file(const char *path, long value) +{ + FILE *file; + int ret; + + file = fopen(path, "w"); + if (!file) + return -1; + ret = fprintf(file, "%ld\n", value); + fclose(file); + return ret; +} + +static int set_nr_hugepages(long value) +{ + return set_file("/proc/sys/vm/nr_hugepages", value); +} + +static unsigned int check_first(char *addr) +{ + return *(unsigned int *)addr; +} + +static void write_data(char *addr) +{ + unsigned long i; + + for (i = 0; i < LENGTH; i++) + *(addr + i) = (char)i; +} + +static int hugetlb_test_program(const char *cgroup, void *arg) +{ + char *test_group = (char *)arg; + void *addr; + long old_current, expected_current, current; + int ret = EXIT_FAILURE; + + old_current = cg_read_long(test_group, "memory.current"); + set_nr_hugepages(20); + current = cg_read_long(test_group, "memory.current"); + if (current - old_current >= MB(2)) { + ksft_print_msg( + "setting nr_hugepages should not increase hugepage usage.\n"); + ksft_print_msg("before: %ld, after: %ld\n", old_current, current); + return EXIT_FAILURE; + } + + addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, 0, 0); + if (addr == MAP_FAILED) { + ksft_print_msg("fail to mmap.\n"); + return EXIT_FAILURE; + } + current = cg_read_long(test_group, "memory.current"); + if (current - old_current >= MB(2)) { + ksft_print_msg("mmap should not increase hugepage usage.\n"); + ksft_print_msg("before: %ld, after: %ld\n", old_current, current); + goto out_failed_munmap; + } + old_current = current; + + /* read the first page */ + check_first(addr); + expected_current = old_current + MB(2); + current = cg_read_long(test_group, "memory.current"); + if (!values_close(expected_current, current, 5)) { + ksft_print_msg("memory usage should increase by around 2MB.\n"); + ksft_print_msg( + "expected memory: %ld, actual memory: %ld\n", + expected_current, current); + goto out_failed_munmap; + } + + /* write
to the whole range */ + write_data(addr); + current = cg_read_long(test_group, "memory.current"); + expected_current = old_current + MB(8); + if (!values_close(expected_current, current, 5)) { + ksft_print_msg("memory usage should increase by around 8MB.\n"); + ksft_print_msg( + "expected memory: %ld, actual memory: %ld\n", + expected_current, current); + goto out_failed_munmap; + } + + /* unmap the whole range */ + munmap(addr, LENGTH); + current = cg_read_long(test_group, "memory.current"); + expected_current = old_current; + if (!values_close(expected_current, current, 5)) { + ksft_print_msg("memory usage should go back down.\n"); + ksft_print_msg( + "expected memory: %ld, actual memory: %ld\n", + expected_current, current); + return ret; + } + + ret = EXIT_SUCCESS; + return ret; + +out_failed_munmap: + munmap(addr, LENGTH); + return ret; +} + +static int test_hugetlb_memcg(char *root) +{ + int ret = KSFT_FAIL; + char *test_group; + + test_group = cg_name(root, "hugetlb_memcg_test"); + if (!test_group || cg_create(test_group)) { + ksft_print_msg("fail to create cgroup.\n"); + goto out; + } + + if (cg_write(test_group, "memory.max", "100M")) { + ksft_print_msg("fail to set cgroup memory limit.\n"); + goto out; + } + + /* disable swap */ + if (cg_write(test_group, "memory.swap.max", "0")) { + ksft_print_msg("fail to disable swap.\n"); + goto out; + } + + if (!cg_run(test_group, hugetlb_test_program, (void *)test_group)) + ret = KSFT_PASS; +out: + cg_destroy(test_group); + free(test_group); + return ret; +} + +int main(int argc, char **argv) +{ + char root[PATH_MAX]; + int ret = EXIT_SUCCESS, has_memory_hugetlb_acc; + + has_memory_hugetlb_acc = proc_mount_contains("memory_hugetlb_accounting"); + if (has_memory_hugetlb_acc < 0) + ksft_exit_skip("Failed to query cgroup mount option\n"); + else if (!has_memory_hugetlb_acc) + ksft_exit_skip("memory hugetlb accounting is disabled\n"); + + /* Unit is kB! */ + if (get_hugepage_size() != 2048) { + ksft_print_msg("test_hugetlb_memcg requires 2MB hugepages\n"); + ksft_test_result_skip("test_hugetlb_memcg\n"); + return ret; + } + + if (cg_find_unified_root(root, sizeof(root))) + ksft_exit_skip("cgroup v2 isn't mounted\n"); + + switch (test_hugetlb_memcg(root)) { + case KSFT_PASS: + ksft_test_result_pass("test_hugetlb_memcg\n"); + break; + case KSFT_SKIP: + ksft_test_result_skip("test_hugetlb_memcg\n"); + break; + default: + ret = EXIT_FAILURE; + ksft_test_result_fail("test_hugetlb_memcg\n"); + break; + } + + return ret; +} -- cgit v1.2.3 From 0179c62839bdc49769a986e6eb1a6ca6fc7d274a Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Fri, 13 Oct 2023 15:03:46 -0400 Subject: tools/mm: remove references to free_ts from page_owner_sort With the removal of free timestamps from page_owner output, we no longer need to handle free timestamps or the "unreleased" case. Remove all references to both cases.
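As an aside for readers new to this tool: each field is pulled out of a page_owner record with a POSIX regex, and after this series ts_nsec is the only timestamp left to extract. A minimal standalone sketch of that extraction (not part of the patch; the record fragment and the program framing are invented for illustration, and [[:space:]] is used as the portable spelling of the \s the tool relies on glibc to accept):

#include <regex.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* invented page_owner record fragment, for illustration only */
	const char *buf = "pid 1234, tgid 1234 (bash), ts 532115646 ns";
	regex_t ts_pattern;
	regmatch_t pmatch[2];
	char val[32] = {0};

	if (regcomp(&ts_pattern, "ts[[:space:]]*([0-9]*)[[:space:]]*ns", REG_EXTENDED))
		return 1;
	/* pmatch[1] is the digits captured by the parenthesized group */
	if (!regexec(&ts_pattern, buf, 2, pmatch, 0) && pmatch[1].rm_so != -1) {
		memcpy(val, buf + pmatch[1].rm_so, pmatch[1].rm_eo - pmatch[1].rm_so);
		printf("ts_nsec = %llu\n", strtoull(val, NULL, 10));
	}
	regfree(&ts_pattern);
	return 0;
}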
Link: https://lkml.kernel.org/r/20231013190350.579407-3-audra@redhat.com Signed-off-by: Audra Mitchell Acked-by: Rafael Aquini Reviewed-by: Vlastimil Babka Cc: Georgi Djakov Signed-off-by: Andrew Morton --- tools/mm/page_owner_sort.c | 98 ++++++---------------------------------------- 1 file changed, 12 insertions(+), 86 deletions(-) (limited to 'tools') diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c index 99798894b879..9c93f3f4514f 100644 --- a/tools/mm/page_owner_sort.c +++ b/tools/mm/page_owner_sort.c @@ -33,7 +33,6 @@ struct block_list { char *comm; // task command name char *stacktrace; __u64 ts_nsec; - __u64 free_ts_nsec; int len; int num; int page_num; @@ -42,18 +41,16 @@ struct block_list { int allocator; }; enum FILTER_BIT { - FILTER_UNRELEASE = 1<<1, - FILTER_PID = 1<<2, - FILTER_TGID = 1<<3, - FILTER_COMM = 1<<4 + FILTER_PID = 1<<1, + FILTER_TGID = 1<<2, + FILTER_COMM = 1<<3 }; enum CULL_BIT { - CULL_UNRELEASE = 1<<1, - CULL_PID = 1<<2, - CULL_TGID = 1<<3, - CULL_COMM = 1<<4, - CULL_STACKTRACE = 1<<5, - CULL_ALLOCATOR = 1<<6 + CULL_PID = 1<<1, + CULL_TGID = 1<<2, + CULL_COMM = 1<<3, + CULL_STACKTRACE = 1<<4, + CULL_ALLOCATOR = 1<<5 }; enum ALLOCATOR_BIT { ALLOCATOR_CMA = 1<<1, @@ -62,9 +59,8 @@ enum ALLOCATOR_BIT { ALLOCATOR_OTHERS = 1<<4 }; enum ARG_TYPE { - ARG_TXT, ARG_COMM, ARG_STACKTRACE, ARG_ALLOC_TS, ARG_FREE_TS, - ARG_CULL_TIME, ARG_PAGE_NUM, ARG_PID, ARG_TGID, ARG_UNKNOWN, ARG_FREE, - ARG_ALLOCATOR + ARG_TXT, ARG_COMM, ARG_STACKTRACE, ARG_ALLOC_TS, ARG_CULL_TIME, + ARG_PAGE_NUM, ARG_PID, ARG_TGID, ARG_UNKNOWN, ARG_ALLOCATOR }; enum SORT_ORDER { SORT_ASC = 1, @@ -90,7 +86,6 @@ static regex_t pid_pattern; static regex_t tgid_pattern; static regex_t comm_pattern; static regex_t ts_nsec_pattern; -static regex_t free_ts_nsec_pattern; static struct block_list *list; static int list_size; static int max_size; @@ -181,24 +176,6 @@ static int compare_ts(const void *p1, const void *p2) return l1->ts_nsec < l2->ts_nsec ? -1 : 1; } -static int compare_free_ts(const void *p1, const void *p2) -{ - const struct block_list *l1 = p1, *l2 = p2; - - return l1->free_ts_nsec < l2->free_ts_nsec ? -1 : 1; -} - -static int compare_release(const void *p1, const void *p2) -{ - const struct block_list *l1 = p1, *l2 = p2; - - if (!l1->free_ts_nsec && !l2->free_ts_nsec) - return 0; - if (l1->free_ts_nsec && l2->free_ts_nsec) - return 0; - return l1->free_ts_nsec ? 
1 : -1; -} - static int compare_cull_condition(const void *p1, const void *p2) { if (cull == 0) @@ -211,8 +188,6 @@ static int compare_cull_condition(const void *p1, const void *p2) return compare_tgid(p1, p2); if ((cull & CULL_COMM) && compare_comm(p1, p2)) return compare_comm(p1, p2); - if ((cull & CULL_UNRELEASE) && compare_release(p1, p2)) - return compare_release(p1, p2); if ((cull & CULL_ALLOCATOR) && compare_allocator(p1, p2)) return compare_allocator(p1, p2); return 0; @@ -366,24 +341,6 @@ static __u64 get_ts_nsec(char *buf) return ts_nsec; } -static __u64 get_free_ts_nsec(char *buf) -{ - __u64 free_ts_nsec; - char free_ts_nsec_str[FIELD_BUFF] = {0}; - char *endptr; - - search_pattern(&free_ts_nsec_pattern, free_ts_nsec_str, buf); - errno = 0; - free_ts_nsec = strtoull(free_ts_nsec_str, &endptr, 10); - if (errno != 0 || endptr == free_ts_nsec_str || *endptr != '\0') { - if (debug_on) - fprintf(stderr, "wrong free_ts_nsec in follow buf:\n%s\n", buf); - return -1; - } - - return free_ts_nsec; -} - static char *get_comm(char *buf) { char *comm_str = malloc(TASK_COMM_LEN); @@ -411,12 +368,8 @@ static int get_arg_type(const char *arg) return ARG_COMM; else if (!strcmp(arg, "stacktrace") || !strcmp(arg, "st")) return ARG_STACKTRACE; - else if (!strcmp(arg, "free") || !strcmp(arg, "f")) - return ARG_FREE; else if (!strcmp(arg, "txt") || !strcmp(arg, "T")) return ARG_TXT; - else if (!strcmp(arg, "free_ts") || !strcmp(arg, "ft")) - return ARG_FREE_TS; else if (!strcmp(arg, "alloc_ts") || !strcmp(arg, "at")) return ARG_ALLOC_TS; else if (!strcmp(arg, "allocator") || !strcmp(arg, "ator")) @@ -471,13 +424,6 @@ static bool match_str_list(const char *str, char **list, int list_size) static bool is_need(char *buf) { - __u64 ts_nsec, free_ts_nsec; - - ts_nsec = get_ts_nsec(buf); - free_ts_nsec = get_free_ts_nsec(buf); - - if ((filter & FILTER_UNRELEASE) && free_ts_nsec != 0 && ts_nsec < free_ts_nsec) - return false; if ((filter & FILTER_PID) && !match_num_list(get_pid(buf), fc.pids, fc.pids_size)) return false; if ((filter & FILTER_TGID) && @@ -528,7 +474,6 @@ static bool add_list(char *buf, int len, char *ext_buf) if (*list[list_size].stacktrace == '\n') list[list_size].stacktrace++; list[list_size].ts_nsec = get_ts_nsec(buf); - list[list_size].free_ts_nsec = get_free_ts_nsec(buf); list[list_size].allocator = get_allocator(buf, ext_buf); list_size++; if (list_size % 1000 == 0) { @@ -554,8 +499,6 @@ static bool parse_cull_args(const char *arg_str) cull |= CULL_COMM; else if (arg_type == ARG_STACKTRACE) cull |= CULL_STACKTRACE; - else if (arg_type == ARG_FREE) - cull |= CULL_UNRELEASE; else if (arg_type == ARG_ALLOCATOR) cull |= CULL_ALLOCATOR; else { @@ -616,8 +559,6 @@ static bool parse_sort_args(const char *arg_str) sc.cmps[i] = compare_stacktrace; else if (arg_type == ARG_ALLOC_TS) sc.cmps[i] = compare_ts; - else if (arg_type == ARG_FREE_TS) - sc.cmps[i] = compare_free_ts; else if (arg_type == ARG_TXT) sc.cmps[i] = compare_txt; else if (arg_type == ARG_ALLOCATOR) @@ -679,8 +620,6 @@ static void usage(void) "-P\t\tSort by tgid.\n" "-n\t\tSort by task command name.\n" "-a\t\tSort by memory allocate time.\n" - "-r\t\tSort by memory release time.\n" - "-f\t\tFilter out the information of blocks whose memory has been released.\n" "-d\t\tPrint debug information.\n" "--pid <pidlist>\tSelect by pid. This selects the information of blocks whose process ID numbers appear in <pidlist>.\n" "--tgid <tgidlist>\tSelect by tgid.
This selects the information of blocks whose Thread Group ID numbers appear in <tgidlist>.\n" @@ -706,7 +645,7 @@ int main(int argc, char **argv) { 0, 0, 0, 0}, }; - while ((opt = getopt_long(argc, argv, "adfmnprstP", longopts, NULL)) != -1) + while ((opt = getopt_long(argc, argv, "admnpstP", longopts, NULL)) != -1) switch (opt) { case 'a': set_single_cmp(compare_ts, SORT_ASC); @@ -714,18 +653,12 @@ int main(int argc, char **argv) case 'd': debug_on = true; break; - case 'f': - filter = filter | FILTER_UNRELEASE; - break; case 'm': set_single_cmp(compare_page_num, SORT_DESC); break; case 'p': set_single_cmp(compare_pid, SORT_ASC); break; - case 'r': - set_single_cmp(compare_free_ts, SORT_ASC); - break; case 's': set_single_cmp(compare_stacktrace, SORT_ASC); break; @@ -800,10 +733,8 @@ int main(int argc, char **argv) goto out_tgid; if (!check_regcomp(&comm_pattern, "tgid\\s*[0-9]*\\s*\\((.*)\\),\\s*ts")) goto out_comm; - if (!check_regcomp(&ts_nsec_pattern, "ts\\s*([0-9]*)\\s*ns,")) + if (!check_regcomp(&ts_nsec_pattern, "ts\\s*([0-9]*)\\s*ns")) goto out_ts; - if (!check_regcomp(&free_ts_nsec_pattern, "free_ts\\s*([0-9]*)\\s*ns")) - goto out_free_ts; fstat(fileno(fin), &st); max_size = st.st_size / 100; /* hack ... */ @@ -864,9 +795,6 @@ int main(int argc, char **argv) fprintf(fout, ", "); print_allocator(fout, list[i].allocator); } - if (cull & CULL_UNRELEASE) - fprintf(fout, " (%s)", - list[i].free_ts_nsec ? "UNRELEASED" : "RELEASED"); if (cull & CULL_STACKTRACE) fprintf(fout, ":\n%s", list[i].stacktrace); fprintf(fout, "\n"); @@ -880,8 +808,6 @@ out_free: free(buf); if (list) free(list); -out_free_ts: - regfree(&free_ts_nsec_pattern); out_ts: regfree(&ts_nsec_pattern); out_comm: -- cgit v1.2.3 From 63a150623a2bf94c9ed503719a3423675a3aa0d3 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Fri, 13 Oct 2023 15:03:47 -0400 Subject: tools/mm: filter out timestamps for correct collation With the introduction of allocation timestamps being included in page_owner output, each record becomes unique due to the timestamp nanosecond granularity. Remove the check in add_list that tries to collate each record during processing, as the memcmp() is just additional overhead at this point. Also keep the allocation timestamps, but allow collation to occur without consideration of the allocation timestamp except in the case where allocation timestamps are requested by the user (the -a option).
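The collation mechanics are easiest to see in isolation. The hunk below introduces remove_pattern() for this; here is a standalone sketch of the same idea (not the patch's code): cut the timestamp capture group out of the record text so that records differing only in their timestamps compare equal. memmove() is used in the sketch because source and destination can overlap, whereas the actual helper uses memcpy() over the same ranges.

#include <regex.h>
#include <stdio.h>
#include <string.h>

/* cut capture group 1 out of buf; return the new length */
static int remove_match(regex_t *pattern, char *buf, int len)
{
	regmatch_t pmatch[2];

	if (regexec(pattern, buf, 2, pmatch, 0) != 0 || pmatch[1].rm_so == -1)
		return len;
	/* shift the tail left over the matched group */
	memmove(buf + pmatch[1].rm_so, buf + pmatch[1].rm_eo, len - pmatch[1].rm_eo);
	return len - (pmatch[1].rm_eo - pmatch[1].rm_so);
}

int main(void)
{
	char rec[] = "pid 1234, ts 532115646 ns";
	regex_t ts;

	if (regcomp(&ts, "ts[[:space:]]*([0-9]*)[[:space:]]*ns", REG_EXTENDED))
		return 1;
	rec[remove_match(&ts, rec, strlen(rec))] = '\0';
	printf("%s\n", rec);	/* prints "pid 1234, ts  ns" */
	regfree(&ts);
	return 0;
}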
Link: https://lkml.kernel.org/r/20231013190350.579407-4-audra@redhat.com Signed-off-by: Audra Mitchell Acked-by: Rafael Aquini Acked-by: Vlastimil Babka Cc: Georgi Djakov Signed-off-by: Andrew Morton --- tools/mm/page_owner_sort.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c index 9c93f3f4514f..7ddabcb3a073 100644 --- a/tools/mm/page_owner_sort.c +++ b/tools/mm/page_owner_sort.c @@ -203,6 +203,21 @@ static int compare_sort_condition(const void *p1, const void *p2) return cmp; } +static int remove_pattern(regex_t *pattern, char *buf, int len) +{ + regmatch_t pmatch[2]; + int err; + + err = regexec(pattern, buf, 2, pmatch, REG_NOTBOL); + if (err != 0 || pmatch[1].rm_so == -1) + return len; + + memcpy(buf + pmatch[1].rm_so, + buf + pmatch[1].rm_eo, len - pmatch[1].rm_eo); + + return len - (pmatch[1].rm_eo - pmatch[1].rm_so); +} + static int search_pattern(regex_t *pattern, char *pattern_str, char *buf) { int err, val_len; @@ -443,13 +458,6 @@ static bool is_need(char *buf) static bool add_list(char *buf, int len, char *ext_buf) { - if (list_size != 0 && - len == list[list_size-1].len && - memcmp(buf, list[list_size-1].txt, len) == 0) { - list[list_size-1].num++; - list[list_size-1].page_num += get_page_num(buf); - return true; - } if (list_size == max_size) { fprintf(stderr, "max_size too small??\n"); return false; @@ -465,6 +473,9 @@ static bool add_list(char *buf, int len, char *ext_buf) return false; } memcpy(list[list_size].txt, buf, len); + if (sc.cmps[0] != compare_ts) { + len = remove_pattern(&ts_nsec_pattern, list[list_size].txt, len); + } list[list_size].txt[len] = 0; list[list_size].len = len; list[list_size].num = 1; -- cgit v1.2.3 From c6d5e4901e00031650132aa30aa082a47c3796e5 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Fri, 13 Oct 2023 15:03:48 -0400 Subject: tools/mm: fix the default case for page_owner_sort With the additional commands and timestamps added to the tool, the default case (-t) has been broken. Now that the allocation timestamps are saved outside of the txt field, we can properly sort the data by the number of times the record has been seen. Furthermore, prevent misuse of the command-line arguments so that only one compare option can be used.
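The flag-then-switch shape in the hunk below enforces that implicitly: two OR-ed COMP_* bits match no case label and fall into the default error path. For illustration, the same "at most one option" invariant can also be tested directly with a power-of-two check (a sketch, not the patch's code):

#include <stdio.h>

int main(void)
{
	unsigned int compare_flag = (1 << 0) | (1 << 2);	/* e.g. -a combined with -p */

	/* non-zero with more than one bit set means more than one compare option */
	if (compare_flag && (compare_flag & (compare_flag - 1)))
		fprintf(stderr, "only one compare option may be given\n");
	return 0;
}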
Link: https://lkml.kernel.org/r/20231013190350.579407-5-audra@redhat.com Signed-off-by: Audra Mitchell Acked-by: Rafael Aquini Acked-by: Vlastimil Babka Cc: Georgi Djakov Signed-off-by: Andrew Morton --- tools/mm/page_owner_sort.c | 61 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 8 deletions(-) (limited to 'tools') diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c index 7ddabcb3a073..5a260096ebaa 100644 --- a/tools/mm/page_owner_sort.c +++ b/tools/mm/page_owner_sort.c @@ -66,6 +66,16 @@ enum SORT_ORDER { SORT_ASC = 1, SORT_DESC = -1, }; +enum COMP_FLAG { + COMP_NO_FLAG = 0, + COMP_ALLOC = 1<<0, + COMP_PAGE_NUM = 1<<1, + COMP_PID = 1<<2, + COMP_STACK = 1<<3, + COMP_NUM = 1<<4, + COMP_TGID = 1<<5, + COMP_COMM = 1<<6 +}; struct filter_condition { pid_t *pids; pid_t *tgids; @@ -644,7 +654,7 @@ int main(int argc, char **argv) { FILE *fin, *fout; char *buf, *ext_buf; - int i, count; + int i, count, compare_flag; struct stat st; int opt; struct option longopts[] = { @@ -656,31 +666,33 @@ int main(int argc, char **argv) { 0, 0, 0, 0}, }; + compare_flag = COMP_NO_FLAG; + while ((opt = getopt_long(argc, argv, "admnpstP", longopts, NULL)) != -1) switch (opt) { case 'a': - set_single_cmp(compare_ts, SORT_ASC); + compare_flag |= COMP_ALLOC; break; case 'd': debug_on = true; break; case 'm': - set_single_cmp(compare_page_num, SORT_DESC); + compare_flag |= COMP_PAGE_NUM; break; case 'p': - set_single_cmp(compare_pid, SORT_ASC); + compare_flag |= COMP_PID; break; case 's': - set_single_cmp(compare_stacktrace, SORT_ASC); + compare_flag |= COMP_STACK; break; case 't': - set_single_cmp(compare_num, SORT_DESC); + compare_flag |= COMP_NUM; break; case 'P': - set_single_cmp(compare_tgid, SORT_ASC); + compare_flag |= COMP_TGID; break; case 'n': - set_single_cmp(compare_comm, SORT_ASC); + compare_flag |= COMP_COMM; break; case 1: filter = filter | FILTER_PID; @@ -728,6 +740,39 @@ int main(int argc, char **argv) exit(1); } + /* Only one compare option is allowed, yet we also want to handle the + * default case where no option is provided, but we still want to + * match the behavior of the -t option (compare by number of times + * a record is seen) + */ + switch (compare_flag) { + case COMP_ALLOC: + set_single_cmp(compare_ts, SORT_ASC); + break; + case COMP_PAGE_NUM: + set_single_cmp(compare_page_num, SORT_DESC); + break; + case COMP_PID: + set_single_cmp(compare_pid, SORT_ASC); + break; + case COMP_STACK: + set_single_cmp(compare_stacktrace, SORT_ASC); + break; + case COMP_NO_FLAG: + case COMP_NUM: + set_single_cmp(compare_num, SORT_DESC); + break; + case COMP_TGID: + set_single_cmp(compare_tgid, SORT_ASC); + break; + case COMP_COMM: + set_single_cmp(compare_comm, SORT_ASC); + break; + default: + usage(); + exit(1); + } + fin = fopen(argv[optind], "r"); fout = fopen(argv[optind + 1], "w"); if (!fin || !fout) { -- cgit v1.2.3 From d8ea435f071592d82479414e97e3c70bed204666 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Fri, 13 Oct 2023 15:03:49 -0400 Subject: tools/mm: update the usage output to be more organized Organize the usage options alphabetically and improve the description of some options. Also separate the more complicated cull options from the single-use compare options.
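Rendered on a terminal, with tab stops expanded and the string continuations joined, the reorganized help text should come out roughly like the excerpt below (assuming the <pidlist>-style argument names carried by the usage strings themselves):

	Usage: ./page_owner_sort [OPTIONS] <input> <output>
	-a			Sort by memory allocation time.
	-m			Sort by total memory.
	...
	--pid <pidlist>		Select by pid. This selects the information of
				blocks whose process ID numbers appear in <pidlist>.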
Link: https://lkml.kernel.org/r/20231013190350.579407-6-audra@redhat.com Signed-off-by: Audra Mitchell Acked-by: Rafael Aquini Acked-by: Vlastimil Babka Cc: Georgi Djakov Signed-off-by: Andrew Morton --- tools/mm/page_owner_sort.c | 33 ++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'tools') diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c index 5a260096ebaa..e1f264444342 100644 --- a/tools/mm/page_owner_sort.c +++ b/tools/mm/page_owner_sort.c @@ -634,19 +634,26 @@ static void print_allocator(FILE *out, int allocator) static void usage(void) { printf("Usage: ./page_owner_sort [OPTIONS] <input> <output>\n" - "-m\t\tSort by total memory.\n" - "-s\t\tSort by the stack trace.\n" - "-t\t\tSort by times (default).\n" - "-p\t\tSort by pid.\n" - "-P\t\tSort by tgid.\n" - "-n\t\tSort by task command name.\n" - "-a\t\tSort by memory allocate time.\n" - "-d\t\tPrint debug information.\n" - "--pid <pidlist>\tSelect by pid. This selects the information of blocks whose process ID numbers appear in <pidlist>.\n" - "--tgid <tgidlist>\tSelect by tgid. This selects the information of blocks whose Thread Group ID numbers appear in <tgidlist>.\n" - "--name <cmdlist>\n\t\tSelect by command name. This selects the information of blocks whose command name appears in <cmdlist>.\n" - "--cull <rules>\tCull by user-defined rules. <rules> is a single argument in the form of a comma-separated list with some common fields predefined\n" - "--sort <order>\tSpecify sort order as: [+|-]key[,[+|-]key[,...]]\n" + "-a\t\t\tSort by memory allocation time.\n" + "-m\t\t\tSort by total memory.\n" + "-n\t\t\tSort by task command name.\n" + "-p\t\t\tSort by pid.\n" + "-P\t\t\tSort by tgid.\n" + "-s\t\t\tSort by the stacktrace.\n" + "-t\t\t\tSort by number of times record is seen (default).\n\n" + "--pid <pidlist>\t\tSelect by pid. This selects the information" " of\n\t\t\tblocks whose process ID numbers appear in <pidlist>.\n" + "--tgid <tgidlist>\tSelect by tgid. This selects the information" " of\n\t\t\tblocks whose Thread Group ID numbers appear in " "<tgidlist>.\n" + "--name <cmdlist>\tSelect by command name. This selects the" " information\n\t\t\tof blocks whose command name appears in" " <cmdlist>.\n" + "--cull <rules>\t\tCull by user-defined rules. <rules> is a " "single\n\t\t\targument in the form of a comma-separated list " "with some\n\t\t\tcommon fields predefined (pid, tgid, comm, " "stacktrace, allocator)\n" + "--sort <order>\t\tSpecify sort order as: [+|-]key[,[+|-]key[,...]]\n" ); } -- cgit v1.2.3 From 6479b29203dedb724f4f4e08e1cc0dcf432c333b Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Fri, 20 Oct 2023 15:20:09 -0700 Subject: selftests: add a sanity check for zswap We recently encountered a bug that makes all zswap store attempts fail. Specifically, after: "141fdeececb3 mm/zswap: delay the initialization of zswap" if we build a kernel with zswap disabled by default, then enable it after the swapfile is set up, the zswap tree will not be initialized. As a result, all zswap store calls will be short-circuited. We have to perform another swapon to get zswap working properly again. Fortunately, this issue has since been fixed by the patch that kills frontswap: "42c06a0e8ebe mm: kill frontswap" which performs zswap_swapon() unconditionally, i.e. always initializing the zswap tree. This test adds a sanity check that ensures zswap storing works as intended.
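The check below reads the per-cgroup zswpout counter via cg_read_key_long() from cgroup_util.h. A rough standalone equivalent of what that helper is assumed to do (scan a flat keyed file such as memory.stat for one key and parse its value; the cgroup path in main() is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* assumed behavior: find the line starting with "key" and parse its value */
static long read_stat_key(const char *path, const char *key)
{
	char line[256];
	long val = -1;
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, key, strlen(key))) {
			val = strtol(line + strlen(key), NULL, 10);
			break;
		}
	}
	fclose(f);
	return val;
}

int main(void)
{
	/* hypothetical cgroup path; the test builds its own via cg_name() */
	long zswpout = read_stat_key("/sys/fs/cgroup/test/memory.stat", "zswpout ");

	printf("zswpout = %ld\n", zswpout);
	return 0;
}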
Link: https://lkml.kernel.org/r/20231020222009.2358953-1-nphamcs@gmail.com Signed-off-by: Nhat Pham Acked-by: Rik van Riel Cc: Domenico Cerasuolo Cc: Johannes Weiner Cc: Shuah Khan Cc: Tejun Heo Cc: Zefan Li Signed-off-by: Andrew Morton --- tools/testing/selftests/cgroup/test_zswap.c | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c index 49def87a909b..c99d2adaca3f 100644 --- a/tools/testing/selftests/cgroup/test_zswap.c +++ b/tools/testing/selftests/cgroup/test_zswap.c @@ -55,6 +55,11 @@ static int get_zswap_written_back_pages(size_t *value) return read_int("/sys/kernel/debug/zswap/written_back_pages", value); } +static long get_zswpout(const char *cgroup) +{ + return cg_read_key_long(cgroup, "memory.stat", "zswpout "); +} + static int allocate_bytes(const char *cgroup, void *arg) { size_t size = (size_t)arg; @@ -68,6 +73,48 @@ static int allocate_bytes(const char *cgroup, void *arg) return 0; } +/* + * Sanity test to check that pages are written into zswap. + */ +static int test_zswap_usage(const char *root) +{ + long zswpout_before, zswpout_after; + int ret = KSFT_FAIL; + char *test_group; + + /* Set up */ + test_group = cg_name(root, "no_shrink_test"); + if (!test_group) + goto out; + if (cg_create(test_group)) + goto out; + if (cg_write(test_group, "memory.max", "1M")) + goto out; + + zswpout_before = get_zswpout(test_group); + if (zswpout_before < 0) { + ksft_print_msg("Failed to get zswpout\n"); + goto out; + } + + /* Allocate more than memory.max to push memory into zswap */ + if (cg_run(test_group, allocate_bytes, (void *)MB(4))) + goto out; + + /* Verify that pages come into zswap */ + zswpout_after = get_zswpout(test_group); + if (zswpout_after <= zswpout_before) { + ksft_print_msg("zswpout does not increase after test program\n"); + goto out; + } + ret = KSFT_PASS; + +out: + cg_destroy(test_group); + free(test_group); + return ret; +} + /* * When trying to store a memcg page in zswap, if the memcg hits its memory * limit in zswap, writeback should not be triggered. @@ -235,6 +282,7 @@ struct zswap_test { int (*fn)(const char *root); const char *name; } tests[] = { + T(test_zswap_usage), T(test_no_kmem_bypass), T(test_no_invasive_cgroup_shrink), }; -- cgit v1.2.3