Diffstat (limited to 'lib')
-rw-r--r--  lib/generic-radix-tree.c  | 32
-rw-r--r--  lib/test_meminit.c        | 27
-rw-r--r--  lib/test_user_copy.c      | 37
-rw-r--r--  lib/vdso/gettimeofday.c   |  9
4 files changed, 86 insertions, 19 deletions
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index ae25e2fa2187..f25eb111c051 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/generic-radix-tree.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 
 #define GENRADIX_ARY            (PAGE_SIZE / sizeof(struct genradix_node *))
 #define GENRADIX_ARY_SHIFT      ilog2(GENRADIX_ARY)
@@ -75,6 +76,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
 }
 EXPORT_SYMBOL(__genradix_ptr);
 
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+        struct genradix_node *node;
+
+        node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+        /*
+         * We're using pages (not slab allocations) directly for kernel data
+         * structures, so we need to explicitly inform kmemleak of them in
+         * order to avoid false positive memory leak reports.
+         */
+        kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+        return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+        kmemleak_free(node);
+        free_page((unsigned long)node);
+}
+
 /*
  * Returns pointer to the specified byte @offset within @radix, allocating it if
  * necessary - newly allocated slots are always zeroed out:
@@ -97,8 +119,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
                         break;
 
                 if (!new_node) {
-                        new_node = (void *)
-                                __get_free_page(gfp_mask|__GFP_ZERO);
+                        new_node = genradix_alloc_node(gfp_mask);
                         if (!new_node)
                                 return NULL;
                 }
@@ -121,8 +142,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
                 n = READ_ONCE(*p);
                 if (!n) {
                         if (!new_node) {
-                                new_node = (void *)
-                                        __get_free_page(gfp_mask|__GFP_ZERO);
+                                new_node = genradix_alloc_node(gfp_mask);
                                 if (!new_node)
                                         return NULL;
                         }
@@ -133,7 +153,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
         }
 
         if (new_node)
-                free_page((unsigned long) new_node);
+                genradix_free_node(new_node);
 
         return &n->data[offset];
 }
@@ -191,7 +211,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
                         genradix_free_recurse(n->children[i], level - 1);
         }
 
-        free_page((unsigned long) n);
+        genradix_free_node(n);
 }
 
 int __genradix_prealloc(struct __genradix *radix, size_t size,
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 9729f271d150..9742e5cb853a 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -297,6 +297,32 @@ out:
         return 1;
 }
 
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+        struct kmem_cache *c;
+        int i, iter, maxiter = 1024;
+        int num, bytes;
+        bool fail = false;
+        void *objects[10];
+
+        c = kmem_cache_create("test_cache", size, size, 0, NULL);
+        for (iter = 0; (iter < maxiter) && !fail; iter++) {
+                num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+                                            objects);
+                for (i = 0; i < num; i++) {
+                        bytes = count_nonzero_bytes(objects[i], size);
+                        if (bytes)
+                                fail = true;
+                        fill_with_garbage(objects[i], size);
+                }
+
+                if (num)
+                        kmem_cache_free_bulk(c, num, objects);
+        }
+        *total_failures += fail;
+        return 1;
+}
+
 /*
  * Test kmem_cache allocation by creating caches of different sizes, with and
  * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
@@ -318,6 +344,7 @@ static int __init test_kmemcache(int *total_failures)
                         num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
                                                         &failures);
                 }
+                num_tests += do_kmem_cache_size_bulk(size, &failures);
         }
         REPORT_FAILURES_IN_FN();
         *total_failures += failures;
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index e365ace06538..5ff04d8fe971 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -47,18 +47,35 @@ static bool is_zeroed(void *from, size_t size)
 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
 {
         int ret = 0;
-        size_t start, end, i;
-        size_t zero_start = size / 4;
-        size_t zero_end = size - zero_start;
+        size_t start, end, i, zero_start, zero_end;
+
+        if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+                return -EINVAL;
+
+        /*
+         * We want to cross a page boundary to exercise the code more
+         * effectively. We also don't want to make the size we scan too large,
+         * otherwise the test can take a long time and cause soft lockups. So
+         * scan a 1024 byte region across the page boundary.
+         */
+        size = 1024;
+        start = PAGE_SIZE - (size / 2);
+
+        kmem += start;
+        umem += start;
+
+        zero_start = size / 4;
+        zero_end = size - zero_start;
 
         /*
-         * We conduct a series of check_nonzero_user() tests on a block of memory
-         * with the following byte-pattern (trying every possible [start,end]
-         * pair):
+         * We conduct a series of check_nonzero_user() tests on a block of
+         * memory with the following byte-pattern (trying every possible
+         * [start,end] pair):
          *
          *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
          *
-         * And we verify that check_nonzero_user() acts identically to memchr_inv().
+         * And we verify that check_nonzero_user() acts identically to
+         * memchr_inv().
          */
 
         memset(kmem, 0x0, size);
@@ -93,11 +110,13 @@ static int test_copy_struct_from_user(char *kmem, char __user *umem,
         size_t ksize, usize;
 
         umem_src = kmalloc(size, GFP_KERNEL);
-        if ((ret |= test(umem_src == NULL, "kmalloc failed")))
+        ret = test(umem_src == NULL, "kmalloc failed");
+        if (ret)
                 goto out_free;
 
         expected = kmalloc(size, GFP_KERNEL);
-        if ((ret |= test(expected == NULL, "kmalloc failed")))
+        ret = test(expected == NULL, "kmalloc failed");
+        if (ret)
                 goto out_free;
 
         /* Fill umem with a fixed byte pattern. */
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index e630e7ff57f1..45f57fd2db64 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
                 return -1;
         }
 
-        res->tv_sec = 0;
-        res->tv_nsec = ns;
-
+        if (likely(res)) {
+                res->tv_sec = 0;
+                res->tv_nsec = ns;
+        }
         return 0;
 }
 
@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
         ret = clock_getres_fallback(clock, &ts);
 #endif
 
-        if (likely(!ret)) {
+        if (likely(!ret && res)) {
                 res->tv_sec = ts.tv_sec;
                 res->tv_nsec = ts.tv_nsec;
         }
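
Note on the lib/vdso/gettimeofday.c hunks: the added checks matter because POSIX permits clock_getres() to be called with a NULL res pointer, so the vDSO fast path must not dereference res unconditionally. A minimal userspace sketch of that call pattern follows (illustrative only, not part of the patch):

/*
 * Illustrative userspace example: exercises the res == NULL case that the
 * new checks in __cvdso_clock_getres_common() and
 * __cvdso_clock_getres_time32() have to tolerate.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec res;

        /* Passing NULL is valid: only the return value is of interest. */
        if (clock_getres(CLOCK_MONOTONIC, NULL) != 0) {
                perror("clock_getres(CLOCK_MONOTONIC, NULL)");
                return 1;
        }

        /* Passing a buffer reports the clock's resolution as usual. */
        if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
                printf("CLOCK_MONOTONIC resolution: %ld ns\n", res.tv_nsec);

        return 0;
}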