From 8e7c8ca6b988904d4c32c4053b325739738c8f36 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 20 Sep 2021 10:27:16 -0700 Subject: test_overflow: Regularize test reporting output Report test run summaries more regularly, so it's easier to understand the output: - Remove noisy "ok" reports for shift and allocator tests. - Reorganize per-type output to the end of each type's tests. - Replace redundant vmalloc tests with __vmalloc so that __GFP_NO_WARN can be used to keep the expected failure warnings out of dmesg, similar to commit 8e060c21ae2c ("lib/test_overflow.c: avoid tainting the kernel and fix wrap size") Resulting output: test_overflow: 18 u8 arithmetic tests finished test_overflow: 19 s8 arithmetic tests finished test_overflow: 17 u16 arithmetic tests finished test_overflow: 17 s16 arithmetic tests finished test_overflow: 17 u32 arithmetic tests finished test_overflow: 17 s32 arithmetic tests finished test_overflow: 17 u64 arithmetic tests finished test_overflow: 21 s64 arithmetic tests finished test_overflow: 113 shift tests finished test_overflow: 17 overflow size helper tests finished test_overflow: 11 allocation overflow tests finished test_overflow: all tests passed Acked-by: Rasmus Villemoes Link: https://lore.kernel.org/all/eb6d02ae-e2ed-e7bd-c700-8a6d004d84ce@rasmusvillemoes.dk/ Reviewed-by: Nick Desaulniers Link: https://lore.kernel.org/all/CAKwvOdnYYa+72VhtJ4ug=SJVFn7w+n7Th+hKYE87BRDt4hvqOg@mail.gmail.com/ Signed-off-by: Kees Cook --- lib/test_overflow.c | 54 +++++++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/lib/test_overflow.c b/lib/test_overflow.c index 7a4b6f6c5473..cea37ae82615 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -252,10 +252,10 @@ static int __init test_ ## t ## _overflow(void) { \ int err = 0; \ unsigned i; \ \ - pr_info("%-3s: %zu arithmetic tests\n", #t, \ - ARRAY_SIZE(t ## _tests)); \ for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ err |= do_test_ ## t(&t ## _tests[i]); \ + pr_info("%zu %s arithmetic tests finished\n", \ + ARRAY_SIZE(t ## _tests), #t); \ return err; \ } @@ -291,6 +291,7 @@ static int __init test_overflow_calculation(void) static int __init test_overflow_shift(void) { int err = 0; + int count = 0; /* Args are: value, shift, type, expected result, overflow expected */ #define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \ @@ -313,9 +314,7 @@ static int __init test_overflow_shift(void) pr_warn("got %llu\n", (u64)__d); \ __failed = 1; \ } \ - if (!__failed) \ - pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \ - of ? 
"overflow" : #expect); \ + count++; \ __failed; \ }) @@ -479,6 +478,10 @@ static int __init test_overflow_shift(void) err |= TEST_ONE_SHIFT(0, 31, s32, 0, false); err |= TEST_ONE_SHIFT(0, 63, s64, 0, false); + pr_info("%d shift tests finished\n", count); + +#undef TEST_ONE_SHIFT + return err; } @@ -530,7 +533,6 @@ static int __init test_ ## func (void *arg) \ free ## want_arg (free_func, arg, ptr); \ return 1; \ } \ - pr_info(#func " detected saturation\n"); \ return 0; \ } @@ -544,10 +546,7 @@ DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); -DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0); -DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1); -DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0); -DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); @@ -559,8 +558,14 @@ static int __init test_overflow_allocation(void) { const char device_name[] = "overflow-test"; struct device *dev; + int count = 0; int err = 0; +#define check_allocation_overflow(alloc) ({ \ + count++; \ + test_ ## alloc(dev); \ +}) + /* Create dummy device for devm_kmalloc()-family tests. */ dev = root_device_register(device_name); if (IS_ERR(dev)) { @@ -568,23 +573,24 @@ static int __init test_overflow_allocation(void) return 1; } - err |= test_kmalloc(NULL); - err |= test_kmalloc_node(NULL); - err |= test_kzalloc(NULL); - err |= test_kzalloc_node(NULL); - err |= test_kvmalloc(NULL); - err |= test_kvmalloc_node(NULL); - err |= test_kvzalloc(NULL); - err |= test_kvzalloc_node(NULL); - err |= test_vmalloc(NULL); - err |= test_vmalloc_node(NULL); - err |= test_vzalloc(NULL); - err |= test_vzalloc_node(NULL); - err |= test_devm_kmalloc(dev); - err |= test_devm_kzalloc(dev); + err |= check_allocation_overflow(kmalloc); + err |= check_allocation_overflow(kmalloc_node); + err |= check_allocation_overflow(kzalloc); + err |= check_allocation_overflow(kzalloc_node); + err |= check_allocation_overflow(__vmalloc); + err |= check_allocation_overflow(kvmalloc); + err |= check_allocation_overflow(kvmalloc_node); + err |= check_allocation_overflow(kvzalloc); + err |= check_allocation_overflow(kvzalloc_node); + err |= check_allocation_overflow(devm_kmalloc); + err |= check_allocation_overflow(devm_kzalloc); device_unregister(dev); + pr_info("%d allocation overflow tests finished\n", count); + +#undef check_allocation_overflow + return err; } -- cgit v1.2.3 From e1be43d9b5d0d1310dbd90185a8e5c7145dde40f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 18 Sep 2021 15:17:53 -0700 Subject: overflow: Implement size_t saturating arithmetic helpers In order to perform more open-coded replacements of common allocation size arithmetic, the kernel needs saturating (SIZE_MAX) helpers for multiplication, addition, and subtraction. For example, it is common in allocators, especially on realloc, to add to an existing size: p = krealloc(map->patch, sizeof(struct reg_sequence) * (map->patch_regs + num_regs), GFP_KERNEL); There is no existing saturating replacement for this calculation, and just leaving the addition open coded inside array_size() could potentially overflow as well. 
For example, an overflow in an expression for a size_t argument might wrap to zero: array_size(anything, something_at_size_max + 1) == 0 Introduce size_mul(), size_add(), and size_sub() helpers that implicitly promote arguments to size_t and saturated calculations for use in allocations. With these helpers it is also possible to redefine array_size(), array3_size(), flex_array_size(), and struct_size() in terms of the new helpers. As with the check_*_overflow() helpers, the new helpers use __must_check, though what is really desired is a way to make sure that assignment is only to a size_t lvalue. Without this, it's still possible to introduce overflow/underflow via type conversion (i.e. from size_t to int). Enforcing this will currently need to be left to static analysis or future use of -Wconversion. Additionally update the overflow unit tests to force runtime evaluation for the pathological cases. Cc: Rasmus Villemoes Cc: Gustavo A. R. Silva Cc: Nathan Chancellor Cc: Jason Gunthorpe Cc: Nick Desaulniers Cc: Leon Romanovsky Cc: Keith Busch Cc: Len Baker Signed-off-by: Kees Cook --- Documentation/process/deprecated.rst | 20 ++++++- include/linux/overflow.h | 110 ++++++++++++++++++++++------------- lib/test_overflow.c | 98 +++++++++++++++++++++++++++++++ 3 files changed, 184 insertions(+), 44 deletions(-) diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst index 388cb19f5dbb..a6e36d9c3d14 100644 --- a/Documentation/process/deprecated.rst +++ b/Documentation/process/deprecated.rst @@ -71,6 +71,9 @@ Instead, the 2-factor form of the allocator should be used:: foo = kmalloc_array(count, size, GFP_KERNEL); +Specifically, kmalloc() can be replaced with kmalloc_array(), and +kzalloc() can be replaced with kcalloc(). + If no 2-factor form is available, the saturate-on-overflow helpers should be used:: @@ -91,9 +94,20 @@ Instead, use the helper:: array usage and switch to a `flexible array member <#zero-length-and-one-element-arrays>`_ instead. -See array_size(), array3_size(), and struct_size(), -for more details as well as the related check_add_overflow() and -check_mul_overflow() family of functions. +For other calculations, please compose the use of the size_mul(), +size_add(), and size_sub() helpers. For example, in the case of:: + + foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL); + +Instead, use the helpers:: + + foo = krealloc(size_add(current_size, + size_mul(chunk_size, + size_sub(count, 3))), GFP_KERNEL); + +For more details, also see array3_size() and flex_array_size(), +as well as the related check_mul_overflow(), check_add_overflow(), +check_sub_overflow(), and check_shl_overflow() family of functions. simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull() ---------------------------------------------------------------------- diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 4669632bd72b..59d7228104d0 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -118,81 +118,94 @@ static inline bool __must_check __must_check_overflow(bool overflow) })) /** - * array_size() - Calculate size of 2-dimensional array. - * - * @a: dimension one - * @b: dimension two + * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX * - * Calculates size of 2-dimensional array: @a * @b. + * @factor1: first factor + * @factor2: second factor * - * Returns: number of bytes needed to represent the array or SIZE_MAX on - * overflow. 
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t array_size(size_t a, size_t b) +static inline size_t __must_check size_mul(size_t factor1, size_t factor2) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) + if (check_mul_overflow(factor1, factor2, &bytes)) return SIZE_MAX; return bytes; } /** - * array3_size() - Calculate size of 3-dimensional array. + * size_add() - Calculate size_t addition with saturation at SIZE_MAX * - * @a: dimension one - * @b: dimension two - * @c: dimension three - * - * Calculates size of 3-dimensional array: @a * @b * @c. + * @addend1: first addend + * @addend2: second addend * - * Returns: number of bytes needed to represent the array or SIZE_MAX on - * overflow. + * Returns: calculate @addend1 + @addend2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +static inline size_t __must_check size_add(size_t addend1, size_t addend2) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_mul_overflow(bytes, c, &bytes)) + if (check_add_overflow(addend1, addend2, &bytes)) return SIZE_MAX; return bytes; } -/* - * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for - * struct_size() below. +/** + * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX + * + * @minuend: value to subtract from + * @subtrahend: value to subtract from @minuend + * + * Returns: calculate @minuend - @subtrahend, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. For + * composition with the size_add() and size_mul() helpers, neither + * argument may be SIZE_MAX (or the result with be forced to SIZE_MAX). + * The lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) +static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_add_overflow(bytes, c, &bytes)) + if (minuend == SIZE_MAX || subtrahend == SIZE_MAX || + check_sub_overflow(minuend, subtrahend, &bytes)) return SIZE_MAX; return bytes; } /** - * struct_size() - Calculate size of structure with trailing array. - * @p: Pointer to the structure. - * @member: Name of the array member. - * @count: Number of elements in the array. + * array_size() - Calculate size of 2-dimensional array. * - * Calculates size of memory needed for structure @p followed by an - * array of @count number of @member elements. + * @a: dimension one + * @b: dimension two * - * Return: number of bytes needed or SIZE_MAX on overflow. + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. */ -#define struct_size(p, member, count) \ - __ab_c_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member),\ - sizeof(*(p))) +#define array_size(a, b) size_mul(a, b) + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. 
+ * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +#define array3_size(a, b, c) size_mul(size_mul(a, b), c) /** * flex_array_size() - Calculate size of a flexible array member @@ -208,7 +221,22 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * Return: number of bytes needed or SIZE_MAX on overflow. */ #define flex_array_size(p, member, count) \ - array_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member)) + size_mul(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + +/** + * struct_size() - Calculate size of structure with trailing flexible array. + * + * @p: Pointer to the structure. + * @member: Name of the array member. + * @count: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @count number of @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define struct_size(p, member, count) \ + size_add(sizeof(*(p)), flex_array_size(p, member, count)) #endif /* __LINUX_OVERFLOW_H */ diff --git a/lib/test_overflow.c b/lib/test_overflow.c index cea37ae82615..712fb2351c27 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -594,12 +594,110 @@ static int __init test_overflow_allocation(void) return err; } +struct __test_flex_array { + unsigned long flags; + size_t count; + unsigned long data[]; +}; + +static int __init test_overflow_size_helpers(void) +{ + struct __test_flex_array *obj; + int count = 0; + int err = 0; + int var; + +#define check_one_size_helper(expected, func, args...) ({ \ + bool __failure = false; \ + size_t _r; \ + \ + _r = func(args); \ + if (_r != (expected)) { \ + pr_warn("expected " #func "(" #args ") " \ + "to return %zu but got %zu instead\n", \ + (size_t)(expected), _r); \ + __failure = true; \ + } \ + count++; \ + __failure; \ +}) + + var = 4; + err |= check_one_size_helper(20, size_mul, var++, 5); + err |= check_one_size_helper(20, size_mul, 4, var++); + err |= check_one_size_helper(0, size_mul, 0, 3); + err |= check_one_size_helper(0, size_mul, 3, 0); + err |= check_one_size_helper(6, size_mul, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); + err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); + + var = 4; + err |= check_one_size_helper(9, size_add, var++, 5); + err |= check_one_size_helper(9, size_add, 4, var++); + err |= check_one_size_helper(9, size_add, 9, 0); + err |= check_one_size_helper(9, size_add, 0, 9); + err |= check_one_size_helper(5, size_add, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); + err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); + + var = 4; + err |= check_one_size_helper(1, size_sub, var--, 3); + err |= check_one_size_helper(1, size_sub, 4, var--); + err |= check_one_size_helper(1, size_sub, 3, 2); + err |= check_one_size_helper(9, size_sub, 9, 0); + err |= check_one_size_helper(SIZE_MAX, size_sub, 9, -3); + err |= check_one_size_helper(SIZE_MAX, size_sub, 0, 9); + err |= check_one_size_helper(SIZE_MAX, size_sub, 2, 3); + err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); + err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); + err |= check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); + err |= check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); + err |= check_one_size_helper(SIZE_MAX - 
2, size_sub, SIZE_MAX - 1, 1); + err |= check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); + err |= check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); + + var = 4; + err |= check_one_size_helper(4 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + err |= check_one_size_helper(5 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + err |= check_one_size_helper(0, flex_array_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj->data), + flex_array_size, obj, data, 1); + err |= check_one_size_helper(7 * sizeof(*obj->data), + flex_array_size, obj, data, 7); + err |= check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, -1); + err |= check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, SIZE_MAX - 4); + + var = 4; + err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), + struct_size, obj, data, var++); + err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), + struct_size, obj, data, var++); + err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), + struct_size, obj, data, 1); + err |= check_one_size_helper(SIZE_MAX, + struct_size, obj, data, -3); + err |= check_one_size_helper(SIZE_MAX, + struct_size, obj, data, SIZE_MAX - 3); + + pr_info("%d overflow size helper tests finished\n", count); + + return err; +} + static int __init test_module_init(void) { int err = 0; err |= test_overflow_calculation(); err |= test_overflow_shift(); + err |= test_overflow_size_helpers(); err |= test_overflow_allocation(); if (err) { -- cgit v1.2.3 From 230f6fa2c1db6a3f3e668cfe95995ac8e6eee212 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 9 Feb 2022 16:40:41 -0800 Subject: overflow: Provide constant expression struct_size There have been cases where struct_size() (or flex_array_size()) needs to be calculated for an initializer, which requires it be a constant expression. This is possible when the "count" argument is a constant expression, so provide this ability for the helpers. Cc: Gustavo A. R. Silva Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Rasmus Villemoes Signed-off-by: Kees Cook Reviewed-by: Gustavo A. R. Silva Tested-by: Gustavo A. R. Silva Link: https://lore.kernel.org/lkml/20220210010407.GA701603@embeddedor --- include/linux/overflow.h | 10 +++++++--- lib/test_overflow.c | 26 +++++++++++++++++--------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 59d7228104d0..f1221d11f8e5 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -4,6 +4,7 @@ #include #include +#include /* * We need to compute the minimum and maximum values representable in a given @@ -221,8 +222,9 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) * Return: number of bytes needed or SIZE_MAX on overflow. */ #define flex_array_size(p, member, count) \ - size_mul(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member)) + __builtin_choose_expr(__is_constexpr(count), \ + (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \ + size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member))) /** * struct_size() - Calculate size of structure with trailing flexible array. @@ -237,6 +239,8 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) * Return: number of bytes needed or SIZE_MAX on overflow. 
*/ #define struct_size(p, member, count) \ - size_add(sizeof(*(p)), flex_array_size(p, member, count)) + __builtin_choose_expr(__is_constexpr(count), \ + sizeof(*(p)) + flex_array_size(p, member, count), \ + size_add(sizeof(*(p)), flex_array_size(p, member, count))) #endif /* __LINUX_OVERFLOW_H */ diff --git a/lib/test_overflow.c b/lib/test_overflow.c index 712fb2351c27..f6530fce799d 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -602,10 +602,18 @@ struct __test_flex_array { static int __init test_overflow_size_helpers(void) { + /* Make sure struct_size() can be used in a constant expression. */ + u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)]; struct __test_flex_array *obj; int count = 0; int err = 0; int var; + volatile int unconst = 0; + + /* Verify constant expression against runtime version. */ + var = 55; + OPTIMIZER_HIDE_VAR(var); + err |= sizeof(ce_array) != struct_size(obj, data, var); #define check_one_size_helper(expected, func, args...) ({ \ bool __failure = false; \ @@ -663,28 +671,28 @@ static int __init test_overflow_size_helpers(void) flex_array_size, obj, data, var++); err |= check_one_size_helper(5 * sizeof(*obj->data), flex_array_size, obj, data, var++); - err |= check_one_size_helper(0, flex_array_size, obj, data, 0); + err |= check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst); err |= check_one_size_helper(sizeof(*obj->data), - flex_array_size, obj, data, 1); + flex_array_size, obj, data, 1 + unconst); err |= check_one_size_helper(7 * sizeof(*obj->data), - flex_array_size, obj, data, 7); + flex_array_size, obj, data, 7 + unconst); err |= check_one_size_helper(SIZE_MAX, - flex_array_size, obj, data, -1); + flex_array_size, obj, data, -1 + unconst); err |= check_one_size_helper(SIZE_MAX, - flex_array_size, obj, data, SIZE_MAX - 4); + flex_array_size, obj, data, SIZE_MAX - 4 + unconst); var = 4; err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), struct_size, obj, data, var++); err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), struct_size, obj, data, var++); - err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0); + err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst); err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), - struct_size, obj, data, 1); + struct_size, obj, data, 1 + unconst); err |= check_one_size_helper(SIZE_MAX, - struct_size, obj, data, -3); + struct_size, obj, data, -3 + unconst); err |= check_one_size_helper(SIZE_MAX, - struct_size, obj, data, SIZE_MAX - 3); + struct_size, obj, data, SIZE_MAX - 3 + unconst); pr_info("%d overflow size helper tests finished\n", count); -- cgit v1.2.3 From 617f55e20743fc50c989b498f9dee289eb644cfd Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Feb 2022 14:17:49 -0800 Subject: lib: overflow: Convert to Kunit Convert overflow unit tests to KUnit, for better integration into the kernel self test framework. Includes a rename of test_overflow.c to overflow_kunit.c, and CONFIG_TEST_OVERFLOW to CONFIG_OVERFLOW_KUNIT_TEST. $ ./tools/testing/kunit/kunit.py run overflow ... [14:33:51] Starting KUnit Kernel (1/1)... 
[14:33:51] ============================================================ [14:33:51] ================== overflow (11 subtests) ================== [14:33:51] [PASSED] u8_overflow_test [14:33:51] [PASSED] s8_overflow_test [14:33:51] [PASSED] u16_overflow_test [14:33:51] [PASSED] s16_overflow_test [14:33:51] [PASSED] u32_overflow_test [14:33:51] [PASSED] s32_overflow_test [14:33:51] [PASSED] u64_overflow_test [14:33:51] [PASSED] s64_overflow_test [14:33:51] [PASSED] overflow_shift_test [14:33:51] [PASSED] overflow_allocation_test [14:33:51] [PASSED] overflow_size_helpers_test [14:33:51] ==================== [PASSED] overflow ===================== [14:33:51] ============================================================ [14:33:51] Testing complete. Passed: 11, Failed: 0, Crashed: 0, Skipped: 0, Errors: 0 [14:33:51] Elapsed time: 12.525s total, 0.001s configuring, 12.402s building, 0.101s running Cc: Rasmus Villemoes Cc: Nick Desaulniers Co-developed-by: Vitor Massaru Iha Signed-off-by: Vitor Massaru Iha Link: https://lore.kernel.org/lkml/20200720224418.200495-1-vitor@massaru.org/ Co-developed-by: Daniel Latypov Signed-off-by: Daniel Latypov Link: https://lore.kernel.org/linux-kselftest/20210503211536.1384578-1-dlatypov@google.com/ Acked-by: Nick Desaulniers Link: https://lore.kernel.org/lkml/CAKwvOdm62iA1dNiC6Q11UJ-MnTqtc4kXkm-ubPaFMK824_k0nw@mail.gmail.com Signed-off-by: Kees Cook Reviewed-by: David Gow Link: https://lore.kernel.org/lkml/CABVgOS=TWVh649_Vjo3wnMu9gZnq66gkV-LtGgsksAWMqc+MSA@mail.gmail.com --- lib/Kconfig.debug | 16 +- lib/Makefile | 2 +- lib/overflow_kunit.c | 670 +++++++++++++++++++++++++++++++++++++++++++++++ lib/test_overflow.c | 726 --------------------------------------------------- 4 files changed, 684 insertions(+), 730 deletions(-) create mode 100644 lib/overflow_kunit.c delete mode 100644 lib/test_overflow.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 14b89aa37c5c..14d90d03bc8d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2214,9 +2214,6 @@ config TEST_UUID config TEST_XARRAY tristate "Test the XArray code at runtime" -config TEST_OVERFLOW - tristate "Test check_*_overflow() functions at runtime" - config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" help @@ -2501,6 +2498,19 @@ config MEMCPY_KUNIT_TEST If unsure, say N. +config OVERFLOW_KUNIT_TEST + tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Builds unit tests for the check_*_overflow(), size_*(), allocation, and + related functions. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. 
+ config TEST_UDELAY tristate "udelay test driver" help diff --git a/lib/Makefile b/lib/Makefile index 300f569c626b..fdfcbfaff32f 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -77,7 +77,6 @@ obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o obj-$(CONFIG_TEST_LKM) += test_module.o obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o -obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o obj-$(CONFIG_TEST_SORT) += test_sort.o obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o @@ -363,6 +362,7 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o +obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c new file mode 100644 index 000000000000..475f0c064bf6 --- /dev/null +++ b/lib/overflow_kunit.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Test cases for arithmetic overflow checks. See: + * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests + * ./tools/testing/kunit/kunit.py run overflow [--raw_output] + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEFINE_TEST_ARRAY(t) \ + static const struct test_ ## t { \ + t a, b; \ + t sum, diff, prod; \ + bool s_of, d_of, p_of; \ + } t ## _tests[] + +DEFINE_TEST_ARRAY(u8) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U8_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, + {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, + {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, + {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, + {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, + + {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, + {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, + + {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, + {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, + {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, + {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, + + {48, 32, 80, 16, 0, false, false, true}, + {128, 128, 0, 0, 0, true, false, true}, + {123, 234, 101, 145, 110, true, true, true}, +}; +DEFINE_TEST_ARRAY(u16) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U16_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, + {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, + {1, U16_MAX, 0, 2, U16_MAX, true, true, false}, + {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, + {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, + + {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, + {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, + + {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, + {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, + {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, + {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, + + {123, 234, 357, 65425, 28782, false, true, false}, + {1234, 2345, 3579, 64425, 10146, false, true, true}, +}; 
+DEFINE_TEST_ARRAY(u32) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U32_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, + {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false}, + {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, + {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, + {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, + + {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, + {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, + + {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, + {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, + {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, + {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, + + {-2U, 1U, -1U, -3U, -2U, false, false, false}, + {-4U, 5U, 1U, -9U, -20U, true, false, true}, +}; + +DEFINE_TEST_ARRAY(u64) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U64_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U64_MAX, U64_MAX, 1, 0, false, true, false}, + {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false}, + {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, + {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, + {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, + + {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, + {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, + + {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, + {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, + {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, + {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, + {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, + 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, + false, true, false}, + {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, +}; + +DEFINE_TEST_ARRAY(s8) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, + {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, + {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, + {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, + + {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, + {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, + {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, + {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, + {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, + {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, + + {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, + {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, + {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, + {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, + + {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, + {S8_MAX, S8_MAX, -2, 0, 1, true, false, true}, + + {-4, -32, -36, 28, -128, false, false, true}, + {-4, 32, 28, -36, -128, false, false, false}, +}; + +DEFINE_TEST_ARRAY(s16) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, + {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, + {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, + {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, + + {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, + {S16_MIN, 
-1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, + {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, + {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, + {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, + {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, + + {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, + {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false}, + {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, + {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, + + {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, + {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, +}; +DEFINE_TEST_ARRAY(s32) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, + {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false}, + {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, + {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, + + {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, + {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, + {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false}, + {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, + {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, + {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, + + {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, + {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, + {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, + {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, + + {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, + {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, +}; +DEFINE_TEST_ARRAY(s64) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, + {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, + {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, + {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, + + {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, + {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, + {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, + {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, + {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, + {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, + + {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, + {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, + {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, + {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, + + {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, + {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, + + {-1, -1, -2, 0, 1, false, false, false}, + {-1, -128, -129, 127, 128, false, false, false}, + {-128, -1, -129, -127, 128, false, false, false}, + {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, +}; + +#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ + t _r; \ + bool _of; \ + \ + _of = check_ ## op ## _overflow(a, b, &_r); \ + KUNIT_EXPECT_EQ_MSG(test, _of, of, \ + "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \ + a, b, of ? 
"" : " not", #t); \ + KUNIT_EXPECT_EQ_MSG(test, _r, r, \ + "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ + a, b, r, _r, #t); \ +} while (0) + +#define DEFINE_TEST_FUNC(t, fmt) \ +static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ +{ \ + check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ + check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ + check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ + check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ + check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ +} \ + \ +static void t ## _overflow_test(struct kunit *test) { \ + unsigned i; \ + \ + for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ + do_test_ ## t(test, &t ## _tests[i]); \ + kunit_info(test, "%zu %s arithmetic tests finished\n", \ + ARRAY_SIZE(t ## _tests), #t); \ +} + +DEFINE_TEST_FUNC(u8, "%d"); +DEFINE_TEST_FUNC(s8, "%d"); +DEFINE_TEST_FUNC(u16, "%d"); +DEFINE_TEST_FUNC(s16, "%d"); +DEFINE_TEST_FUNC(u32, "%u"); +DEFINE_TEST_FUNC(s32, "%d"); +#if BITS_PER_LONG == 64 +DEFINE_TEST_FUNC(u64, "%llu"); +DEFINE_TEST_FUNC(s64, "%lld"); +#endif + +static void overflow_shift_test(struct kunit *test) +{ + int count = 0; + +/* Args are: value, shift, type, expected result, overflow expected */ +#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \ + typeof(a) __a = (a); \ + typeof(s) __s = (s); \ + t __e = (expect); \ + t __d; \ + bool __of = check_shl_overflow(__a, __s, &__d); \ + if (__of != of) { \ + KUNIT_EXPECT_EQ_MSG(test, __of, of, \ + "expected (%s)(%s << %s) to%s overflow\n", \ + #t, #a, #s, of ? "" : " not"); \ + } else if (!__of && __d != __e) { \ + KUNIT_EXPECT_EQ_MSG(test, __d, __e, \ + "expected (%s)(%s << %s) == %s\n", \ + #t, #a, #s, #expect); \ + if ((t)-1 < 0) \ + kunit_info(test, "got %lld\n", (s64)__d); \ + else \ + kunit_info(test, "got %llu\n", (u64)__d); \ + } \ + count++; \ +} while (0) + + /* Sane shifts. */ + TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); + TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); + TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false); + TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false); + TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false); + TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false); + TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false); + TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false); + TEST_ONE_SHIFT(1, 0, int, 1 << 0, false); + TEST_ONE_SHIFT(1, 16, int, 1 << 16, false); + TEST_ONE_SHIFT(1, 30, int, 1 << 30, false); + TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false); + TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false); + TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false); + TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false); + TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false); + TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false); + TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false); + TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false); + TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false); + TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false); + TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false); + TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false); + TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false); + TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false); + TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false); + + /* Sane shift: start and end with 0, without a too-wide shift. 
*/ + TEST_ONE_SHIFT(0, 7, u8, 0, false); + TEST_ONE_SHIFT(0, 15, u16, 0, false); + TEST_ONE_SHIFT(0, 31, unsigned int, 0, false); + TEST_ONE_SHIFT(0, 31, u32, 0, false); + TEST_ONE_SHIFT(0, 63, u64, 0, false); + + /* Sane shift: start and end with 0, without reaching signed bit. */ + TEST_ONE_SHIFT(0, 6, s8, 0, false); + TEST_ONE_SHIFT(0, 14, s16, 0, false); + TEST_ONE_SHIFT(0, 30, int, 0, false); + TEST_ONE_SHIFT(0, 30, s32, 0, false); + TEST_ONE_SHIFT(0, 62, s64, 0, false); + + /* Overflow: shifted the bit off the end. */ + TEST_ONE_SHIFT(1, 8, u8, 0, true); + TEST_ONE_SHIFT(1, 16, u16, 0, true); + TEST_ONE_SHIFT(1, 32, unsigned int, 0, true); + TEST_ONE_SHIFT(1, 32, u32, 0, true); + TEST_ONE_SHIFT(1, 64, u64, 0, true); + + /* Overflow: shifted into the signed bit. */ + TEST_ONE_SHIFT(1, 7, s8, 0, true); + TEST_ONE_SHIFT(1, 15, s16, 0, true); + TEST_ONE_SHIFT(1, 31, int, 0, true); + TEST_ONE_SHIFT(1, 31, s32, 0, true); + TEST_ONE_SHIFT(1, 63, s64, 0, true); + + /* Overflow: high bit falls off unsigned types. */ + /* 10010110 */ + TEST_ONE_SHIFT(150, 1, u8, 0, true); + /* 1000100010010110 */ + TEST_ONE_SHIFT(34966, 1, u16, 0, true); + /* 10000100000010001000100010010110 */ + TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true); + TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true); + /* 1000001000010000010000000100000010000100000010001000100010010110 */ + TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true); + + /* Overflow: bit shifted into signed bit on signed types. */ + /* 01001011 */ + TEST_ONE_SHIFT(75, 1, s8, 0, true); + /* 0100010001001011 */ + TEST_ONE_SHIFT(17483, 1, s16, 0, true); + /* 01000010000001000100010001001011 */ + TEST_ONE_SHIFT(1107575883, 1, s32, 0, true); + TEST_ONE_SHIFT(1107575883, 1, int, 0, true); + /* 0100000100001000001000000010000001000010000001000100010001001011 */ + TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true); + + /* Overflow: bit shifted past signed bit on signed types. */ + /* 01001011 */ + TEST_ONE_SHIFT(75, 2, s8, 0, true); + /* 0100010001001011 */ + TEST_ONE_SHIFT(17483, 2, s16, 0, true); + /* 01000010000001000100010001001011 */ + TEST_ONE_SHIFT(1107575883, 2, s32, 0, true); + TEST_ONE_SHIFT(1107575883, 2, int, 0, true); + /* 0100000100001000001000000010000001000010000001000100010001001011 */ + TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); + + /* Overflow: values larger than destination type. */ + TEST_ONE_SHIFT(0x100, 0, u8, 0, true); + TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); + TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true); + TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true); + TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true); + TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); + + /* Nonsense: negative initial value. */ + TEST_ONE_SHIFT(-1, 0, s8, 0, true); + TEST_ONE_SHIFT(-1, 0, u8, 0, true); + TEST_ONE_SHIFT(-5, 0, s16, 0, true); + TEST_ONE_SHIFT(-5, 0, u16, 0, true); + TEST_ONE_SHIFT(-10, 0, int, 0, true); + TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true); + TEST_ONE_SHIFT(-100, 0, s32, 0, true); + TEST_ONE_SHIFT(-100, 0, u32, 0, true); + TEST_ONE_SHIFT(-10000, 0, s64, 0, true); + TEST_ONE_SHIFT(-10000, 0, u64, 0, true); + + /* Nonsense: negative shift values. 
*/ + TEST_ONE_SHIFT(0, -5, s8, 0, true); + TEST_ONE_SHIFT(0, -5, u8, 0, true); + TEST_ONE_SHIFT(0, -10, s16, 0, true); + TEST_ONE_SHIFT(0, -10, u16, 0, true); + TEST_ONE_SHIFT(0, -15, int, 0, true); + TEST_ONE_SHIFT(0, -15, unsigned int, 0, true); + TEST_ONE_SHIFT(0, -20, s32, 0, true); + TEST_ONE_SHIFT(0, -20, u32, 0, true); + TEST_ONE_SHIFT(0, -30, s64, 0, true); + TEST_ONE_SHIFT(0, -30, u64, 0, true); + + /* Overflow: shifted at or beyond entire type's bit width. */ + TEST_ONE_SHIFT(0, 8, u8, 0, true); + TEST_ONE_SHIFT(0, 9, u8, 0, true); + TEST_ONE_SHIFT(0, 8, s8, 0, true); + TEST_ONE_SHIFT(0, 9, s8, 0, true); + TEST_ONE_SHIFT(0, 16, u16, 0, true); + TEST_ONE_SHIFT(0, 17, u16, 0, true); + TEST_ONE_SHIFT(0, 16, s16, 0, true); + TEST_ONE_SHIFT(0, 17, s16, 0, true); + TEST_ONE_SHIFT(0, 32, u32, 0, true); + TEST_ONE_SHIFT(0, 33, u32, 0, true); + TEST_ONE_SHIFT(0, 32, int, 0, true); + TEST_ONE_SHIFT(0, 33, int, 0, true); + TEST_ONE_SHIFT(0, 32, s32, 0, true); + TEST_ONE_SHIFT(0, 33, s32, 0, true); + TEST_ONE_SHIFT(0, 64, u64, 0, true); + TEST_ONE_SHIFT(0, 65, u64, 0, true); + TEST_ONE_SHIFT(0, 64, s64, 0, true); + TEST_ONE_SHIFT(0, 65, s64, 0, true); + + /* + * Corner case: for unsigned types, we fail when we've shifted + * through the entire width of bits. For signed types, we might + * want to match this behavior, but that would mean noticing if + * we shift through all but the signed bit, and this is not + * currently detected (but we'll notice an overflow into the + * signed bit). So, for now, we will test this condition but + * mark it as not expected to overflow. + */ + TEST_ONE_SHIFT(0, 7, s8, 0, false); + TEST_ONE_SHIFT(0, 15, s16, 0, false); + TEST_ONE_SHIFT(0, 31, int, 0, false); + TEST_ONE_SHIFT(0, 31, s32, 0, false); + TEST_ONE_SHIFT(0, 63, s64, 0, false); + + kunit_info(test, "%d shift tests finished\n", count); +#undef TEST_ONE_SHIFT +} + +/* + * Deal with the various forms of allocator arguments. See comments above + * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". + */ +#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN) +#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP) +#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE) +#define alloc000(alloc, arg, sz) alloc(sz) +#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) +#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP) +#define free0(free, arg, ptr) free(ptr) +#define free1(free, arg, ptr) free(arg, ptr) + +/* Wrap around to 16K */ +#define TEST_SIZE (5 * 4096) + +#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ +static void test_ ## func (struct kunit *test, void *arg) \ +{ \ + volatile size_t a = TEST_SIZE; \ + volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ + void *ptr; \ + \ + /* Tiny allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ + KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \ + #func " failed regular allocation?!\n"); \ + free ## want_arg (free_func, arg, ptr); \ + \ + /* Wrapped allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ + a * b); \ + KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \ + #func " unexpectedly failed bad wrapping?!\n"); \ + free ## want_arg (free_func, arg, ptr); \ + \ + /* Saturated allocation test. 
*/ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ + array_size(a, b)); \ + if (ptr) { \ + KUNIT_FAIL(test, #func " missed saturation!\n"); \ + free ## want_arg (free_func, arg, ptr); \ + } \ +} + +/* + * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node()) + * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc()) + * Allocator uses a special leading argument + | | (e.g. devm_kmalloc()) + * | | | + */ +DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); +DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); + +static void overflow_allocation_test(struct kunit *test) +{ + const char device_name[] = "overflow-test"; + struct device *dev; + int count = 0; + +#define check_allocation_overflow(alloc) do { \ + count++; \ + test_ ## alloc(test, dev); \ +} while (0) + + /* Create dummy device for devm_kmalloc()-family tests. */ + dev = root_device_register(device_name); + KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), + "Cannot register test device\n"); + + check_allocation_overflow(kmalloc); + check_allocation_overflow(kmalloc_node); + check_allocation_overflow(kzalloc); + check_allocation_overflow(kzalloc_node); + check_allocation_overflow(__vmalloc); + check_allocation_overflow(kvmalloc); + check_allocation_overflow(kvmalloc_node); + check_allocation_overflow(kvzalloc); + check_allocation_overflow(kvzalloc_node); + check_allocation_overflow(devm_kmalloc); + check_allocation_overflow(devm_kzalloc); + + device_unregister(dev); + + kunit_info(test, "%d allocation overflow tests finished\n", count); +#undef check_allocation_overflow +} + +struct __test_flex_array { + unsigned long flags; + size_t count; + unsigned long data[]; +}; + +static void overflow_size_helpers_test(struct kunit *test) +{ + /* Make sure struct_size() can be used in a constant expression. */ + u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)]; + struct __test_flex_array *obj; + int count = 0; + int var; + volatile int unconst = 0; + + /* Verify constant expression against runtime version. */ + var = 55; + OPTIMIZER_HIDE_VAR(var); + KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var)); + +#define check_one_size_helper(expected, func, args...) 
do { \ + size_t _r = func(args); \ + KUNIT_EXPECT_EQ_MSG(test, _r, expected, \ + "expected " #func "(" #args ") to return %zu but got %zu instead\n", \ + (size_t)(expected), _r); \ + count++; \ +} while (0) + + var = 4; + check_one_size_helper(20, size_mul, var++, 5); + check_one_size_helper(20, size_mul, 4, var++); + check_one_size_helper(0, size_mul, 0, 3); + check_one_size_helper(0, size_mul, 3, 0); + check_one_size_helper(6, size_mul, 2, 3); + check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); + check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); + check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); + + var = 4; + check_one_size_helper(9, size_add, var++, 5); + check_one_size_helper(9, size_add, 4, var++); + check_one_size_helper(9, size_add, 9, 0); + check_one_size_helper(9, size_add, 0, 9); + check_one_size_helper(5, size_add, 2, 3); + check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); + check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); + check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); + + var = 4; + check_one_size_helper(1, size_sub, var--, 3); + check_one_size_helper(1, size_sub, 4, var--); + check_one_size_helper(1, size_sub, 3, 2); + check_one_size_helper(9, size_sub, 9, 0); + check_one_size_helper(SIZE_MAX, size_sub, 9, -3); + check_one_size_helper(SIZE_MAX, size_sub, 0, 9); + check_one_size_helper(SIZE_MAX, size_sub, 2, 3); + check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); + check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); + check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); + check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); + check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); + check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); + check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); + + var = 4; + check_one_size_helper(4 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + check_one_size_helper(5 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst); + check_one_size_helper(sizeof(*obj->data), + flex_array_size, obj, data, 1 + unconst); + check_one_size_helper(7 * sizeof(*obj->data), + flex_array_size, obj, data, 7 + unconst); + check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, -1 + unconst); + check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, SIZE_MAX - 4 + unconst); + + var = 4; + check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), + struct_size, obj, data, var++); + check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), + struct_size, obj, data, var++); + check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst); + check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), + struct_size, obj, data, 1 + unconst); + check_one_size_helper(SIZE_MAX, + struct_size, obj, data, -3 + unconst); + check_one_size_helper(SIZE_MAX, + struct_size, obj, data, SIZE_MAX - 3 + unconst); + + kunit_info(test, "%d overflow size helper tests finished\n", count); +#undef check_one_size_helper +} + +static struct kunit_case overflow_test_cases[] = { + KUNIT_CASE(u8_overflow_test), + KUNIT_CASE(s8_overflow_test), + KUNIT_CASE(u16_overflow_test), + KUNIT_CASE(s16_overflow_test), + KUNIT_CASE(u32_overflow_test), + KUNIT_CASE(s32_overflow_test), +#if BITS_PER_LONG == 64 + KUNIT_CASE(u64_overflow_test), + KUNIT_CASE(s64_overflow_test), +#endif + KUNIT_CASE(overflow_shift_test), + KUNIT_CASE(overflow_allocation_test), + KUNIT_CASE(overflow_size_helpers_test), + {} +}; + +static 
struct kunit_suite overflow_test_suite = { + .name = "overflow", + .test_cases = overflow_test_cases, +}; + +kunit_test_suite(overflow_test_suite); + +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/lib/test_overflow.c b/lib/test_overflow.c deleted file mode 100644 index f6530fce799d..000000000000 --- a/lib/test_overflow.c +++ /dev/null @@ -1,726 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * Test cases for arithmetic overflow checks. - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DEFINE_TEST_ARRAY(t) \ - static const struct test_ ## t { \ - t a, b; \ - t sum, diff, prod; \ - bool s_of, d_of, p_of; \ - } t ## _tests[] __initconst - -DEFINE_TEST_ARRAY(u8) = { - {0, 0, 0, 0, 0, false, false, false}, - {1, 1, 2, 0, 1, false, false, false}, - {0, 1, 1, U8_MAX, 0, false, true, false}, - {1, 0, 1, 1, 0, false, false, false}, - {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, - {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, - {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, - {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, - {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, - - {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, - {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, - - {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, - {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, - {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, - {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, - - {48, 32, 80, 16, 0, false, false, true}, - {128, 128, 0, 0, 0, true, false, true}, - {123, 234, 101, 145, 110, true, true, true}, -}; -DEFINE_TEST_ARRAY(u16) = { - {0, 0, 0, 0, 0, false, false, false}, - {1, 1, 2, 0, 1, false, false, false}, - {0, 1, 1, U16_MAX, 0, false, true, false}, - {1, 0, 1, 1, 0, false, false, false}, - {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, - {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, - {1, U16_MAX, 0, 2, U16_MAX, true, true, false}, - {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, - {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, - - {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, - {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, - - {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, - {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, - {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, - {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, - - {123, 234, 357, 65425, 28782, false, true, false}, - {1234, 2345, 3579, 64425, 10146, false, true, true}, -}; -DEFINE_TEST_ARRAY(u32) = { - {0, 0, 0, 0, 0, false, false, false}, - {1, 1, 2, 0, 1, false, false, false}, - {0, 1, 1, U32_MAX, 0, false, true, false}, - {1, 0, 1, 1, 0, false, false, false}, - {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, - {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false}, - {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, - {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, - {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, - - {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, - {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, - - {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, - {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, - {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, - {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, - - {-2U, 1U, -1U, 
-3U, -2U, false, false, false}, - {-4U, 5U, 1U, -9U, -20U, true, false, true}, -}; - -DEFINE_TEST_ARRAY(u64) = { - {0, 0, 0, 0, 0, false, false, false}, - {1, 1, 2, 0, 1, false, false, false}, - {0, 1, 1, U64_MAX, 0, false, true, false}, - {1, 0, 1, 1, 0, false, false, false}, - {0, U64_MAX, U64_MAX, 1, 0, false, true, false}, - {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false}, - {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, - {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, - {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, - - {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, - {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, - - {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, - {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, - {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, - {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, - {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, - 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, - false, true, false}, - {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, -}; - -DEFINE_TEST_ARRAY(s8) = { - {0, 0, 0, 0, 0, false, false, false}, - - {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, - {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, - {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, - {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, - - {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, - {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, - {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, - {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, - {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, - {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, - - {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, - {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, - {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, - {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, - - {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, - {S8_MAX, S8_MAX, -2, 0, 1, true, false, true}, - - {-4, -32, -36, 28, -128, false, false, true}, - {-4, 32, 28, -36, -128, false, false, false}, -}; - -DEFINE_TEST_ARRAY(s16) = { - {0, 0, 0, 0, 0, false, false, false}, - - {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, - {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, - {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, - {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, - - {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, - {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, - {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, - {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, - {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, - {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, - - {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, - {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false}, - {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, - {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, - - {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, - {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, -}; -DEFINE_TEST_ARRAY(s32) = { - {0, 0, 0, 0, 0, false, false, false}, - - {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, - {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, 
false}, - {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, - {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, - - {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, - {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, - {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false}, - {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, - {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, - {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, - - {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, - {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, - {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, - {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, - - {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, - {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, -}; -DEFINE_TEST_ARRAY(s64) = { - {0, 0, 0, 0, 0, false, false, false}, - - {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, - {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, - {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, - {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, - - {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, - {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, - {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, - {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, - {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, - {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, - - {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, - {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, - {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, - {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, - - {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, - {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, - - {-1, -1, -2, 0, 1, false, false, false}, - {-1, -128, -129, 127, 128, false, false, false}, - {-128, -1, -129, -127, 128, false, false, false}, - {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, -}; - -#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ - t _r; \ - bool _of; \ - \ - _of = check_ ## op ## _overflow(a, b, &_r); \ - if (_of != of) { \ - pr_warn("expected "fmt" "sym" "fmt \ - " to%s overflow (type %s)\n", \ - a, b, of ? 
"" : " not", #t); \ - err = 1; \ - } \ - if (_r != r) { \ - pr_warn("expected "fmt" "sym" "fmt" == " \ - fmt", got "fmt" (type %s)\n", \ - a, b, r, _r, #t); \ - err = 1; \ - } \ -} while (0) - -#define DEFINE_TEST_FUNC(t, fmt) \ -static int __init do_test_ ## t(const struct test_ ## t *p) \ -{ \ - int err = 0; \ - \ - check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ - check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ - check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ - check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ - check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ - \ - return err; \ -} \ - \ -static int __init test_ ## t ## _overflow(void) { \ - int err = 0; \ - unsigned i; \ - \ - for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ - err |= do_test_ ## t(&t ## _tests[i]); \ - pr_info("%zu %s arithmetic tests finished\n", \ - ARRAY_SIZE(t ## _tests), #t); \ - return err; \ -} - -DEFINE_TEST_FUNC(u8, "%d"); -DEFINE_TEST_FUNC(s8, "%d"); -DEFINE_TEST_FUNC(u16, "%d"); -DEFINE_TEST_FUNC(s16, "%d"); -DEFINE_TEST_FUNC(u32, "%u"); -DEFINE_TEST_FUNC(s32, "%d"); -#if BITS_PER_LONG == 64 -DEFINE_TEST_FUNC(u64, "%llu"); -DEFINE_TEST_FUNC(s64, "%lld"); -#endif - -static int __init test_overflow_calculation(void) -{ - int err = 0; - - err |= test_u8_overflow(); - err |= test_s8_overflow(); - err |= test_u16_overflow(); - err |= test_s16_overflow(); - err |= test_u32_overflow(); - err |= test_s32_overflow(); -#if BITS_PER_LONG == 64 - err |= test_u64_overflow(); - err |= test_s64_overflow(); -#endif - - return err; -} - -static int __init test_overflow_shift(void) -{ - int err = 0; - int count = 0; - -/* Args are: value, shift, type, expected result, overflow expected */ -#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \ - int __failed = 0; \ - typeof(a) __a = (a); \ - typeof(s) __s = (s); \ - t __e = (expect); \ - t __d; \ - bool __of = check_shl_overflow(__a, __s, &__d); \ - if (__of != of) { \ - pr_warn("expected (%s)(%s << %s) to%s overflow\n", \ - #t, #a, #s, of ? "" : " not"); \ - __failed = 1; \ - } else if (!__of && __d != __e) { \ - pr_warn("expected (%s)(%s << %s) == %s\n", \ - #t, #a, #s, #expect); \ - if ((t)-1 < 0) \ - pr_warn("got %lld\n", (s64)__d); \ - else \ - pr_warn("got %llu\n", (u64)__d); \ - __failed = 1; \ - } \ - count++; \ - __failed; \ -}) - - /* Sane shifts. 
*/ - err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); - err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false); - err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false); - err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false); - err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false); - err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false); - err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false); - err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false); - err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false); - err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false); - err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false); - err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false); - err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false); - err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false); - err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false); - err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false); - err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false); - err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false); - err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false); - err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false); - err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false); - err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, - 0xFFFFFFFFULL << 32, false); - - /* Sane shift: start and end with 0, without a too-wide shift. */ - err |= TEST_ONE_SHIFT(0, 7, u8, 0, false); - err |= TEST_ONE_SHIFT(0, 15, u16, 0, false); - err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false); - err |= TEST_ONE_SHIFT(0, 31, u32, 0, false); - err |= TEST_ONE_SHIFT(0, 63, u64, 0, false); - - /* Sane shift: start and end with 0, without reaching signed bit. */ - err |= TEST_ONE_SHIFT(0, 6, s8, 0, false); - err |= TEST_ONE_SHIFT(0, 14, s16, 0, false); - err |= TEST_ONE_SHIFT(0, 30, int, 0, false); - err |= TEST_ONE_SHIFT(0, 30, s32, 0, false); - err |= TEST_ONE_SHIFT(0, 62, s64, 0, false); - - /* Overflow: shifted the bit off the end. */ - err |= TEST_ONE_SHIFT(1, 8, u8, 0, true); - err |= TEST_ONE_SHIFT(1, 16, u16, 0, true); - err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(1, 32, u32, 0, true); - err |= TEST_ONE_SHIFT(1, 64, u64, 0, true); - - /* Overflow: shifted into the signed bit. */ - err |= TEST_ONE_SHIFT(1, 7, s8, 0, true); - err |= TEST_ONE_SHIFT(1, 15, s16, 0, true); - err |= TEST_ONE_SHIFT(1, 31, int, 0, true); - err |= TEST_ONE_SHIFT(1, 31, s32, 0, true); - err |= TEST_ONE_SHIFT(1, 63, s64, 0, true); - - /* Overflow: high bit falls off unsigned types. */ - /* 10010110 */ - err |= TEST_ONE_SHIFT(150, 1, u8, 0, true); - /* 1000100010010110 */ - err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true); - /* 10000100000010001000100010010110 */ - err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true); - err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true); - /* 1000001000010000010000000100000010000100000010001000100010010110 */ - err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true); - - /* Overflow: bit shifted into signed bit on signed types. 
*/ - /* 01001011 */ - err |= TEST_ONE_SHIFT(75, 1, s8, 0, true); - /* 0100010001001011 */ - err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true); - /* 01000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true); - err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true); - /* 0100000100001000001000000010000001000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true); - - /* Overflow: bit shifted past signed bit on signed types. */ - /* 01001011 */ - err |= TEST_ONE_SHIFT(75, 2, s8, 0, true); - /* 0100010001001011 */ - err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true); - /* 01000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true); - err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true); - /* 0100000100001000001000000010000001000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); - - /* Overflow: values larger than destination type. */ - err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true); - err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); - err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true); - err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true); - err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); - - /* Nonsense: negative initial value. */ - err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true); - err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true); - err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true); - err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true); - err |= TEST_ONE_SHIFT(-10, 0, int, 0, true); - err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true); - err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true); - err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true); - err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true); - - /* Nonsense: negative shift values. */ - err |= TEST_ONE_SHIFT(0, -5, s8, 0, true); - err |= TEST_ONE_SHIFT(0, -5, u8, 0, true); - err |= TEST_ONE_SHIFT(0, -10, s16, 0, true); - err |= TEST_ONE_SHIFT(0, -10, u16, 0, true); - err |= TEST_ONE_SHIFT(0, -15, int, 0, true); - err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(0, -20, s32, 0, true); - err |= TEST_ONE_SHIFT(0, -20, u32, 0, true); - err |= TEST_ONE_SHIFT(0, -30, s64, 0, true); - err |= TEST_ONE_SHIFT(0, -30, u64, 0, true); - - /* Overflow: shifted at or beyond entire type's bit width. */ - err |= TEST_ONE_SHIFT(0, 8, u8, 0, true); - err |= TEST_ONE_SHIFT(0, 9, u8, 0, true); - err |= TEST_ONE_SHIFT(0, 8, s8, 0, true); - err |= TEST_ONE_SHIFT(0, 9, s8, 0, true); - err |= TEST_ONE_SHIFT(0, 16, u16, 0, true); - err |= TEST_ONE_SHIFT(0, 17, u16, 0, true); - err |= TEST_ONE_SHIFT(0, 16, s16, 0, true); - err |= TEST_ONE_SHIFT(0, 17, s16, 0, true); - err |= TEST_ONE_SHIFT(0, 32, u32, 0, true); - err |= TEST_ONE_SHIFT(0, 33, u32, 0, true); - err |= TEST_ONE_SHIFT(0, 32, int, 0, true); - err |= TEST_ONE_SHIFT(0, 33, int, 0, true); - err |= TEST_ONE_SHIFT(0, 32, s32, 0, true); - err |= TEST_ONE_SHIFT(0, 33, s32, 0, true); - err |= TEST_ONE_SHIFT(0, 64, u64, 0, true); - err |= TEST_ONE_SHIFT(0, 65, u64, 0, true); - err |= TEST_ONE_SHIFT(0, 64, s64, 0, true); - err |= TEST_ONE_SHIFT(0, 65, s64, 0, true); - - /* - * Corner case: for unsigned types, we fail when we've shifted - * through the entire width of bits. 
For signed types, we might - * want to match this behavior, but that would mean noticing if - * we shift through all but the signed bit, and this is not - * currently detected (but we'll notice an overflow into the - * signed bit). So, for now, we will test this condition but - * mark it as not expected to overflow. - */ - err |= TEST_ONE_SHIFT(0, 7, s8, 0, false); - err |= TEST_ONE_SHIFT(0, 15, s16, 0, false); - err |= TEST_ONE_SHIFT(0, 31, int, 0, false); - err |= TEST_ONE_SHIFT(0, 31, s32, 0, false); - err |= TEST_ONE_SHIFT(0, 63, s64, 0, false); - - pr_info("%d shift tests finished\n", count); - -#undef TEST_ONE_SHIFT - - return err; -} - -/* - * Deal with the various forms of allocator arguments. See comments above - * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". - */ -#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN) -#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP) -#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE) -#define alloc000(alloc, arg, sz) alloc(sz) -#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) -#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP) -#define free0(free, arg, ptr) free(ptr) -#define free1(free, arg, ptr) free(arg, ptr) - -/* Wrap around to 16K */ -#define TEST_SIZE (5 * 4096) - -#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ -static int __init test_ ## func (void *arg) \ -{ \ - volatile size_t a = TEST_SIZE; \ - volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ - void *ptr; \ - \ - /* Tiny allocation test. */ \ - ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ - if (!ptr) { \ - pr_warn(#func " failed regular allocation?!\n"); \ - return 1; \ - } \ - free ## want_arg (free_func, arg, ptr); \ - \ - /* Wrapped allocation test. */ \ - ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ - a * b); \ - if (!ptr) { \ - pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \ - return 1; \ - } \ - free ## want_arg (free_func, arg, ptr); \ - \ - /* Saturated allocation test. */ \ - ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ - array_size(a, b)); \ - if (ptr) { \ - pr_warn(#func " missed saturation!\n"); \ - free ## want_arg (free_func, arg, ptr); \ - return 1; \ - } \ - return 0; \ -} - -/* - * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node()) - * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc()) - * Allocator uses a special leading argument + | | (e.g. devm_kmalloc()) - * | | | - */ -DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); -DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); -DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); -DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); -DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0); -DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); -DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); -DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); -DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); -DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); -DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); - -static int __init test_overflow_allocation(void) -{ - const char device_name[] = "overflow-test"; - struct device *dev; - int count = 0; - int err = 0; - -#define check_allocation_overflow(alloc) ({ \ - count++; \ - test_ ## alloc(dev); \ -}) - - /* Create dummy device for devm_kmalloc()-family tests. 
*/ - dev = root_device_register(device_name); - if (IS_ERR(dev)) { - pr_warn("Cannot register test device\n"); - return 1; - } - - err |= check_allocation_overflow(kmalloc); - err |= check_allocation_overflow(kmalloc_node); - err |= check_allocation_overflow(kzalloc); - err |= check_allocation_overflow(kzalloc_node); - err |= check_allocation_overflow(__vmalloc); - err |= check_allocation_overflow(kvmalloc); - err |= check_allocation_overflow(kvmalloc_node); - err |= check_allocation_overflow(kvzalloc); - err |= check_allocation_overflow(kvzalloc_node); - err |= check_allocation_overflow(devm_kmalloc); - err |= check_allocation_overflow(devm_kzalloc); - - device_unregister(dev); - - pr_info("%d allocation overflow tests finished\n", count); - -#undef check_allocation_overflow - - return err; -} - -struct __test_flex_array { - unsigned long flags; - size_t count; - unsigned long data[]; -}; - -static int __init test_overflow_size_helpers(void) -{ - /* Make sure struct_size() can be used in a constant expression. */ - u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)]; - struct __test_flex_array *obj; - int count = 0; - int err = 0; - int var; - volatile int unconst = 0; - - /* Verify constant expression against runtime version. */ - var = 55; - OPTIMIZER_HIDE_VAR(var); - err |= sizeof(ce_array) != struct_size(obj, data, var); - -#define check_one_size_helper(expected, func, args...) ({ \ - bool __failure = false; \ - size_t _r; \ - \ - _r = func(args); \ - if (_r != (expected)) { \ - pr_warn("expected " #func "(" #args ") " \ - "to return %zu but got %zu instead\n", \ - (size_t)(expected), _r); \ - __failure = true; \ - } \ - count++; \ - __failure; \ -}) - - var = 4; - err |= check_one_size_helper(20, size_mul, var++, 5); - err |= check_one_size_helper(20, size_mul, 4, var++); - err |= check_one_size_helper(0, size_mul, 0, 3); - err |= check_one_size_helper(0, size_mul, 3, 0); - err |= check_one_size_helper(6, size_mul, 2, 3); - err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); - err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); - err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); - - var = 4; - err |= check_one_size_helper(9, size_add, var++, 5); - err |= check_one_size_helper(9, size_add, 4, var++); - err |= check_one_size_helper(9, size_add, 9, 0); - err |= check_one_size_helper(9, size_add, 0, 9); - err |= check_one_size_helper(5, size_add, 2, 3); - err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); - err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); - err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); - - var = 4; - err |= check_one_size_helper(1, size_sub, var--, 3); - err |= check_one_size_helper(1, size_sub, 4, var--); - err |= check_one_size_helper(1, size_sub, 3, 2); - err |= check_one_size_helper(9, size_sub, 9, 0); - err |= check_one_size_helper(SIZE_MAX, size_sub, 9, -3); - err |= check_one_size_helper(SIZE_MAX, size_sub, 0, 9); - err |= check_one_size_helper(SIZE_MAX, size_sub, 2, 3); - err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); - err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); - err |= check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); - err |= check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); - err |= check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); - err |= check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); - err |= check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); - - var = 4; - err |= 
check_one_size_helper(4 * sizeof(*obj->data), - flex_array_size, obj, data, var++); - err |= check_one_size_helper(5 * sizeof(*obj->data), - flex_array_size, obj, data, var++); - err |= check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst); - err |= check_one_size_helper(sizeof(*obj->data), - flex_array_size, obj, data, 1 + unconst); - err |= check_one_size_helper(7 * sizeof(*obj->data), - flex_array_size, obj, data, 7 + unconst); - err |= check_one_size_helper(SIZE_MAX, - flex_array_size, obj, data, -1 + unconst); - err |= check_one_size_helper(SIZE_MAX, - flex_array_size, obj, data, SIZE_MAX - 4 + unconst); - - var = 4; - err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), - struct_size, obj, data, var++); - err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), - struct_size, obj, data, var++); - err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst); - err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), - struct_size, obj, data, 1 + unconst); - err |= check_one_size_helper(SIZE_MAX, - struct_size, obj, data, -3 + unconst); - err |= check_one_size_helper(SIZE_MAX, - struct_size, obj, data, SIZE_MAX - 3 + unconst); - - pr_info("%d overflow size helper tests finished\n", count); - - return err; -} - -static int __init test_module_init(void) -{ - int err = 0; - - err |= test_overflow_calculation(); - err |= test_overflow_shift(); - err |= test_overflow_size_helpers(); - err |= test_overflow_allocation(); - - if (err) { - pr_warn("FAIL!\n"); - err = -EINVAL; - } else { - pr_info("all tests passed\n"); - } - - return err; -} - -static void __exit test_module_exit(void) -{ } - -module_init(test_module_init); -module_exit(test_module_exit); -MODULE_LICENSE("Dual MIT/GPL"); -- cgit v1.2.3 From c7500c1b53bfc083e8968cdce13a5a9d1ca9bf83 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Feb 2022 16:24:02 -0800 Subject: um: Allow builds with Clang Add SUBARCH target for Clang+um (which must go last, not alphabetically, so the other SUBARCHes are assigned). Remove open-coded "DEFINE" macro, instead using linux/kbuild.h's version which was updated to use Clang-friendly assembly in commit cf0c3e68aa81 ("kbuild: fix asm-offset generation to work with clang"). Redefine "DEFINE_LONGS" in terms of "COMMENT" and "DEFINE" so that the intended coment actually has useful content. Add a missed "break" to avoid implicit fall-through warnings. This lets me run KUnit tests with Clang: $ ./tools/testing/kunit/kunit.py run --make_options LLVM=1 ... 
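For context, with the kbuild.h helpers the redefined macro expands along these
lines (illustrative only; HOST_FOO and "struct bar" are made-up placeholders,
not symbols from this patch):

	DEFINE_LONGS(HOST_FOO, sizeof(struct bar))
	/* expands to roughly: */
	COMMENT("sizeof(struct bar) / sizeof(unsigned long)");
	DEFINE(HOST_FOO, sizeof(struct bar) / sizeof(unsigned long));

so the generated asm-offsets comment now spells out the division, rather than
showing only the undivided expression next to an already-divided value.
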
Cc: Jeff Dike Cc: Richard Weinberger Cc: Anton Ivanov Cc: Masahiro Yamada Cc: Nick Desaulniers Cc: Nathan Chancellor Cc: David Gow Cc: linux-um@lists.infradead.org Cc: linux-kbuild@vger.kernel.org Cc: linux-kselftest@vger.kernel.org Cc: kunit-dev@googlegroups.com Cc: llvm@lists.linux.dev Reviewed-by: Nathan Chancellor Link: https://lore.kernel.org/lkml/Yg2YubZxvYvx7%2Fnm@dev-arch.archlinux-ax161/ Tested-by: David Gow Link: https://lore.kernel.org/lkml/CABVgOSk=oFxsbSbQE-v65VwR2+mXeGXDDjzq8t7FShwjJ3+kUg@mail.gmail.com/ Signed-off-by: Kees Cook --- v1: https://lore.kernel.org/lkml/20220217002843.2312603-1-keescook@chromium.org v2: https://lore.kernel.org/lkml/20220224055831.1854786-1-keescook@chromium.org v3: - use kbuild.h to avoid duplication (Masahiro) - fix intended comments (Masahiro) - use SUBARCH (Nathan) --- arch/um/os-Linux/execvp.c | 1 + arch/x86/um/user-offsets.c | 9 ++++----- scripts/Makefile.clang | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/um/os-Linux/execvp.c b/arch/um/os-Linux/execvp.c index 84a0777c2a45..c09a5fd5e225 100644 --- a/arch/um/os-Linux/execvp.c +++ b/arch/um/os-Linux/execvp.c @@ -93,6 +93,7 @@ int execvp_noalloc(char *buf, const char *file, char *const argv[]) up finding no executable we can use, we want to diagnose that we did find one but were denied access. */ got_eacces = 1; + break; case ENOENT: case ESTALE: case ENOTDIR: diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c index bae61554abcc..e54a9814ccf1 100644 --- a/arch/x86/um/user-offsets.c +++ b/arch/x86/um/user-offsets.c @@ -8,12 +8,11 @@ #define __FRAME_OFFSETS #include #include +#include -#define DEFINE(sym, val) \ - asm volatile("\n->" #sym " %0 " #val : : "i" (val)) - -#define DEFINE_LONGS(sym, val) \ - asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) +#define DEFINE_LONGS(sym, val) \ + COMMENT(#val " / sizeof(unsigned long)"); \ + DEFINE(sym, val / sizeof(unsigned long)) void foo(void) { diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 51fc23e2e9e5..87285b76adb2 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -10,6 +10,7 @@ CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu +CLANG_TARGET_FLAGS_um := $(CLANG_TARGET_FLAGS_$(SUBARCH)) CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(SRCARCH)) ifeq ($(CROSS_COMPILE),) -- cgit v1.2.3 From 02788ebcf521fe78c24eb221fd1ed7f86792c330 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Feb 2022 16:03:41 -0800 Subject: lib: stackinit: Convert to KUnit Convert stackinit unit tests to KUnit, for better integration into the kernel self test framework. Includes a rename of test_stackinit.c to stackinit_kunit.c, and CONFIG_TEST_STACKINIT to CONFIG_STACKINIT_KUNIT_TEST. 
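The conversion follows the usual KUnit pattern; as a rough sketch (the names
below are illustrative and not taken from the new file):

	#include <kunit/test.h>

	static void example_test(struct kunit *test)
	{
		/* KUNIT_EXPECT/ASSERT calls replace pr_err() + failure counting. */
		KUNIT_EXPECT_EQ(test, 2 + 2, 4);
	}

	static struct kunit_case example_test_cases[] = {
		KUNIT_CASE(example_test),
		{}
	};

	static struct kunit_suite example_test_suite = {
		.name = "example",
		.test_cases = example_test_cases,
	};
	kunit_test_suites(&example_test_suite);
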
Adjust expected test results based on which stack initialization method was chosen: $ CMD="./tools/testing/kunit/kunit.py run stackinit --raw_output \ --arch=x86_64 --kconfig_add" $ $CMD | grep stackinit: # stackinit: pass:36 fail:0 skip:29 total:65 $ $CMD CONFIG_GCC_PLUGIN_STRUCTLEAK_USER=y | grep stackinit: # stackinit: pass:37 fail:0 skip:28 total:65 $ $CMD CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF=y | grep stackinit: # stackinit: pass:55 fail:0 skip:10 total:65 $ $CMD CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y | grep stackinit: # stackinit: pass:62 fail:0 skip:3 total:65 $ $CMD CONFIG_INIT_STACK_ALL_PATTERN=y --make_option LLVM=1 | grep stackinit: # stackinit: pass:60 fail:0 skip:5 total:65 $ $CMD CONFIG_INIT_STACK_ALL_ZERO=y --make_option LLVM=1 | grep stackinit: # stackinit: pass:60 fail:0 skip:5 total:65 Temporarily remove the userspace-build mode, which will be restored in a later patch. Expand the size of the pre-case switch variable so it doesn't get accidentally cleared. Cc: David Gow Cc: Daniel Latypov Cc: Arnd Bergmann Signed-off-by: Kees Cook --- v1: https://lore.kernel.org/lkml/20220224055145.1853657-1-keescook@chromium.org v2: - split "userspace KUnit stub" into separate header and patch (Daniel) - Improve commit log and comments (David) - Provide mapping of expected XFAIL tests to CONFIGs (David) --- lib/Kconfig.debug | 22 +-- lib/Makefile | 4 +- lib/stackinit_kunit.c | 461 ++++++++++++++++++++++++++++++++++++++++++++ lib/test_stackinit.c | 514 -------------------------------------------------- 4 files changed, 474 insertions(+), 527 deletions(-) create mode 100644 lib/stackinit_kunit.c delete mode 100644 lib/test_stackinit.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 14d90d03bc8d..a5556ab05240 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2511,6 +2511,17 @@ config OVERFLOW_KUNIT_TEST If unsure, say N. +config STACKINIT_KUNIT_TEST + tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Test if the kernel is zero-initializing stack variables and + padding. Coverage is controlled by compiler flags, + CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO, + CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, + or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. + config TEST_UDELAY tristate "udelay test driver" help @@ -2602,17 +2613,6 @@ config TEST_OBJAGG Enable this option to test object aggregation manager on boot (or module load). - -config TEST_STACKINIT - tristate "Test level of stack variable initialization" - help - Test if the kernel is zero-initializing stack variables and - padding. Coverage is controlled by compiler flags, - CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, - or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. - - If unsure, say N. 
- config TEST_MEMINIT tristate "Test heap/page initialization" help diff --git a/lib/Makefile b/lib/Makefile index fdfcbfaff32f..353bc09ce38d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -93,8 +93,6 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o -CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable) -obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o @@ -363,6 +361,8 @@ obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o +CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable) +obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c new file mode 100644 index 000000000000..35c69aa425b2 --- /dev/null +++ b/lib/stackinit_kunit.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Test cases for compiler-based stack variable zeroing via + * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*. + * For example, see: + * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests + * ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \ + * --make_option LLVM=1 \ + * --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +/* Exfiltration buffer. */ +#define MAX_VAR_SIZE 128 +static u8 check_buf[MAX_VAR_SIZE]; + +/* Character array to trigger stack protector in all functions. */ +#define VAR_BUFFER 32 + +/* Volatile mask to convince compiler to copy memory with 0xff. */ +static volatile u8 forced_mask = 0xff; + +/* Location and size tracking to validate fill and test are colocated. */ +static void *fill_start, *target_start; +static size_t fill_size, target_size; + +static bool range_contains(char *haystack_start, size_t haystack_size, + char *needle_start, size_t needle_size) +{ + if (needle_start >= haystack_start && + needle_start + needle_size <= haystack_start + haystack_size) + return true; + return false; +} + +/* Whether the test is expected to fail. 
*/ +#define WANT_SUCCESS 0 +#define XFAIL 1 + +#define DO_NOTHING_TYPE_SCALAR(var_type) var_type +#define DO_NOTHING_TYPE_STRING(var_type) void +#define DO_NOTHING_TYPE_STRUCT(var_type) void + +#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr) +#define DO_NOTHING_RETURN_STRING(ptr) /**/ +#define DO_NOTHING_RETURN_STRUCT(ptr) /**/ + +#define DO_NOTHING_CALL_SCALAR(var, name) \ + (var) = do_nothing_ ## name(&(var)) +#define DO_NOTHING_CALL_STRING(var, name) \ + do_nothing_ ## name(var) +#define DO_NOTHING_CALL_STRUCT(var, name) \ + do_nothing_ ## name(&(var)) + +#define FETCH_ARG_SCALAR(var) &var +#define FETCH_ARG_STRING(var) var +#define FETCH_ARG_STRUCT(var) &var + +#define FILL_SIZE_STRING 16 + +#define INIT_CLONE_SCALAR /**/ +#define INIT_CLONE_STRING [FILL_SIZE_STRING] +#define INIT_CLONE_STRUCT /**/ + +#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero)) +#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero)) +/* + * For the struct, intentionally poison padding to see if it gets + * copied out in direct assignments. + * */ +#define ZERO_CLONE_STRUCT(zero) \ + do { \ + memset(&(zero), 0xFF, sizeof(zero)); \ + zero.one = 0; \ + zero.two = 0; \ + zero.three = 0; \ + zero.four = 0; \ + } while (0) + +#define INIT_SCALAR_none(var_type) /**/ +#define INIT_SCALAR_zero(var_type) = 0 + +#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/ +#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { } + +#define INIT_STRUCT_none(var_type) /**/ +#define INIT_STRUCT_zero(var_type) = { } + + +#define __static_partial { .two = 0, } +#define __static_all { .one = 0, \ + .two = 0, \ + .three = 0, \ + .four = 0, \ + } +#define __dynamic_partial { .two = arg->two, } +#define __dynamic_all { .one = arg->one, \ + .two = arg->two, \ + .three = arg->three, \ + .four = arg->four, \ + } +#define __runtime_partial var.two = 0 +#define __runtime_all var.one = 0; \ + var.two = 0; \ + var.three = 0; \ + var.four = 0 + +#define INIT_STRUCT_static_partial(var_type) \ + = __static_partial +#define INIT_STRUCT_static_all(var_type) \ + = __static_all +#define INIT_STRUCT_dynamic_partial(var_type) \ + = __dynamic_partial +#define INIT_STRUCT_dynamic_all(var_type) \ + = __dynamic_all +#define INIT_STRUCT_runtime_partial(var_type) \ + ; __runtime_partial +#define INIT_STRUCT_runtime_all(var_type) \ + ; __runtime_all + +#define INIT_STRUCT_assigned_static_partial(var_type) \ + ; var = (var_type)__static_partial +#define INIT_STRUCT_assigned_static_all(var_type) \ + ; var = (var_type)__static_all +#define INIT_STRUCT_assigned_dynamic_partial(var_type) \ + ; var = (var_type)__dynamic_partial +#define INIT_STRUCT_assigned_dynamic_all(var_type) \ + ; var = (var_type)__dynamic_all + +#define INIT_STRUCT_assigned_copy(var_type) \ + ; var = *(arg) + +/* + * @name: unique string name for the test + * @var_type: type to be tested for zeroing initialization + * @which: is this a SCALAR, STRING, or STRUCT type? + * @init_level: what kind of initialization is performed + * @xfail: is this test expected to fail? + */ +#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \ +/* Returns 0 on success, 1 on failure. */ \ +static noinline void test_ ## name (struct kunit *test) \ +{ \ + var_type zero INIT_CLONE_ ## which; \ + int ignored; \ + u8 sum = 0, i; \ + \ + /* Notice when a new test is larger than expected. */ \ + BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \ + \ + /* Fill clone type with zero for per-field init. */ \ + ZERO_CLONE_ ## which(zero); \ + /* Clear entire check buffer for 0xFF overlap test. 
*/ \ + memset(check_buf, 0x00, sizeof(check_buf)); \ + /* Fill stack with 0xFF. */ \ + ignored = leaf_ ##name((unsigned long)&ignored, 1, \ + FETCH_ARG_ ## which(zero)); \ + /* Verify all bytes overwritten with 0xFF. */ \ + for (sum = 0, i = 0; i < target_size; i++) \ + sum += (check_buf[i] != 0xFF); \ + KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ + "leaf fill was not 0xFF!?\n"); \ + /* Clear entire check buffer for later bit tests. */ \ + memset(check_buf, 0x00, sizeof(check_buf)); \ + /* Extract stack-defined variable contents. */ \ + ignored = leaf_ ##name((unsigned long)&ignored, 0, \ + FETCH_ARG_ ## which(zero)); \ + \ + /* Validate that compiler lined up fill and target. */ \ + KUNIT_ASSERT_TRUE_MSG(test, \ + range_contains(fill_start, fill_size, \ + target_start, target_size), \ + "stack fill missed target!? " \ + "(fill %zu wide, target offset by %d)\n", \ + fill_size, \ + (int)((ssize_t)(uintptr_t)fill_start - \ + (ssize_t)(uintptr_t)target_start)); \ + \ + /* Look for any bytes still 0xFF in check region. */ \ + for (sum = 0, i = 0; i < target_size; i++) \ + sum += (check_buf[i] == 0xFF); \ + \ + if (sum != 0 && xfail) \ + kunit_skip(test, \ + "XFAIL uninit bytes: %d\n", \ + sum); \ + KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ + "uninit bytes: %d\n", sum); \ +} +#define DEFINE_TEST(name, var_type, which, init_level, xfail) \ +/* no-op to force compiler into ignoring "uninitialized" vars */\ +static noinline DO_NOTHING_TYPE_ ## which(var_type) \ +do_nothing_ ## name(var_type *ptr) \ +{ \ + /* Will always be true, but compiler doesn't know. */ \ + if ((unsigned long)ptr > 0x2) \ + return DO_NOTHING_RETURN_ ## which(ptr); \ + else \ + return DO_NOTHING_RETURN_ ## which(ptr + 1); \ +} \ +static noinline int leaf_ ## name(unsigned long sp, bool fill, \ + var_type *arg) \ +{ \ + char buf[VAR_BUFFER]; \ + var_type var \ + INIT_ ## which ## _ ## init_level(var_type); \ + \ + target_start = &var; \ + target_size = sizeof(var); \ + /* \ + * Keep this buffer around to make sure we've got a \ + * stack frame of SOME kind... \ + */ \ + memset(buf, (char)(sp & 0xff), sizeof(buf)); \ + /* Fill variable with 0xFF. */ \ + if (fill) { \ + fill_start = &var; \ + fill_size = sizeof(var); \ + memset(fill_start, \ + (char)((sp & 0xff) | forced_mask), \ + fill_size); \ + } \ + \ + /* Silence "never initialized" warnings. */ \ + DO_NOTHING_CALL_ ## which(var, name); \ + \ + /* Exfiltrate "var". */ \ + memcpy(check_buf, target_start, target_size); \ + \ + return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \ +} \ +DEFINE_TEST_DRIVER(name, var_type, which, xfail) + +/* Structure with no padding. */ +struct test_packed { + unsigned long one; + unsigned long two; + unsigned long three; + unsigned long four; +}; + +/* Simple structure with padding likely to be covered by compiler. */ +struct test_small_hole { + size_t one; + char two; + /* 3 byte padding hole here. */ + int three; + unsigned long four; +}; + +/* Trigger unhandled padding in a structure. */ +struct test_big_hole { + u8 one; + u8 two; + u8 three; + /* 61 byte padding hole here. */ + u8 four __aligned(64); +} __aligned(64); + +struct test_trailing_hole { + char *one; + char *two; + char *three; + char four; + /* "sizeof(unsigned long) - 1" byte padding hole here. */ +}; + +/* Test if STRUCTLEAK is clearing structs with __user fields. 
*/ +struct test_user { + u8 one; + unsigned long two; + char __user *three; + unsigned long four; +}; + +#define ALWAYS_PASS WANT_SUCCESS +#define ALWAYS_FAIL XFAIL + +#ifdef CONFIG_INIT_STACK_NONE +# define USER_PASS XFAIL +# define BYREF_PASS XFAIL +# define STRONG_PASS XFAIL +#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER) +# define USER_PASS WANT_SUCCESS +# define BYREF_PASS XFAIL +# define STRONG_PASS XFAIL +#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) +# define USER_PASS WANT_SUCCESS +# define BYREF_PASS WANT_SUCCESS +# define STRONG_PASS XFAIL +#else +# define USER_PASS WANT_SUCCESS +# define BYREF_PASS WANT_SUCCESS +# define STRONG_PASS WANT_SUCCESS +#endif + +#define DEFINE_SCALAR_TEST(name, init, xfail) \ + DEFINE_TEST(name ## _ ## init, name, SCALAR, \ + init, xfail) + +#define DEFINE_SCALAR_TESTS(init, xfail) \ + DEFINE_SCALAR_TEST(u8, init, xfail); \ + DEFINE_SCALAR_TEST(u16, init, xfail); \ + DEFINE_SCALAR_TEST(u32, init, xfail); \ + DEFINE_SCALAR_TEST(u64, init, xfail); \ + DEFINE_TEST(char_array_ ## init, unsigned char, \ + STRING, init, xfail) + +#define DEFINE_STRUCT_TEST(name, init, xfail) \ + DEFINE_TEST(name ## _ ## init, \ + struct test_ ## name, STRUCT, init, \ + xfail) + +#define DEFINE_STRUCT_TESTS(init, xfail) \ + DEFINE_STRUCT_TEST(small_hole, init, xfail); \ + DEFINE_STRUCT_TEST(big_hole, init, xfail); \ + DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \ + DEFINE_STRUCT_TEST(packed, init, xfail) + +#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \ + DEFINE_STRUCT_TESTS(base ## _ ## partial, \ + xfail); \ + DEFINE_STRUCT_TESTS(base ## _ ## all, xfail) + +/* These should be fully initialized all the time! */ +DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS); +DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS); +/* Struct initializers: padding may be left uninitialized. */ +DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS); +DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL); +/* No initialization without compiler instrumentation. */ +DEFINE_SCALAR_TESTS(none, STRONG_PASS); +DEFINE_STRUCT_TESTS(none, BYREF_PASS); +/* Initialization of members with __user attribute. */ +DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS); + +/* + * Check two uses through a variable declaration outside either path, + * which was noticed as a special case in porting earlier stack init + * compiler logic. + */ +static int noinline __leaf_switch_none(int path, bool fill) +{ + switch (path) { + /* + * This is intentionally unreachable. 
To silence the + * warning, build with -Wno-switch-unreachable + */ + uint64_t var[10]; + + case 1: + target_start = &var; + target_size = sizeof(var); + if (fill) { + fill_start = &var; + fill_size = sizeof(var); + + memset(fill_start, forced_mask | 0x55, fill_size); + } + memcpy(check_buf, target_start, target_size); + break; + case 2: + target_start = &var; + target_size = sizeof(var); + if (fill) { + fill_start = &var; + fill_size = sizeof(var); + + memset(fill_start, forced_mask | 0xaa, fill_size); + } + memcpy(check_buf, target_start, target_size); + break; + default: + var[1] = 5; + return var[1] & forced_mask; + } + return 0; +} + +static noinline int leaf_switch_1_none(unsigned long sp, bool fill, + uint64_t *arg) +{ + return __leaf_switch_none(1, fill); +} + +static noinline int leaf_switch_2_none(unsigned long sp, bool fill, + uint64_t *arg) +{ + return __leaf_switch_none(2, fill); +} + +/* + * These are expected to fail for most configurations because neither + * GCC nor Clang have a way to perform initialization of variables in + * non-code areas (i.e. in a switch statement before the first "case"). + * https://bugs.llvm.org/show_bug.cgi?id=44916 + */ +DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL); +DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL); + +#define KUNIT_test_scalars(init) \ + KUNIT_CASE(test_u8_ ## init), \ + KUNIT_CASE(test_u16_ ## init), \ + KUNIT_CASE(test_u32_ ## init), \ + KUNIT_CASE(test_u64_ ## init), \ + KUNIT_CASE(test_char_array_ ## init) + +#define KUNIT_test_structs(init) \ + KUNIT_CASE(test_small_hole_ ## init), \ + KUNIT_CASE(test_big_hole_ ## init), \ + KUNIT_CASE(test_trailing_hole_ ## init),\ + KUNIT_CASE(test_packed_ ## init) \ + +static struct kunit_case stackinit_test_cases[] = { + /* These are explicitly initialized and should always pass. */ + KUNIT_test_scalars(zero), + KUNIT_test_structs(zero), + /* Padding here appears to be accidentally always initialized? */ + KUNIT_test_structs(dynamic_partial), + KUNIT_test_structs(assigned_dynamic_partial), + /* Padding initialization depends on compiler behaviors. */ + KUNIT_test_structs(static_partial), + KUNIT_test_structs(static_all), + KUNIT_test_structs(dynamic_all), + KUNIT_test_structs(runtime_partial), + KUNIT_test_structs(runtime_all), + KUNIT_test_structs(assigned_static_partial), + KUNIT_test_structs(assigned_static_all), + KUNIT_test_structs(assigned_dynamic_all), + /* Everything fails this since it effectively performs a memcpy(). */ + KUNIT_test_structs(assigned_copy), + /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */ + KUNIT_test_scalars(none), + KUNIT_CASE(test_switch_1_none), + KUNIT_CASE(test_switch_2_none), + /* STRUCTLEAK_BYREF should cover from here down. */ + KUNIT_test_structs(none), + /* STRUCTLEAK will only cover this. */ + KUNIT_CASE(test_user), + {} +}; + +static struct kunit_suite stackinit_test_suite = { + .name = "stackinit", + .test_cases = stackinit_test_cases, +}; + +kunit_test_suites(&stackinit_test_suite); + +MODULE_LICENSE("GPL"); diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c deleted file mode 100644 index a3c74e6a21ff..000000000000 --- a/lib/test_stackinit.c +++ /dev/null @@ -1,514 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Test cases for compiler-based stack variable zeroing via - * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*. 
- * - * External build example: - * clang -O2 -Wall -ftrivial-auto-var-init=pattern \ - * -o test_stackinit test_stackinit.c - */ -#ifdef __KERNEL__ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include - -#else - -/* Userspace headers. */ -#include -#include -#include -#include -#include -#include - -/* Linux kernel-ism stubs for stand-alone userspace build. */ -#define KBUILD_MODNAME "stackinit" -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__) -#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__) -#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__) -#define __init /**/ -#define __exit /**/ -#define __user /**/ -#define noinline __attribute__((__noinline__)) -#define __aligned(x) __attribute__((__aligned__(x))) -#ifdef __clang__ -# define __compiletime_error(message) /**/ -#else -# define __compiletime_error(message) __attribute__((__error__(message))) -#endif -#define __compiletime_assert(condition, msg, prefix, suffix) \ - do { \ - extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (!(condition)) \ - prefix ## suffix(); \ - } while (0) -#define _compiletime_assert(condition, msg, prefix, suffix) \ - __compiletime_assert(condition, msg, prefix, suffix) -#define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) -#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) -#define BUILD_BUG_ON(condition) \ - BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) -typedef uint8_t u8; -typedef uint16_t u16; -typedef uint32_t u32; -typedef uint64_t u64; - -#define module_init(func) static int (*do_init)(void) = func -#define module_exit(func) static void (*do_exit)(void) = func -#define MODULE_LICENSE(str) int main(void) { \ - int rc; \ - /* License: str */ \ - rc = do_init(); \ - if (rc == 0) \ - do_exit(); \ - return rc; \ - } - -#endif /* __KERNEL__ */ - -/* Exfiltration buffer. */ -#define MAX_VAR_SIZE 128 -static u8 check_buf[MAX_VAR_SIZE]; - -/* Character array to trigger stack protector in all functions. */ -#define VAR_BUFFER 32 - -/* Volatile mask to convince compiler to copy memory with 0xff. */ -static volatile u8 forced_mask = 0xff; - -/* Location and size tracking to validate fill and test are colocated. */ -static void *fill_start, *target_start; -static size_t fill_size, target_size; - -static bool range_contains(char *haystack_start, size_t haystack_size, - char *needle_start, size_t needle_size) -{ - if (needle_start >= haystack_start && - needle_start + needle_size <= haystack_start + haystack_size) - return true; - return false; -} - -/* Whether the test is expected to fail. 
*/ -#define WANT_SUCCESS 0 -#define XFAIL 1 - -#define DO_NOTHING_TYPE_SCALAR(var_type) var_type -#define DO_NOTHING_TYPE_STRING(var_type) void -#define DO_NOTHING_TYPE_STRUCT(var_type) void - -#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr) -#define DO_NOTHING_RETURN_STRING(ptr) /**/ -#define DO_NOTHING_RETURN_STRUCT(ptr) /**/ - -#define DO_NOTHING_CALL_SCALAR(var, name) \ - (var) = do_nothing_ ## name(&(var)) -#define DO_NOTHING_CALL_STRING(var, name) \ - do_nothing_ ## name(var) -#define DO_NOTHING_CALL_STRUCT(var, name) \ - do_nothing_ ## name(&(var)) - -#define FETCH_ARG_SCALAR(var) &var -#define FETCH_ARG_STRING(var) var -#define FETCH_ARG_STRUCT(var) &var - -#define FILL_SIZE_STRING 16 - -#define INIT_CLONE_SCALAR /**/ -#define INIT_CLONE_STRING [FILL_SIZE_STRING] -#define INIT_CLONE_STRUCT /**/ - -#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero)) -#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero)) -/* - * For the struct, intentionally poison padding to see if it gets - * copied out in direct assignments. - * */ -#define ZERO_CLONE_STRUCT(zero) \ - do { \ - memset(&(zero), 0xFF, sizeof(zero)); \ - zero.one = 0; \ - zero.two = 0; \ - zero.three = 0; \ - zero.four = 0; \ - } while (0) - -#define INIT_SCALAR_none(var_type) /**/ -#define INIT_SCALAR_zero(var_type) = 0 - -#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/ -#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { } - -#define INIT_STRUCT_none(var_type) /**/ -#define INIT_STRUCT_zero(var_type) = { } - - -#define __static_partial { .two = 0, } -#define __static_all { .one = 0, \ - .two = 0, \ - .three = 0, \ - .four = 0, \ - } -#define __dynamic_partial { .two = arg->two, } -#define __dynamic_all { .one = arg->one, \ - .two = arg->two, \ - .three = arg->three, \ - .four = arg->four, \ - } -#define __runtime_partial var.two = 0 -#define __runtime_all var.one = 0; \ - var.two = 0; \ - var.three = 0; \ - var.four = 0 - -#define INIT_STRUCT_static_partial(var_type) \ - = __static_partial -#define INIT_STRUCT_static_all(var_type) \ - = __static_all -#define INIT_STRUCT_dynamic_partial(var_type) \ - = __dynamic_partial -#define INIT_STRUCT_dynamic_all(var_type) \ - = __dynamic_all -#define INIT_STRUCT_runtime_partial(var_type) \ - ; __runtime_partial -#define INIT_STRUCT_runtime_all(var_type) \ - ; __runtime_all - -#define INIT_STRUCT_assigned_static_partial(var_type) \ - ; var = (var_type)__static_partial -#define INIT_STRUCT_assigned_static_all(var_type) \ - ; var = (var_type)__static_all -#define INIT_STRUCT_assigned_dynamic_partial(var_type) \ - ; var = (var_type)__dynamic_partial -#define INIT_STRUCT_assigned_dynamic_all(var_type) \ - ; var = (var_type)__dynamic_all - -#define INIT_STRUCT_assigned_copy(var_type) \ - ; var = *(arg) - -/* - * @name: unique string name for the test - * @var_type: type to be tested for zeroing initialization - * @which: is this a SCALAR, STRING, or STRUCT type? - * @init_level: what kind of initialization is performed - * @xfail: is this test expected to fail? - */ -#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \ -/* Returns 0 on success, 1 on failure. */ \ -static noinline __init int test_ ## name (void) \ -{ \ - var_type zero INIT_CLONE_ ## which; \ - int ignored; \ - u8 sum = 0, i; \ - \ - /* Notice when a new test is larger than expected. */ \ - BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \ - \ - /* Fill clone type with zero for per-field init. */ \ - ZERO_CLONE_ ## which(zero); \ - /* Clear entire check buffer for 0xFF overlap test. 
*/ \ - memset(check_buf, 0x00, sizeof(check_buf)); \ - /* Fill stack with 0xFF. */ \ - ignored = leaf_ ##name((unsigned long)&ignored, 1, \ - FETCH_ARG_ ## which(zero)); \ - /* Verify all bytes overwritten with 0xFF. */ \ - for (sum = 0, i = 0; i < target_size; i++) \ - sum += (check_buf[i] != 0xFF); \ - if (sum) { \ - pr_err(#name ": leaf fill was not 0xFF!?\n"); \ - return 1; \ - } \ - /* Clear entire check buffer for later bit tests. */ \ - memset(check_buf, 0x00, sizeof(check_buf)); \ - /* Extract stack-defined variable contents. */ \ - ignored = leaf_ ##name((unsigned long)&ignored, 0, \ - FETCH_ARG_ ## which(zero)); \ - \ - /* Validate that compiler lined up fill and target. */ \ - if (!range_contains(fill_start, fill_size, \ - target_start, target_size)) { \ - pr_err(#name ": stack fill missed target!?\n"); \ - pr_err(#name ": fill %zu wide\n", fill_size); \ - pr_err(#name ": target offset by %d\n", \ - (int)((ssize_t)(uintptr_t)fill_start - \ - (ssize_t)(uintptr_t)target_start)); \ - return 1; \ - } \ - \ - /* Look for any bytes still 0xFF in check region. */ \ - for (sum = 0, i = 0; i < target_size; i++) \ - sum += (check_buf[i] == 0xFF); \ - \ - if (sum == 0) { \ - pr_info(#name " ok\n"); \ - return 0; \ - } else { \ - pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \ - (xfail) ? "X" : "", sum); \ - return (xfail) ? 0 : 1; \ - } \ -} -#define DEFINE_TEST(name, var_type, which, init_level, xfail) \ -/* no-op to force compiler into ignoring "uninitialized" vars */\ -static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \ -do_nothing_ ## name(var_type *ptr) \ -{ \ - /* Will always be true, but compiler doesn't know. */ \ - if ((unsigned long)ptr > 0x2) \ - return DO_NOTHING_RETURN_ ## which(ptr); \ - else \ - return DO_NOTHING_RETURN_ ## which(ptr + 1); \ -} \ -static noinline __init int leaf_ ## name(unsigned long sp, \ - bool fill, \ - var_type *arg) \ -{ \ - char buf[VAR_BUFFER]; \ - var_type var \ - INIT_ ## which ## _ ## init_level(var_type); \ - \ - target_start = &var; \ - target_size = sizeof(var); \ - /* \ - * Keep this buffer around to make sure we've got a \ - * stack frame of SOME kind... \ - */ \ - memset(buf, (char)(sp & 0xff), sizeof(buf)); \ - /* Fill variable with 0xFF. */ \ - if (fill) { \ - fill_start = &var; \ - fill_size = sizeof(var); \ - memset(fill_start, \ - (char)((sp & 0xff) | forced_mask), \ - fill_size); \ - } \ - \ - /* Silence "never initialized" warnings. */ \ - DO_NOTHING_CALL_ ## which(var, name); \ - \ - /* Exfiltrate "var". */ \ - memcpy(check_buf, target_start, target_size); \ - \ - return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \ -} \ -DEFINE_TEST_DRIVER(name, var_type, which, xfail) - -/* Structure with no padding. */ -struct test_packed { - unsigned long one; - unsigned long two; - unsigned long three; - unsigned long four; -}; - -/* Simple structure with padding likely to be covered by compiler. */ -struct test_small_hole { - size_t one; - char two; - /* 3 byte padding hole here. */ - int three; - unsigned long four; -}; - -/* Trigger unhandled padding in a structure. */ -struct test_big_hole { - u8 one; - u8 two; - u8 three; - /* 61 byte padding hole here. */ - u8 four __aligned(64); -} __aligned(64); - -struct test_trailing_hole { - char *one; - char *two; - char *three; - char four; - /* "sizeof(unsigned long) - 1" byte padding hole here. */ -}; - -/* Test if STRUCTLEAK is clearing structs with __user fields. 
*/ -struct test_user { - u8 one; - unsigned long two; - char __user *three; - unsigned long four; -}; - -#define DEFINE_SCALAR_TEST(name, init, xfail) \ - DEFINE_TEST(name ## _ ## init, name, SCALAR, \ - init, xfail) - -#define DEFINE_SCALAR_TESTS(init, xfail) \ - DEFINE_SCALAR_TEST(u8, init, xfail); \ - DEFINE_SCALAR_TEST(u16, init, xfail); \ - DEFINE_SCALAR_TEST(u32, init, xfail); \ - DEFINE_SCALAR_TEST(u64, init, xfail); \ - DEFINE_TEST(char_array_ ## init, unsigned char, \ - STRING, init, xfail) - -#define DEFINE_STRUCT_TEST(name, init, xfail) \ - DEFINE_TEST(name ## _ ## init, \ - struct test_ ## name, STRUCT, init, \ - xfail) - -#define DEFINE_STRUCT_TESTS(init, xfail) \ - DEFINE_STRUCT_TEST(small_hole, init, xfail); \ - DEFINE_STRUCT_TEST(big_hole, init, xfail); \ - DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \ - DEFINE_STRUCT_TEST(packed, init, xfail) - -#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \ - DEFINE_STRUCT_TESTS(base ## _ ## partial, \ - WANT_SUCCESS); \ - DEFINE_STRUCT_TESTS(base ## _ ## all, \ - WANT_SUCCESS) - -/* These should be fully initialized all the time! */ -DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS); -DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS); -/* Struct initializers: padding may be left uninitialized. */ -DEFINE_STRUCT_INITIALIZER_TESTS(static); -DEFINE_STRUCT_INITIALIZER_TESTS(dynamic); -DEFINE_STRUCT_INITIALIZER_TESTS(runtime); -DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static); -DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic); -DEFINE_STRUCT_TESTS(assigned_copy, XFAIL); -/* No initialization without compiler instrumentation. */ -DEFINE_SCALAR_TESTS(none, WANT_SUCCESS); -DEFINE_STRUCT_TESTS(none, WANT_SUCCESS); -/* Initialization of members with __user attribute. */ -DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS); - -/* - * Check two uses through a variable declaration outside either path, - * which was noticed as a special case in porting earlier stack init - * compiler logic. - */ -static int noinline __leaf_switch_none(int path, bool fill) -{ - switch (path) { - /* - * This is intentionally unreachable. To silence the - * warning, build with -Wno-switch-unreachable - */ - uint64_t var; - - case 1: - target_start = &var; - target_size = sizeof(var); - if (fill) { - fill_start = &var; - fill_size = sizeof(var); - - memset(fill_start, forced_mask | 0x55, fill_size); - } - memcpy(check_buf, target_start, target_size); - break; - case 2: - target_start = &var; - target_size = sizeof(var); - if (fill) { - fill_start = &var; - fill_size = sizeof(var); - - memset(fill_start, forced_mask | 0xaa, fill_size); - } - memcpy(check_buf, target_start, target_size); - break; - default: - var = 5; - return var & forced_mask; - } - return 0; -} - -static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill, - uint64_t *arg) -{ - return __leaf_switch_none(1, fill); -} - -static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill, - uint64_t *arg) -{ - return __leaf_switch_none(2, fill); -} - -/* - * These are expected to fail for most configurations because neither - * GCC nor Clang have a way to perform initialization of variables in - * non-code areas (i.e. in a switch statement before the first "case"). 
- * https://bugs.llvm.org/show_bug.cgi?id=44916 - */ -DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL); -DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL); - -static int __init test_stackinit_init(void) -{ - unsigned int failures = 0; - -#define test_scalars(init) do { \ - failures += test_u8_ ## init (); \ - failures += test_u16_ ## init (); \ - failures += test_u32_ ## init (); \ - failures += test_u64_ ## init (); \ - failures += test_char_array_ ## init (); \ - } while (0) - -#define test_structs(init) do { \ - failures += test_small_hole_ ## init (); \ - failures += test_big_hole_ ## init (); \ - failures += test_trailing_hole_ ## init (); \ - failures += test_packed_ ## init (); \ - } while (0) - - /* These are explicitly initialized and should always pass. */ - test_scalars(zero); - test_structs(zero); - /* Padding here appears to be accidentally always initialized? */ - test_structs(dynamic_partial); - test_structs(assigned_dynamic_partial); - /* Padding initialization depends on compiler behaviors. */ - test_structs(static_partial); - test_structs(static_all); - test_structs(dynamic_all); - test_structs(runtime_partial); - test_structs(runtime_all); - test_structs(assigned_static_partial); - test_structs(assigned_static_all); - test_structs(assigned_dynamic_all); - /* Everything fails this since it effectively performs a memcpy(). */ - test_structs(assigned_copy); - - /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */ - test_scalars(none); - failures += test_switch_1_none(); - failures += test_switch_2_none(); - - /* STRUCTLEAK_BYREF should cover from here down. */ - test_structs(none); - - /* STRUCTLEAK will only cover this. */ - failures += test_user(); - - if (failures == 0) - pr_info("all tests passed!\n"); - else - pr_err("failures: %u\n", failures); - - return failures ? -EINVAL : 0; -} -module_init(test_stackinit_init); - -static void __exit test_stackinit_exit(void) -{ } -module_exit(test_stackinit_exit); - -MODULE_LICENSE("GPL"); -- cgit v1.2.3
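
As a usage sketch (assuming a kernel built with CONFIG_KUNIT=m and
CONFIG_STACKINIT_KUNIT_TEST=m, with modules installed), the converted test can
also be run outside kunit.py by loading it as a module, with the results
appearing in the kernel log:

	$ modprobe stackinit_kunit
	$ dmesg | grep stackinit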