Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 1
-rw-r--r--  lib/Kconfig.debug | 39
-rw-r--r--  lib/Kconfig.ubsan | 10
-rw-r--r--  lib/Makefile | 9
-rw-r--r--  lib/bch.c | 38
-rw-r--r--  lib/checksum_kunit.c | 54
-rw-r--r--  lib/clz_ctz.c | 32
-rw-r--r--  lib/cpumask.c | 5
-rw-r--r--  lib/crypto/Makefile | 2
-rw-r--r--  lib/crypto/mpi/Makefile (renamed from lib/mpi/Makefile) | 0
-rw-r--r--  lib/crypto/mpi/ec.c (renamed from lib/mpi/ec.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-add1.c (renamed from lib/mpi/generic_mpih-add1.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-lshift.c (renamed from lib/mpi/generic_mpih-lshift.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-mul1.c (renamed from lib/mpi/generic_mpih-mul1.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-mul2.c (renamed from lib/mpi/generic_mpih-mul2.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-mul3.c (renamed from lib/mpi/generic_mpih-mul3.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-rshift.c (renamed from lib/mpi/generic_mpih-rshift.c) | 0
-rw-r--r--  lib/crypto/mpi/generic_mpih-sub1.c (renamed from lib/mpi/generic_mpih-sub1.c) | 0
-rw-r--r--  lib/crypto/mpi/longlong.h (renamed from lib/mpi/longlong.h) | 0
-rw-r--r--  lib/crypto/mpi/mpi-add.c (renamed from lib/mpi/mpi-add.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-bit.c (renamed from lib/mpi/mpi-bit.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-cmp.c (renamed from lib/mpi/mpi-cmp.c) | 8
-rw-r--r--  lib/crypto/mpi/mpi-div.c (renamed from lib/mpi/mpi-div.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-inline.h (renamed from lib/mpi/mpi-inline.h) | 0
-rw-r--r--  lib/crypto/mpi/mpi-internal.h (renamed from lib/mpi/mpi-internal.h) | 0
-rw-r--r--  lib/crypto/mpi/mpi-inv.c (renamed from lib/mpi/mpi-inv.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-mod.c (renamed from lib/mpi/mpi-mod.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-mul.c (renamed from lib/mpi/mpi-mul.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-pow.c (renamed from lib/mpi/mpi-pow.c) | 0
-rw-r--r--  lib/crypto/mpi/mpi-sub-ui.c (renamed from lib/mpi/mpi-sub-ui.c) | 0
-rw-r--r--  lib/crypto/mpi/mpicoder.c (renamed from lib/mpi/mpicoder.c) | 0
-rw-r--r--  lib/crypto/mpi/mpih-cmp.c (renamed from lib/mpi/mpih-cmp.c) | 0
-rw-r--r--  lib/crypto/mpi/mpih-div.c (renamed from lib/mpi/mpih-div.c) | 0
-rw-r--r--  lib/crypto/mpi/mpih-mul.c (renamed from lib/mpi/mpih-mul.c) | 0
-rw-r--r--  lib/crypto/mpi/mpiutil.c (renamed from lib/mpi/mpiutil.c) | 0
-rw-r--r--  lib/error-inject.c | 2
-rw-r--r--  lib/genalloc.c | 6
-rw-r--r--  lib/iov_iter.c | 46
-rw-r--r--  lib/kstrtox.c | 2
-rw-r--r--  lib/kunit/Kconfig | 2
-rw-r--r--  lib/kunit/Makefile | 3
-rw-r--r--  lib/kunit/attributes.c | 414
-rw-r--r--  lib/kunit/executor.c | 227
-rw-r--r--  lib/kunit/executor_test.c | 152
-rw-r--r--  lib/kunit/kunit-example-test.c | 9
-rw-r--r--  lib/kunit/test.c | 64
-rw-r--r--  lib/list_debug.c | 16
-rw-r--r--  lib/locking-selftest.c | 135
-rw-r--r--  lib/logic_pio.c | 3
-rw-r--r--  lib/maple_tree.c | 1118
-rw-r--r--  lib/math/Makefile | 2
-rw-r--r--  lib/math/int_log.c | 133
-rw-r--r--  lib/memcpy_kunit.c | 8
-rw-r--r--  lib/nlattr.c | 6
-rw-r--r--  lib/nmi_backtrace.c | 6
-rw-r--r--  lib/notifier-error-inject.c | 3
-rw-r--r--  lib/radix-tree.c | 1
-rw-r--r--  lib/raid6/mktables.c | 2
-rw-r--r--  lib/raid6/recov.c | 1
-rw-r--r--  lib/raid6/test/.gitignore | 3
-rw-r--r--  lib/raid6/test/Makefile | 50
-rw-r--r--  lib/sbitmap.c | 15
-rw-r--r--  lib/scatterlist.c | 2
-rw-r--r--  lib/test_bitmap.c | 8
-rw-r--r--  lib/test_bpf.c | 24
-rw-r--r--  lib/test_hmm.c | 10
-rw-r--r--  lib/test_maple_tree.c | 146
-rw-r--r--  lib/test_meminit.c | 2
-rw-r--r--  lib/test_printf.c | 3
-rw-r--r--  lib/ts_bm.c | 43
-rw-r--r--  lib/vsprintf.c | 1
71 files changed, 1842 insertions, 1024 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5c2da561c516..c686f4adc124 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -415,6 +415,7 @@ config REED_SOLOMON_DEC16
#
config BCH
tristate
+ select BITREVERSE
config BCH_CONST_PARAMS
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fbc89baf7de6..a8a1b0ac8b22 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1200,7 +1200,7 @@ config WQ_CPU_INTENSIVE_REPORT
help
Say Y here to enable reporting of concurrency-managed per-cpu work
items that hog CPUs for longer than
- workqueue.cpu_intensive_threshold_us. Workqueue automatically
+ workqueue.cpu_intensive_thresh_us. Workqueue automatically
detects and excludes them from concurrency management to prevent
them from stalling other per-cpu work items. Occasional
triggering may not necessarily indicate a problem. Repeated
@@ -1673,10 +1673,15 @@ menu "Debug kernel data structures"
config DEBUG_LIST
bool "Debug linked list manipulation"
- depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
+ depends on DEBUG_KERNEL
+ select LIST_HARDENED
help
- Enable this to turn on extended checks in the linked-list
- walking routines.
+ Enable this to turn on extended checks in the linked-list walking
+ routines.
+
+ This option trades better quality error reports for performance, and
+ is more suitable for kernel debugging. If you care about performance,
+ you should only enable CONFIG_LIST_HARDENED instead.
If unsure, say N.
@@ -1710,16 +1715,6 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
-config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
- for validity.
-
- If unsure, say N.
-
config DEBUG_MAPLE_TREE
bool "Debug maple trees"
depends on DEBUG_KERNEL
@@ -2701,6 +2696,9 @@ config MEMCPY_SLOW_KUNIT_TEST
and bit ranges. These can be very slow, so they are split out
as a separate config, in case they need to be disabled.
+ Note this config option will be replaced by the use of KUnit test
+ attributes.
+
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -3010,6 +3008,19 @@ config RUST_BUILD_ASSERT_ALLOW
If unsure, say N.
+config RUST_KERNEL_DOCTESTS
+ bool "Doctests for the `kernel` crate" if !KUNIT_ALL_TESTS
+ depends on RUST && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the documentation tests of the `kernel` crate
+ as KUnit tests.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
endmenu # "Rust"
endmenu # Kernel hacking
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index efae7e011956..59e21bfec188 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -13,7 +13,7 @@ menuconfig UBSAN
if UBSAN
config UBSAN_TRAP
- bool "On Sanitizer warnings, abort the running kernel code"
+ bool "Abort on Sanitizer warnings (smaller kernel but less verbose)"
depends on !COMPILE_TEST
help
Building kernels with Sanitizer features enabled tends to grow
@@ -26,6 +26,14 @@ config UBSAN_TRAP
the system. For some system builders this is an acceptable
trade-off.
+ Also note that selecting Y will cause your kernel to Oops
+ with an "illegal instruction" error with no further details
+ when a UBSAN violation occurs. (Except on arm64, which will
+ report which Sanitizer failed.) This may make it hard to
+ determine whether an Oops was caused by UBSAN or to figure
+ out the details of a UBSAN violation. It makes the kernel log
+ output less useful for bug reports.
+
config CC_HAS_UBSAN_BOUNDS_STRICT
def_bool $(cc-option,-fsanitize=bounds-strict)
help
diff --git a/lib/Makefile b/lib/Makefile
index 42d307ade225..2e08397f6210 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -82,7 +82,13 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_SCANF) += test_scanf.o
+
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
+# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled
+GCOV_PROFILE_test_bitmap.o := n
+endif
+
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
@@ -161,7 +167,7 @@ obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_LIST_HARDENED) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
obj-$(CONFIG_BITREVERSE) += bitrev.o
@@ -253,7 +259,6 @@ obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
-obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
diff --git a/lib/bch.c b/lib/bch.c
index c8095f30f254..5f71fd76eca8 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -71,6 +71,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/bitrev.h>
#include <asm/byteorder.h>
#include <linux/bch.h>
@@ -114,47 +115,12 @@ struct gf_poly_deg1 {
unsigned int c[2];
};
-static u8 swap_bits_table[] = {
- 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
- 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
- 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
- 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
- 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
- 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
- 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
- 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
- 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
- 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
- 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
- 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
- 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
- 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
- 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
- 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
- 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
- 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
- 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
- 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
- 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
- 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
- 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
- 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
- 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
- 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
- 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
- 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
- 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
- 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
- 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
- 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
-};
-
static u8 swap_bits(struct bch_control *bch, u8 in)
{
if (!bch->swap_bits)
return in;
- return swap_bits_table[in];
+ return bitrev8(in);
}
/*
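
The table-free helper leans on the kernel's generic bitrev8() (hence the new select BITREVERSE and the <linux/bitrev.h> include above). A minimal user-space sketch of the same byte reversal, under a hypothetical name so it is not mistaken for the kernel helper:

        #include <stdint.h>
        #include <stdio.h>

        /* Reverse the bit order of one byte, the operation bitrev8() provides.
         * Example: 0x01 (00000001) -> 0x80 (10000000), matching the first
         * non-trivial entry of the deleted swap_bits_table[]. */
        static uint8_t bitrev8_sketch(uint8_t b)
        {
                b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;  /* swap nibbles     */
                b = (b & 0xcc) >> 2 | (b & 0x33) << 2;  /* swap bit pairs   */
                b = (b & 0xaa) >> 1 | (b & 0x55) << 1;  /* swap single bits */
                return b;
        }

        int main(void)
        {
                printf("0x%02x\n", bitrev8_sketch(0x01));       /* prints 0x80 */
                return 0;
        }
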
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index ace3c4799fe1..0eed92b77ba3 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -10,7 +10,8 @@
#define MAX_ALIGN 64
#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
-static const __wsum random_init_sum = 0x2847aab;
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 random_init_sum = 0x2847aab;
static const u8 random_buf[] = {
0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86,
0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5,
@@ -56,7 +57,9 @@ static const u8 random_buf[] = {
0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92,
0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09
};
-static const __sum16 expected_results[] = {
+
+/* Values for a little endian CPU. Byte swap on big endian CPU. */
+static const u16 expected_results[] = {
0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab,
0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654,
0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527,
@@ -115,7 +118,9 @@ static const __sum16 expected_results[] = {
0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6,
0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6,
};
-static const __wsum init_sums_no_overflow[] = {
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 init_sums_no_overflow[] = {
0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3,
0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7,
0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb,
@@ -208,7 +213,21 @@ static u8 tmp_buf[TEST_BUFLEN];
#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
-#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs)
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs)
+
+static __sum16 to_sum16(u16 x)
+{
+ return (__force __sum16)le16_to_cpu((__force __le16)x);
+}
+
+/* This function swaps the bytes inside each half of a __wsum */
+static __wsum to_wsum(u32 x)
+{
+ u16 hi = le16_to_cpu((__force __le16)(x >> 16));
+ u16 lo = le16_to_cpu((__force __le16)x);
+
+ return (__force __wsum)((hi << 16) | lo);
+}
static void assert_setup_correct(struct kunit *test)
{
@@ -226,7 +245,8 @@ static void assert_setup_correct(struct kunit *test)
static void test_csum_fixed_random_inputs(struct kunit *test)
{
int len, align;
- __wsum result, expec, sum;
+ __wsum sum;
+ __sum16 result, expec;
assert_setup_correct(test);
for (align = 0; align < TEST_BUFLEN; ++align) {
@@ -237,9 +257,9 @@ static void test_csum_fixed_random_inputs(struct kunit *test)
/*
* Test the precomputed random input.
*/
- sum = random_init_sum;
+ sum = to_wsum(random_init_sum);
result = full_csum(&tmp_buf[align], len, sum);
- expec = expected_results[len];
+ expec = to_sum16(expected_results[len]);
CHECK_EQ(result, expec);
}
}
@@ -251,7 +271,8 @@ static void test_csum_fixed_random_inputs(struct kunit *test)
static void test_csum_all_carry_inputs(struct kunit *test)
{
int len, align;
- __wsum result, expec, sum;
+ __wsum sum;
+ __sum16 result, expec;
assert_setup_correct(test);
memset(tmp_buf, 0xff, TEST_BUFLEN);
@@ -261,9 +282,9 @@ static void test_csum_all_carry_inputs(struct kunit *test)
/*
* All carries from input and initial sum.
*/
- sum = 0xffffffff;
+ sum = to_wsum(0xffffffff);
result = full_csum(&tmp_buf[align], len, sum);
- expec = (len & 1) ? 0xff00 : 0;
+ expec = to_sum16((len & 1) ? 0xff00 : 0);
CHECK_EQ(result, expec);
/*
@@ -272,11 +293,11 @@ static void test_csum_all_carry_inputs(struct kunit *test)
sum = 0;
result = full_csum(&tmp_buf[align], len, sum);
if (len & 1)
- expec = 0xff00;
+ expec = to_sum16(0xff00);
else if (len)
expec = 0;
else
- expec = 0xffff;
+ expec = to_sum16(0xffff);
CHECK_EQ(result, expec);
}
}
@@ -290,7 +311,8 @@ static void test_csum_all_carry_inputs(struct kunit *test)
static void test_csum_no_carry_inputs(struct kunit *test)
{
int len, align;
- __wsum result, expec, sum;
+ __wsum sum;
+ __sum16 result, expec;
assert_setup_correct(test);
memset(tmp_buf, 0x4, TEST_BUFLEN);
@@ -300,7 +322,7 @@ static void test_csum_no_carry_inputs(struct kunit *test)
/*
* Expect no carries.
*/
- sum = init_sums_no_overflow[len];
+ sum = to_wsum(init_sums_no_overflow[len]);
result = full_csum(&tmp_buf[align], len, sum);
expec = 0;
CHECK_EQ(result, expec);
@@ -308,9 +330,9 @@ static void test_csum_no_carry_inputs(struct kunit *test)
/*
* Expect one carry.
*/
- sum = init_sums_no_overflow[len] + 1;
+ sum = to_wsum(init_sums_no_overflow[len] + 1);
result = full_csum(&tmp_buf[align], len, sum);
- expec = len ? 0xfffe : 0xffff;
+ expec = to_sum16(len ? 0xfffe : 0xffff);
CHECK_EQ(result, expec);
}
}
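
The to_sum16()/to_wsum() helpers introduced above make the little-endian test constants portable: each 16-bit half is byte-swapped on big-endian CPUs and left untouched on little-endian ones. A stand-alone sketch of the transformation (hypothetical user-space name, using the compiler's byte-order macro in place of the kernel's le16_to_cpu()):

        #include <stdint.h>

        /* Swap the bytes inside each 16-bit half of a 32-bit value: identity
         * on little-endian, per-half byte swap on big-endian, mirroring
         * to_wsum() above. */
        static uint32_t to_wsum_sketch(uint32_t x)
        {
                uint16_t hi = (uint16_t)(x >> 16);
                uint16_t lo = (uint16_t)x;

        #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                hi = (uint16_t)(hi << 8 | hi >> 8);
                lo = (uint16_t)(lo << 8 | lo >> 8);
        #endif
                return (uint32_t)hi << 16 | lo;
        }
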
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 0d3a686b5ba2..fb8c0c5c2bd2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
}
EXPORT_SYMBOL(__clzsi2);
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
{
- return 32 - fls((int)val);
+ return 64 - fls64(val);
}
EXPORT_SYMBOL(__clzdi2);
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
{
- return __ffs((u32)val);
+ return __ffs64(val);
}
EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
- return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
- return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
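
With the #if/#elif ladder gone, __clzdi2() and __ctzdi2() always take a u64, which matches the 64-bit "DI" operand these libgcc-style helpers are expected to handle even on 32-bit targets. A quick user-space sanity check of the semantics, using the compiler builtins rather than the kernel's fls64()/__ffs64():

        #include <stdint.h>
        #include <assert.h>

        int main(void)
        {
                uint64_t v = 1ULL << 8;                 /* only bit 8 set */

                assert(__builtin_clzll(v) == 55);       /* 64 - fls64(v) = 64 - 9 */
                assert(__builtin_ctzll(v) == 8);        /* __ffs64(v) */
                return 0;
        }
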
diff --git a/lib/cpumask.c b/lib/cpumask.c
index de356f16773a..a7fd02b5ae26 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -45,6 +45,7 @@ EXPORT_SYMBOL(cpumask_next_wrap);
* alloc_cpumask_var_node - allocate a struct cpumask on a given node
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
+ * @node: memory node from which to allocate or %NUMA_NO_NODE
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>)
@@ -157,7 +158,9 @@ EXPORT_SYMBOL(cpumask_local_spread);
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
/**
- * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
+ * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
+ * @src1p: first &cpumask for intersection
+ * @src2p: second &cpumask for intersection
*
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 6ec2d4543d9c..8d1446c2be71 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -53,3 +53,5 @@ libblake2s-y += blake2s-selftest.o
libchacha20poly1305-y += chacha20poly1305-selftest.o
libcurve25519-y += curve25519-selftest.o
endif
+
+obj-$(CONFIG_MPILIB) += mpi/
diff --git a/lib/mpi/Makefile b/lib/crypto/mpi/Makefile
index 6e6ef9a34fe1..6e6ef9a34fe1 100644
--- a/lib/mpi/Makefile
+++ b/lib/crypto/mpi/Makefile
diff --git a/lib/mpi/ec.c b/lib/crypto/mpi/ec.c
index 40f5908e57a4..40f5908e57a4 100644
--- a/lib/mpi/ec.c
+++ b/lib/crypto/mpi/ec.c
diff --git a/lib/mpi/generic_mpih-add1.c b/lib/crypto/mpi/generic_mpih-add1.c
index 299308b5461c..299308b5461c 100644
--- a/lib/mpi/generic_mpih-add1.c
+++ b/lib/crypto/mpi/generic_mpih-add1.c
diff --git a/lib/mpi/generic_mpih-lshift.c b/lib/crypto/mpi/generic_mpih-lshift.c
index 7b21f5938a50..7b21f5938a50 100644
--- a/lib/mpi/generic_mpih-lshift.c
+++ b/lib/crypto/mpi/generic_mpih-lshift.c
diff --git a/lib/mpi/generic_mpih-mul1.c b/lib/crypto/mpi/generic_mpih-mul1.c
index e020e61d47b9..e020e61d47b9 100644
--- a/lib/mpi/generic_mpih-mul1.c
+++ b/lib/crypto/mpi/generic_mpih-mul1.c
diff --git a/lib/mpi/generic_mpih-mul2.c b/lib/crypto/mpi/generic_mpih-mul2.c
index 9484d8528243..9484d8528243 100644
--- a/lib/mpi/generic_mpih-mul2.c
+++ b/lib/crypto/mpi/generic_mpih-mul2.c
diff --git a/lib/mpi/generic_mpih-mul3.c b/lib/crypto/mpi/generic_mpih-mul3.c
index ccdbab4121e0..ccdbab4121e0 100644
--- a/lib/mpi/generic_mpih-mul3.c
+++ b/lib/crypto/mpi/generic_mpih-mul3.c
diff --git a/lib/mpi/generic_mpih-rshift.c b/lib/crypto/mpi/generic_mpih-rshift.c
index e07bc69aa898..e07bc69aa898 100644
--- a/lib/mpi/generic_mpih-rshift.c
+++ b/lib/crypto/mpi/generic_mpih-rshift.c
diff --git a/lib/mpi/generic_mpih-sub1.c b/lib/crypto/mpi/generic_mpih-sub1.c
index eea4382aad5f..eea4382aad5f 100644
--- a/lib/mpi/generic_mpih-sub1.c
+++ b/lib/crypto/mpi/generic_mpih-sub1.c
diff --git a/lib/mpi/longlong.h b/lib/crypto/mpi/longlong.h
index b6fa1d08fb55..b6fa1d08fb55 100644
--- a/lib/mpi/longlong.h
+++ b/lib/crypto/mpi/longlong.h
diff --git a/lib/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c
index 9056fc5167fc..9056fc5167fc 100644
--- a/lib/mpi/mpi-add.c
+++ b/lib/crypto/mpi/mpi-add.c
diff --git a/lib/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
index 070ba784c9f1..070ba784c9f1 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/crypto/mpi/mpi-bit.c
diff --git a/lib/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c
index c4cfa3ff0581..0835b6213235 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/crypto/mpi/mpi-cmp.c
@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
mpi_limb_t limb = v;
mpi_normalize(u);
- if (!u->nlimbs && !limb)
- return 0;
+ if (u->nlimbs == 0) {
+ if (v == 0)
+ return 0;
+ else
+ return -1;
+ }
if (u->sign)
return -1;
if (u->nlimbs > 1)
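
The old test only short-circuited when both operands were zero, so a zero MPI compared against a nonzero v fell through to the limb comparison. The fixed flow, as a stand-alone sketch (hypothetical name; the u->sign check the real mpi_cmp_ui() performs for negative values is omitted for brevity):

        #include <stddef.h>

        /* Three-way compare of a normalized non-negative big integer
         * (n == 0 means the value is zero) against an unsigned long v,
         * mirroring the corrected mpi_cmp_ui() logic. */
        static int cmp_ui_sketch(const unsigned long *limbs, size_t n,
                                 unsigned long v)
        {
                if (n == 0)                     /* zero vs. v: the fixed case */
                        return v == 0 ? 0 : -1;
                if (n > 1)                      /* multi-limb value exceeds any ulong */
                        return 1;
                if (limbs[0] > v)
                        return 1;
                if (limbs[0] < v)
                        return -1;
                return 0;
        }
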
diff --git a/lib/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c
index 45beab8b9e9e..45beab8b9e9e 100644
--- a/lib/mpi/mpi-div.c
+++ b/lib/crypto/mpi/mpi-div.c
diff --git a/lib/mpi/mpi-inline.h b/lib/crypto/mpi/mpi-inline.h
index 980b6b940953..980b6b940953 100644
--- a/lib/mpi/mpi-inline.h
+++ b/lib/crypto/mpi/mpi-inline.h
diff --git a/lib/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h
index 554002182db1..554002182db1 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/crypto/mpi/mpi-internal.h
diff --git a/lib/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c
index 61e37d18f793..61e37d18f793 100644
--- a/lib/mpi/mpi-inv.c
+++ b/lib/crypto/mpi/mpi-inv.c
diff --git a/lib/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c
index 54fcc01564d9..54fcc01564d9 100644
--- a/lib/mpi/mpi-mod.c
+++ b/lib/crypto/mpi/mpi-mod.c
diff --git a/lib/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
index 7f4eda8560dc..7f4eda8560dc 100644
--- a/lib/mpi/mpi-mul.c
+++ b/lib/crypto/mpi/mpi-mul.c
diff --git a/lib/mpi/mpi-pow.c b/lib/crypto/mpi/mpi-pow.c
index 2fd7a46d55ec..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/crypto/mpi/mpi-pow.c
diff --git a/lib/mpi/mpi-sub-ui.c b/lib/crypto/mpi/mpi-sub-ui.c
index b41b082b5f3e..b41b082b5f3e 100644
--- a/lib/mpi/mpi-sub-ui.c
+++ b/lib/crypto/mpi/mpi-sub-ui.c
diff --git a/lib/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
index 3cb6bd148fa9..3cb6bd148fa9 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/crypto/mpi/mpicoder.c
diff --git a/lib/mpi/mpih-cmp.c b/lib/crypto/mpi/mpih-cmp.c
index f23709114a65..f23709114a65 100644
--- a/lib/mpi/mpih-cmp.c
+++ b/lib/crypto/mpi/mpih-cmp.c
diff --git a/lib/mpi/mpih-div.c b/lib/crypto/mpi/mpih-div.c
index be70ee2e42d3..be70ee2e42d3 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/crypto/mpi/mpih-div.c
diff --git a/lib/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c
index e5f1c84e3c48..e5f1c84e3c48 100644
--- a/lib/mpi/mpih-mul.c
+++ b/lib/crypto/mpi/mpih-mul.c
diff --git a/lib/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c
index aa8c46544af8..aa8c46544af8 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/crypto/mpi/mpiutil.c
diff --git a/lib/error-inject.c b/lib/error-inject.c
index 32c14770508e..887acd9a6ea6 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -217,8 +217,6 @@ static int __init ei_debugfs_init(void)
struct dentry *dir, *file;
dir = debugfs_create_dir("error_injection", NULL);
- if (!dir)
- return -ENOMEM;
file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
if (!file) {
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0c883d6fbd44..4fa5635bf81b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -32,7 +32,9 @@
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
@@ -895,7 +897,7 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
of_property_read_string(np_pool, "label", &name);
if (!name)
- name = np_pool->name;
+ name = of_node_full_name(np_pool);
}
if (pdev)
pool = gen_pool_get(&pdev->dev, name);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b667b1e2f688..b31597b0ca20 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -566,24 +566,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_zero);
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
- struct iov_iter *i)
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i)
{
- char *kaddr = kmap_atomic(page), *p = kaddr + offset;
- if (!page_copy_sane(page, offset, bytes)) {
- kunmap_atomic(kaddr);
+ size_t n, copied = 0;
+
+ if (!page_copy_sane(page, offset, bytes))
return 0;
- }
- if (WARN_ON_ONCE(!i->data_source)) {
- kunmap_atomic(kaddr);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
- iterate_and_advance(i, bytes, base, len, off,
- copyin(p + off, base, len),
- memcpy_from_iter(i, p + off, base, len)
- )
- kunmap_atomic(kaddr);
- return bytes;
+
+ do {
+ char *p;
+
+ n = bytes - copied;
+ if (PageHighMem(page)) {
+ page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+ n = min_t(size_t, n, PAGE_SIZE - offset);
+ }
+
+ p = kmap_atomic(page) + offset;
+ iterate_and_advance(i, n, base, len, off,
+ copyin(p + off, base, len),
+ memcpy_from_iter(i, p + off, base, len)
+ )
+ kunmap_atomic(p);
+ copied += n;
+ offset += n;
+ } while (PageHighMem(page) && copied != bytes && n > 0);
+
+ return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
@@ -1349,7 +1362,7 @@ uaccess_end:
return ret;
}
-static int copy_iovec_from_user(struct iovec *iov,
+static __noclone int copy_iovec_from_user(struct iovec *iov,
const struct iovec __user *uiov, unsigned long nr_segs)
{
int ret = -EFAULT;
@@ -1544,6 +1557,7 @@ int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
iov_iter_ubuf(i, rw, buf, len);
return 0;
}
+EXPORT_SYMBOL_GPL(import_ubuf);
/**
* iov_iter_restore() - Restore a &struct iov_iter to the same state as when
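
The rewritten copy_page_from_iter_atomic() can no longer assume the whole range is mapped at once: kmap_atomic() maps a single page, so highmem copies advance in at most PAGE_SIZE-sized steps and the loop terminates early on a short copy. The same clamp-and-advance shape, as a self-contained user-space sketch (hypothetical names; a 4096-byte CHUNK stands in for PAGE_SIZE, and no fault path is modeled):

        #include <stddef.h>
        #include <string.h>

        #define CHUNK 4096

        /* Copy 'bytes' from a sequential source into dst starting at
         * 'offset', never crossing a CHUNK boundary in one step, like
         * the per-page loop above. */
        static size_t copy_chunked(char *dst, const char *src, size_t offset,
                                   size_t bytes)
        {
                size_t copied = 0;

                while (copied != bytes) {
                        size_t n = bytes - copied;
                        size_t room = CHUNK - offset % CHUNK;

                        if (n > room)           /* clamp to the current chunk */
                                n = room;
                        memcpy(dst + offset, src + copied, n);
                        copied += n;
                        offset += n;
                }
                return copied;
        }
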
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 08c14019841a..d586e6af5e5a 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -59,7 +59,7 @@ unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned lon
rv = 0;
while (max_chars--) {
unsigned int c = *s;
- unsigned int lc = c | 0x20; /* don't tolower() this line */
+ unsigned int lc = _tolower(c);
unsigned int val;
if ('0' <= c && c <= '9')
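
The replaced expression and _tolower() agree for the characters this parser cares about: in ASCII, upper- and lower-case letters differ only in bit 5 (0x20), and the digit range 0x30..0x39 already has that bit set. A compile-and-run check:

        #include <assert.h>

        int main(void)
        {
                assert(('F' | 0x20) == 'f');    /* letters: OR-ing 0x20 lowercases */
                assert(('f' | 0x20) == 'f');    /* already-lower-case is unchanged */
                assert(('9' | 0x20) == '9');    /* digits keep their value */
                return 0;
        }
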
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 626719b95bad..68a6daec0aef 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -4,7 +4,7 @@
menuconfig KUNIT
tristate "KUnit - Enable support for unit tests"
- select GLOB if KUNIT=y
+ select GLOB
help
Enables support for kernel unit tests (KUnit), a lightweight unit
testing and mocking framework for the Linux kernel. These tests are
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index cb417f504996..46f75f23dfe4 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -6,7 +6,8 @@ kunit-objs += test.o \
string-stream.o \
assert.o \
try-catch.o \
- executor.o
+ executor.o \
+ attributes.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
new file mode 100644
index 000000000000..1b512f7e1838
--- /dev/null
+++ b/lib/kunit/attributes.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+
+/* Options for printing attributes:
+ * PRINT_ALWAYS - attribute is printed for every test case and suite if set
+ * PRINT_SUITE - attribute is printed for every suite if set but not for test cases
+ * PRINT_NEVER - attribute is never printed
+ */
+enum print_ops {
+ PRINT_ALWAYS,
+ PRINT_SUITE,
+ PRINT_NEVER,
+};
+
+/**
+ * struct kunit_attr - represents a test attribute and holds flexible
+ * helper functions to interact with attribute.
+ *
+ * @name: name of test attribute, eg. speed
+ * @get_attr: function to return attribute value given a test
+ * @to_string: function to return string representation of given
+ * attribute value
+ * @filter: function to indicate whether a given attribute value passes a
+ * filter
+ * @attr_default: default attribute value used during filtering
+ * @print: value of enum print_ops to indicate when to print attribute
+ */
+struct kunit_attr {
+ const char *name;
+ void *(*get_attr)(void *test_or_suite, bool is_test);
+ const char *(*to_string)(void *attr, bool *to_free);
+ int (*filter)(void *attr, const char *input, int *err);
+ void *attr_default;
+ enum print_ops print;
+};
+
+/* String Lists for enum Attributes */
+
+static const char * const speed_str_list[] = {"unset", "very_slow", "slow", "normal"};
+
+/* To String Methods */
+
+static const char *attr_enum_to_string(void *attr, const char * const str_list[], bool *to_free)
+{
+ long val = (long)attr;
+
+ *to_free = false;
+ if (!val)
+ return NULL;
+ return str_list[val];
+}
+
+static const char *attr_speed_to_string(void *attr, bool *to_free)
+{
+ return attr_enum_to_string(attr, speed_str_list, to_free);
+}
+
+static const char *attr_string_to_string(void *attr, bool *to_free)
+{
+ *to_free = false;
+ return (char *) attr;
+}
+
+/* Filter Methods */
+
+static const char op_list[] = "<>!=";
+
+/*
+ * Returns whether the inputted integer value matches the filter given
+ * by the operation string and inputted integer.
+ */
+static int int_filter(long val, const char *op, int input, int *err)
+{
+ if (!strncmp(op, "<=", 2))
+ return (val <= input);
+ else if (!strncmp(op, ">=", 2))
+ return (val >= input);
+ else if (!strncmp(op, "!=", 2))
+ return (val != input);
+ else if (!strncmp(op, ">", 1))
+ return (val > input);
+ else if (!strncmp(op, "<", 1))
+ return (val < input);
+ else if (!strncmp(op, "=", 1))
+ return (val == input);
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", op);
+ return false;
+}
+
+/*
+ * Returns whether the inputted enum value "attr" matches the filter given
+ * by the input string. Note: the str_list includes the corresponding string
+ * list to the enum values.
+ */
+static int attr_enum_filter(void *attr, const char *input, int *err,
+ const char * const str_list[], int max)
+{
+ int i, j, input_int = -1;
+ long test_val = (long)attr;
+ const char *input_val = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_val = input + i;
+ break;
+ }
+ }
+
+ if (!input_val) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ for (j = 0; j <= max; j++) {
+ if (!strcmp(input_val, str_list[j]))
+ input_int = j;
+ }
+
+ if (input_int < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(test_val, input, input_int, err);
+}
+
+static int attr_speed_filter(void *attr, const char *input, int *err)
+{
+ return attr_enum_filter(attr, input, err, speed_str_list, KUNIT_SPEED_MAX);
+}
+
+/*
+ * Returns whether the inputted string value (attr) matches the filter given
+ * by the input string.
+ */
+static int attr_string_filter(void *attr, const char *input, int *err)
+{
+ char *str = attr;
+
+ if (!strncmp(input, "<", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, ">", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, "!=", 2)) {
+ return (strcmp(input + 2, str) != 0);
+ } else if (!strncmp(input, "=", 1)) {
+ return (strcmp(input + 1, str) == 0);
+ }
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", input);
+ return false;
+}
+
+
+/* Get Attribute Methods */
+
+static void *attr_speed_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) test->attr.speed);
+ else
+ return ((void *) suite->attr.speed);
+}
+
+static void *attr_module_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ // Suites get their module attribute from their first test_case
+ if (test)
+ return ((void *) test->module_name);
+ else if (kunit_suite_num_test_cases(suite) > 0)
+ return ((void *) suite->test_cases[0].module_name);
+ else
+ return (void *) "";
+}
+
+/* List of all Test Attributes */
+
+static struct kunit_attr kunit_attr_list[] = {
+ {
+ .name = "speed",
+ .get_attr = attr_speed_get,
+ .to_string = attr_speed_to_string,
+ .filter = attr_speed_filter,
+ .attr_default = (void *)KUNIT_SPEED_NORMAL,
+ .print = PRINT_ALWAYS,
+ },
+ {
+ .name = "module",
+ .get_attr = attr_module_get,
+ .to_string = attr_string_to_string,
+ .filter = attr_string_filter,
+ .attr_default = (void *)"",
+ .print = PRINT_SUITE,
+ }
+};
+
+/* Helper Functions to Access Attributes */
+
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter)
+{
+ return filter.attr->name;
+}
+
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level)
+{
+ int i;
+ bool to_free = false;
+ void *attr;
+ const char *attr_name, *attr_str;
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ for (i = 0; i < ARRAY_SIZE(kunit_attr_list); i++) {
+ if (kunit_attr_list[i].print == PRINT_NEVER ||
+ (test && kunit_attr_list[i].print == PRINT_SUITE))
+ continue;
+ attr = kunit_attr_list[i].get_attr(test_or_suite, is_test);
+ if (attr) {
+ attr_name = kunit_attr_list[i].name;
+ attr_str = kunit_attr_list[i].to_string(attr, &to_free);
+ if (test) {
+ kunit_log(KERN_INFO, test, "%*s# %s.%s: %s",
+ KUNIT_INDENT_LEN * test_level, "", test->name,
+ attr_name, attr_str);
+ } else {
+ kunit_log(KERN_INFO, suite, "%*s# %s: %s",
+ KUNIT_INDENT_LEN * test_level, "", attr_name, attr_str);
+ }
+
+ /* Free to_string of attribute if needed */
+ if (to_free)
+ kfree(attr_str);
+ }
+ }
+}
+
+/* Helper Functions to Filter Attributes */
+
+int kunit_get_filter_count(char *input)
+{
+ int i, comma_index = 0, count = 0;
+
+ for (i = 0; input[i]; i++) {
+ if (input[i] == ',') {
+ if ((i - comma_index) > 1)
+ count++;
+ comma_index = i;
+ }
+ }
+ if ((i - comma_index) > 0)
+ count++;
+ return count;
+}
+
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err)
+{
+ struct kunit_attr_filter filter = {};
+ int i, j, comma_index = 0, new_start_index = 0;
+ int op_index = -1, attr_index = -1;
+ char op;
+ char *input = *filters;
+
+ /* Parse input until operation */
+ for (i = 0; input[i]; i++) {
+ if (op_index < 0 && strchr(op_list, input[i])) {
+ op_index = i;
+ } else if (!comma_index && input[i] == ',') {
+ comma_index = i;
+ } else if (comma_index && input[i] != ' ') {
+ new_start_index = i;
+ break;
+ }
+ }
+
+ if (op_index <= 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter operation not found: %s\n", input);
+ return filter;
+ }
+
+ /* Temporarily set operator to \0 character. */
+ op = input[op_index];
+ input[op_index] = '\0';
+
+ /* Find associated kunit_attr object */
+ for (j = 0; j < ARRAY_SIZE(kunit_attr_list); j++) {
+ if (!strcmp(input, kunit_attr_list[j].name)) {
+ attr_index = j;
+ break;
+ }
+ }
+
+ input[op_index] = op;
+
+ if (attr_index < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: attribute not found: %s\n", input);
+ } else {
+ filter.attr = &kunit_attr_list[attr_index];
+ }
+
+ if (comma_index > 0) {
+ input[comma_index] = '\0';
+ filter.input = input + op_index;
+ input = input + new_start_index;
+ } else {
+ filter.input = input + op_index;
+ input = NULL;
+ }
+
+ *filters = input;
+
+ return filter;
+}
+
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err)
+{
+ int n = 0;
+ struct kunit_case *filtered, *test_case;
+ struct kunit_suite *copy;
+ void *suite_val, *test_val;
+ bool suite_result, test_result, default_result, result;
+
+ /* Allocate memory for new copy of suite and list of test cases */
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ kunit_suite_for_each_test_case(suite, test_case) { n++; }
+
+ filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered) {
+ kfree(copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+
+ /* Save filtering result on default value */
+ default_result = filter.attr->filter(filter.attr->attr_default, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* Save suite attribute value and filtering result on that value */
+ suite_val = filter.attr->get_attr((void *)suite, false);
+ suite_result = filter.attr->filter(suite_val, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* For each test case, save test case if passes filtering. */
+ kunit_suite_for_each_test_case(suite, test_case) {
+ test_val = filter.attr->get_attr((void *) test_case, true);
+ test_result = filter.attr->filter(filter.attr->get_attr(test_case, true),
+ filter.input, err);
+ if (*err)
+ goto err;
+
+ /*
+ * If attribute value of test case is set, filter on that value.
+ * If not, filter on suite value if set. If not, filter on
+ * default value.
+ */
+ result = false;
+ if (test_val) {
+ if (test_result)
+ result = true;
+ } else if (suite_val) {
+ if (suite_result)
+ result = true;
+ } else if (default_result) {
+ result = true;
+ }
+
+ if (result) {
+ filtered[n++] = *test_case;
+ } else if (action && strcmp(action, "skip") == 0) {
+ test_case->status = KUNIT_SKIPPED;
+ filtered[n++] = *test_case;
+ }
+ }
+
+err:
+ if (n == 0 || *err) {
+ kfree(copy);
+ kfree(filtered);
+ return NULL;
+ }
+
+ copy->test_cases = filtered;
+
+ return copy;
+}
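
Filter strings are comma-separated "<attribute><op><value>" clauses; kunit_get_filter_count() above counts the non-empty clauses before kunit_next_attr_filter() peels them off one at a time. A faithful user-space copy of the counting rule (hypothetical name) as a sanity check:

        #include <assert.h>

        /* Count comma-separated, non-empty filter clauses, mirroring
         * kunit_get_filter_count() above. */
        static int filter_count_sketch(const char *s)
        {
                int i, comma = 0, count = 0;

                for (i = 0; s[i]; i++) {
                        if (s[i] == ',') {
                                if (i - comma > 1)
                                        count++;
                                comma = i;
                        }
                }
                if (i - comma > 0)
                        count++;
                return count;
        }

        int main(void)
        {
                assert(filter_count_sketch("speed>slow, module!=example") == 2);
                return 0;
        }
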
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 74982b83707c..5181aa2e760b 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -2,6 +2,7 @@
#include <linux/reboot.h>
#include <kunit/test.h>
+#include <kunit/attributes.h>
#include <linux/glob.h>
#include <linux/moduleparam.h>
@@ -12,28 +13,59 @@
extern struct kunit_suite * const __kunit_suites_start[];
extern struct kunit_suite * const __kunit_suites_end[];
-#if IS_BUILTIN(CONFIG_KUNIT)
-
-static char *filter_glob_param;
static char *action_param;
-module_param_named(filter_glob, filter_glob_param, charp, 0);
-MODULE_PARM_DESC(filter_glob,
- "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
-module_param_named(action, action_param, charp, 0);
+module_param_named(action, action_param, charp, 0400);
MODULE_PARM_DESC(action,
"Changes KUnit executor behavior, valid values are:\n"
"<none>: run the tests like normal\n"
- "'list' to list test names instead of running them.\n");
+ "'list' to list test names instead of running them.\n"
+ "'list_attr' to list test names and attributes instead of running them.\n");
+
+const char *kunit_action(void)
+{
+ return action_param;
+}
+
+static char *filter_glob_param;
+static char *filter_param;
+static char *filter_action_param;
+
+module_param_named(filter_glob, filter_glob_param, charp, 0400);
+MODULE_PARM_DESC(filter_glob,
+ "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
+module_param_named(filter, filter_param, charp, 0400);
+MODULE_PARM_DESC(filter,
+ "Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
+module_param_named(filter_action, filter_action_param, charp, 0400);
+MODULE_PARM_DESC(filter_action,
+ "Changes behavior of filtered tests using attributes, valid values are:\n"
+ "<none>: do not run filtered tests as normal\n"
+ "'skip': skip all filtered tests instead so tests will appear in output\n");
+
+const char *kunit_filter_glob(void)
+{
+ return filter_glob_param;
+}
+
+char *kunit_filter(void)
+{
+ return filter_param;
+}
+
+char *kunit_filter_action(void)
+{
+ return filter_action_param;
+}
/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
-struct kunit_test_filter {
+struct kunit_glob_filter {
char *suite_glob;
char *test_glob;
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
-static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
+static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
@@ -55,7 +87,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
/* Create a copy of suite with only tests that match test_glob. */
static struct kunit_suite *
-kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob)
+kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_glob)
{
int n = 0;
struct kunit_case *filtered, *test_case;
@@ -89,16 +121,7 @@ kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob)
return copy;
}
-static char *kunit_shutdown;
-core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
-
-/* Stores an array of suites, end points one past the end */
-struct suite_set {
- struct kunit_suite * const *start;
- struct kunit_suite * const *end;
-};
-
-static void kunit_free_suite_set(struct suite_set suite_set)
+void kunit_free_suite_set(struct kunit_suite_set suite_set)
{
struct kunit_suite * const *suites;
@@ -107,72 +130,117 @@ static void kunit_free_suite_set(struct suite_set suite_set)
kfree(suite_set.start);
}
-static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
- const char *filter_glob,
- int *err)
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err)
{
- int i;
- struct kunit_suite **copy, *filtered_suite;
- struct suite_set filtered;
- struct kunit_test_filter filter;
+ int i, j, k;
+ int filter_count = 0;
+ struct kunit_suite **copy, **copy_start, *filtered_suite, *new_filtered_suite;
+ struct kunit_suite_set filtered = {NULL, NULL};
+ struct kunit_glob_filter parsed_glob;
+ struct kunit_attr_filter *parsed_filters = NULL;
const size_t max = suite_set->end - suite_set->start;
copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
- filtered.start = copy;
if (!copy) { /* won't be able to run anything, return an empty set */
- filtered.end = copy;
return filtered;
}
+ copy_start = copy;
- kunit_parse_filter_glob(&filter, filter_glob);
-
- for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
- if (!glob_match(filter.suite_glob, suite_set->start[i]->name))
- continue;
+ if (filter_glob)
+ kunit_parse_glob_filter(&parsed_glob, filter_glob);
- filtered_suite = kunit_filter_tests(suite_set->start[i], filter.test_glob);
- if (IS_ERR(filtered_suite)) {
- *err = PTR_ERR(filtered_suite);
+ /* Parse attribute filters */
+ if (filters) {
+ filter_count = kunit_get_filter_count(filters);
+ parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
+ if (!parsed_filters) {
+ kfree(copy);
return filtered;
}
+ for (j = 0; j < filter_count; j++)
+ parsed_filters[j] = kunit_next_attr_filter(&filters, err);
+ if (*err)
+ goto err;
+ }
+
+ for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
+ filtered_suite = suite_set->start[i];
+ if (filter_glob) {
+ if (!glob_match(parsed_glob.suite_glob, filtered_suite->name))
+ continue;
+ filtered_suite = kunit_filter_glob_tests(filtered_suite,
+ parsed_glob.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto err;
+ }
+ }
+ if (filter_count > 0 && parsed_filters != NULL) {
+ for (k = 0; k < filter_count; k++) {
+ new_filtered_suite = kunit_filter_attr_tests(filtered_suite,
+ parsed_filters[k], filter_action, err);
+
+ /* Free previous copy of suite */
+ if (k > 0 || filter_glob) {
+ kfree(filtered_suite->test_cases);
+ kfree(filtered_suite);
+ }
+
+ filtered_suite = new_filtered_suite;
+
+ if (*err)
+ goto err;
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto err;
+ }
+ if (!filtered_suite)
+ break;
+ }
+ }
+
if (!filtered_suite)
continue;
*copy++ = filtered_suite;
}
+ filtered.start = copy_start;
filtered.end = copy;
- kfree(filter.suite_glob);
- kfree(filter.test_glob);
- return filtered;
-}
+err:
+ if (*err)
+ kfree(copy);
-static void kunit_handle_shutdown(void)
-{
- if (!kunit_shutdown)
- return;
+ if (filter_glob) {
+ kfree(parsed_glob.suite_glob);
+ kfree(parsed_glob.test_glob);
+ }
- if (!strcmp(kunit_shutdown, "poweroff"))
- kernel_power_off();
- else if (!strcmp(kunit_shutdown, "halt"))
- kernel_halt();
- else if (!strcmp(kunit_shutdown, "reboot"))
- kernel_restart(NULL);
+ if (filter_count)
+ kfree(parsed_filters);
+ return filtered;
}
-static void kunit_exec_run_tests(struct suite_set *suite_set)
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
{
size_t num_suites = suite_set->end - suite_set->start;
- pr_info("KTAP version 1\n");
- pr_info("1..%zu\n", num_suites);
+ if (builtin || num_suites) {
+ pr_info("KTAP version 1\n");
+ pr_info("1..%zu\n", num_suites);
+ }
__kunit_test_suites_init(suite_set->start, num_suites);
}
-static void kunit_exec_list_tests(struct suite_set *suite_set)
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
{
struct kunit_suite * const *suites;
struct kunit_case *test_case;
@@ -180,23 +248,54 @@ static void kunit_exec_list_tests(struct suite_set *suite_set)
/* Hack: print a ktap header so kunit.py can find the start of KUnit output. */
pr_info("KTAP version 1\n");
- for (suites = suite_set->start; suites < suite_set->end; suites++)
+ for (suites = suite_set->start; suites < suite_set->end; suites++) {
+ /* Print suite name and suite attributes */
+ pr_info("%s\n", (*suites)->name);
+ if (include_attr)
+ kunit_print_attr((void *)(*suites), false, 0);
+
+ /* Print test case name and attributes in suite */
kunit_suite_for_each_test_case((*suites), test_case) {
pr_info("%s.%s\n", (*suites)->name, test_case->name);
+ if (include_attr)
+ kunit_print_attr((void *)test_case, true, 0);
}
+ }
+}
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+
}
int kunit_run_all_tests(void)
{
- struct suite_set suite_set = {__kunit_suites_start, __kunit_suites_end};
+ struct kunit_suite_set suite_set = {
+ __kunit_suites_start, __kunit_suites_end,
+ };
int err = 0;
if (!kunit_enabled()) {
pr_info("kunit: disabled\n");
goto out;
}
- if (filter_glob_param) {
- suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err);
+ if (filter_glob_param || filter_param) {
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
+ filter_param, filter_action_param, &err);
if (err) {
pr_err("kunit executor: error filtering suites: %d\n", err);
goto out;
@@ -204,13 +303,15 @@ int kunit_run_all_tests(void)
}
if (!action_param)
- kunit_exec_run_tests(&suite_set);
+ kunit_exec_run_tests(&suite_set, true);
else if (strcmp(action_param, "list") == 0)
- kunit_exec_list_tests(&suite_set);
+ kunit_exec_list_tests(&suite_set, false);
+ else if (strcmp(action_param, "list_attr") == 0)
+ kunit_exec_list_tests(&suite_set, true);
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
- if (filter_glob_param) { /* a copy was made of each suite */
+ if (filter_glob_param || filter_param) { /* a copy was made of each suite */
kunit_free_suite_set(suite_set);
}
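
Taken together, the boot-time knobs now compose: a command line along the lines of `kunit.filter=speed>slow kunit.filter_action=skip kunit.action=list_attr` would skip (rather than silently drop) slow tests and list each case with its attributes instead of running it. The `kunit.`-prefixed spellings here are inferred from the module_param_named() declarations above, not quoted from documentation.
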
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index ce6749af374d..4084071d0eb5 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -7,6 +7,7 @@
*/
#include <kunit/test.h>
+#include <kunit/attributes.h>
static void kfree_at_end(struct kunit *test, const void *to_free);
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
@@ -24,15 +25,15 @@ static struct kunit_case dummy_test_cases[] = {
static void parse_filter_test(struct kunit *test)
{
- struct kunit_test_filter filter = {NULL, NULL};
+ struct kunit_glob_filter filter = {NULL, NULL};
- kunit_parse_filter_glob(&filter, "suite");
+ kunit_parse_glob_filter(&filter, "suite");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_FALSE(test, filter.test_glob);
kfree(filter.suite_glob);
kfree(filter.test_glob);
- kunit_parse_filter_glob(&filter, "suite.test");
+ kunit_parse_glob_filter(&filter, "suite.test");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
kfree(filter.suite_glob);
@@ -42,15 +43,17 @@ static void parse_filter_test(struct kunit *test)
static void filter_suites_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
- struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
- struct suite_set got;
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2, NULL */
- got = kunit_filter_suites(&suite_set, "suite2", &err);
+ got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
@@ -66,15 +69,17 @@ static void filter_suites_test(struct kunit *test)
static void filter_suites_test_glob_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
- struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
- struct suite_set got;
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */
- got = kunit_filter_suites(&suite_set, "suite2.test2", &err);
+ got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
@@ -93,14 +98,16 @@ static void filter_suites_test_glob_test(struct kunit *test)
static void filter_suites_to_empty_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
- struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
- struct suite_set got;
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
- got = kunit_filter_suites(&suite_set, "not_found", &err);
+ got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
@@ -108,11 +115,132 @@ static void filter_suites_to_empty_test(struct kunit *test)
"should be empty to indicate no match");
}
+static void parse_filter_attr_test(struct kunit *test)
+{
+ int j, filter_count;
+ struct kunit_attr_filter *parsed_filters;
+ char *filters = "speed>slow, module!=example";
+ int err = 0;
+
+ filter_count = kunit_get_filter_count(filters);
+ KUNIT_EXPECT_EQ(test, filter_count, 2);
+
+ parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
+ GFP_KERNEL);
+ for (j = 0; j < filter_count; j++) {
+ parsed_filters[j] = kunit_next_attr_filter(&filters, &err);
+ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
+ }
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[0].input, ">slow");
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[1]), "module");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[1].input, "!=example");
+}
+
+static struct kunit_case dummy_attr_test_cases[] = {
+ /* .run_case is not important, just needs to be non-NULL */
+ { .name = "slow", .run_case = dummy_test, .module_name = "dummy",
+ .attr.speed = KUNIT_SPEED_SLOW },
+ { .name = "normal", .run_case = dummy_test, .module_name = "dummy" },
+ {},
+};
+
+static void filter_attr_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "slow_suite", dummy_attr_test_cases);
+ subsuite[1]->attr.speed = KUNIT_SPEED_SLOW; // Set suite attribute
+
+ /*
+ * Want: normal_suite(slow, normal), slow_suite(slow, normal),
+ * NULL -> normal_suite(normal), NULL
+ *
+ * The normal test in slow_suite is filtered out because the speed
+ * attribute is unset and thus, the filtering is based on the parent attribute
+ * of slow.
+ */
+ got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start);
+
+ /* Validate we just have normal_suite */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->name, "normal_suite");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+
+ /* Now validate we just have normal test case */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "normal");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
+}
+
+static void filter_attr_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
+
+ got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+}
+
+static void filter_attr_skip_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[2] = {NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[1],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
+
+ /* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
+ got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start);
+
+ /* Validate we have both the slow and normal test */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_ASSERT_EQ(test, kunit_suite_num_test_cases(got.start[0]), 2);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "slow");
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[1].name, "normal");
+
+ /* Now ensure slow is skipped and normal is not */
+ KUNIT_EXPECT_EQ(test, got.start[0]->test_cases[0].status, KUNIT_SKIPPED);
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].status);
+}
+
static struct kunit_case executor_test_cases[] = {
KUNIT_CASE(parse_filter_test),
KUNIT_CASE(filter_suites_test),
KUNIT_CASE(filter_suites_test_glob_test),
KUNIT_CASE(filter_suites_to_empty_test),
+ KUNIT_CASE(parse_filter_attr_test),
+ KUNIT_CASE(filter_attr_test),
+ KUNIT_CASE(filter_attr_empty_test),
+ KUNIT_CASE(filter_attr_skip_test),
{}
};
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index b69b689ea850..01a769f35e1d 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -221,6 +221,14 @@ static void example_params_test(struct kunit *test)
}
/*
+ * This test should always pass. Can be used to practice filtering attributes.
+ */
+static void example_slow_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
*/
@@ -237,6 +245,7 @@ static struct kunit_case example_test_cases[] = {
KUNIT_CASE(example_all_expect_macros_test),
KUNIT_CASE(example_static_stub_test),
KUNIT_CASE_PARAM(example_params_test, example_gen_params),
+ KUNIT_CASE_SLOW(example_slow_test),
{}
};
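
Besides the KUNIT_CASE_SLOW() wrapper used here, a case can set its speed attribute directly, as the executor test's fixtures do. A hypothetical table mixing both spellings (names assumed, not taken from this patch):

        static struct kunit_case attr_style_cases[] = {
                KUNIT_CASE_SLOW(example_slow_test),             /* wrapper form */
                { .name = "explicit_slow", .run_case = example_slow_test,
                  .attr.speed = KUNIT_SPEED_SLOW },             /* designated-init form */
                {}
        };
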
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 84e4666555c9..49698a168437 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -9,6 +9,7 @@
#include <kunit/resource.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
+#include <kunit/attributes.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -168,6 +169,13 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
+/* Currently supported test levels */
+enum {
+ KUNIT_LEVEL_SUITE = 0,
+ KUNIT_LEVEL_CASE,
+ KUNIT_LEVEL_CASE_PARAM,
+};
+
static void kunit_print_suite_start(struct kunit_suite *suite)
{
/*
@@ -181,17 +189,11 @@ static void kunit_print_suite_start(struct kunit_suite *suite)
pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
suite->name);
+ kunit_print_attr((void *)suite, false, KUNIT_LEVEL_CASE);
pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
kunit_suite_num_test_cases(suite));
}
-/* Currently supported test levels */
-enum {
- KUNIT_LEVEL_SUITE = 0,
- KUNIT_LEVEL_CASE,
- KUNIT_LEVEL_CASE_PARAM,
-};
-
static void kunit_print_ok_not_ok(struct kunit *test,
unsigned int test_level,
enum kunit_status status,
@@ -611,18 +613,22 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
struct kunit_result_stats param_stats = { 0 };
- test_case->status = KUNIT_SKIPPED;
kunit_init_test(&test, test_case->name, test_case->log);
-
- if (!test_case->generate_params) {
+ if (test_case->status == KUNIT_SKIPPED) {
+ /* Test marked as skip */
+ test.status = KUNIT_SKIPPED;
+ kunit_update_stats(&param_stats, test.status);
+ } else if (!test_case->generate_params) {
/* Non-parameterised test. */
+ test_case->status = KUNIT_SKIPPED;
kunit_run_case_catch_errors(suite, test_case, &test);
kunit_update_stats(&param_stats, test.status);
} else {
/* Get initial param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
+ test_case->status = KUNIT_SKIPPED;
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"KTAP version 1\n");
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
@@ -651,6 +657,7 @@ int kunit_run_tests(struct kunit_suite *suite)
}
}
+ kunit_print_attr((void *)test_case, true, KUNIT_LEVEL_CASE);
kunit_print_test_stats(&test, param_stats);
@@ -729,12 +736,45 @@ EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
#ifdef CONFIG_MODULES
static void kunit_module_init(struct module *mod)
{
- __kunit_test_suites_init(mod->kunit_suites, mod->num_kunit_suites);
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+ int err = 0;
+
+ suite_set = kunit_filter_suites(&suite_set,
+ kunit_filter_glob() ?: "*.*",
+ kunit_filter(), kunit_filter_action(),
+ &err);
+ if (err)
+ pr_err("kunit module: error filtering suites: %d\n", err);
+
+ mod->kunit_suites = (struct kunit_suite **)suite_set.start;
+ mod->num_kunit_suites = suite_set.end - suite_set.start;
+
+ if (!action)
+ kunit_exec_run_tests(&suite_set, false);
+ else if (!strcmp(action, "list"))
+ kunit_exec_list_tests(&suite_set, false);
+ else if (!strcmp(action, "list_attr"))
+ kunit_exec_list_tests(&suite_set, true);
+ else
+ pr_err("kunit: unknown action '%s'\n", action);
}
static void kunit_module_exit(struct module *mod)
{
- __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites);
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+
+ if (!action)
+ __kunit_test_suites_exit(mod->kunit_suites,
+ mod->num_kunit_suites);
+
+ if (suite_set.start)
+ kunit_free_suite_set(suite_set);
}
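
The kunit_action()/kunit_filter()/kunit_filter_action() helpers used above are assumed to read module parameters, so the built-in executor's filtering should apply equally to test modules loaded at runtime, e.g. (illustrative boot parameters):

	kunit.action=list_attr kunit.filter="speed>slow" kunit.filter_action=skip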
static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
diff --git a/lib/list_debug.c b/lib/list_debug.c
index d98d43f80958..db602417febf 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -2,7 +2,8 @@
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
- * This file contains the linked list validation for DEBUG_LIST.
+ * This file contains the linked list validation and error reporting for
+ * LIST_HARDENED and DEBUG_LIST.
*/
#include <linux/export.h>
@@ -17,8 +18,9 @@
* attempt).
*/
-bool __list_add_valid(struct list_head *new, struct list_head *prev,
- struct list_head *next)
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
{
if (CHECK_DATA_CORRUPTION(prev == NULL,
"list_add corruption. prev is NULL.\n") ||
@@ -37,9 +39,10 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
return true;
}
-EXPORT_SYMBOL(__list_add_valid);
+EXPORT_SYMBOL(__list_add_valid_or_report);
-bool __list_del_entry_valid(struct list_head *entry)
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
{
struct list_head *prev, *next;
@@ -65,6 +68,5 @@ bool __list_del_entry_valid(struct list_head *entry)
return false;
return true;
-
}
-EXPORT_SYMBOL(__list_del_entry_valid);
+EXPORT_SYMBOL(__list_del_entry_valid_or_report);
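
For context, a sketch of the header-side split the *_or_report() rename implies (assumed shape, not the exact include/linux/list.h code): the common case stays a cheap inline check, and only the failure path calls the out-of-line reporting helper.

static __always_inline bool __list_add_valid(struct list_head *new,
					     struct list_head *prev,
					     struct list_head *next)
{
	/* Fast path: pointers are consistent, no report needed. */
	if (likely(next->prev == prev && prev->next == next &&
		   new != prev && new != next))
		return true;

	/* Slow path: emit the corruption report. */
	return __list_add_valid_or_report(new, prev, next);
}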
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 8d24279fad05..6f6a5fc85b42 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2506,94 +2506,29 @@ static void fs_reclaim_tests(void)
pr_cont("\n");
}
-#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+/* Defines guard classes to create contexts */
+DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
+ do {
+ local_irq_disable();
+ __irq_enter();
+ WARN_ON(!in_irq());
+ } while(0), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())
+
+/* Define RCU guards; these should go away once RCU has its own guard definitions */
+DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
+DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())
-static void hardirq_exit(int *_)
-{
- HARDIRQ_EXIT();
-}
-
-#define HARDIRQ_CONTEXT(name, ...) \
- int hardirq_guard_##name __guard(hardirq_exit); \
- HARDIRQ_ENTER();
-
-#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
- int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
- local_irq_disable(); \
- __irq_enter(); \
- WARN_ON(!in_irq());
-
-static void softirq_exit(int *_)
-{
- SOFTIRQ_EXIT();
-}
-
-#define SOFTIRQ_CONTEXT(name, ...) \
- int softirq_guard_##name __guard(softirq_exit); \
- SOFTIRQ_ENTER();
-
-static void rcu_exit(int *_)
-{
- rcu_read_unlock();
-}
-
-#define RCU_CONTEXT(name, ...) \
- int rcu_guard_##name __guard(rcu_exit); \
- rcu_read_lock();
-
-static void rcu_bh_exit(int *_)
-{
- rcu_read_unlock_bh();
-}
-
-#define RCU_BH_CONTEXT(name, ...) \
- int rcu_bh_guard_##name __guard(rcu_bh_exit); \
- rcu_read_lock_bh();
-
-static void rcu_sched_exit(int *_)
-{
- rcu_read_unlock_sched();
-}
-
-#define RCU_SCHED_CONTEXT(name, ...) \
- int rcu_sched_guard_##name __guard(rcu_sched_exit); \
- rcu_read_lock_sched();
-
-static void raw_spinlock_exit(raw_spinlock_t **lock)
-{
- raw_spin_unlock(*lock);
-}
-
-#define RAW_SPINLOCK_CONTEXT(name, lock) \
- raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
- raw_spin_lock(&(lock));
-
-static void spinlock_exit(spinlock_t **lock)
-{
- spin_unlock(*lock);
-}
-
-#define SPINLOCK_CONTEXT(name, lock) \
- spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
- spin_lock(&(lock));
-
-static void mutex_exit(struct mutex **lock)
-{
- mutex_unlock(*lock);
-}
-
-#define MUTEX_CONTEXT(name, lock) \
- struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
- mutex_lock(&(lock));
#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
\
static void __maybe_unused inner##_in_##outer(void) \
{ \
- outer##_CONTEXT(_, outer_lock); \
- { \
- inner##_CONTEXT(_, inner_lock); \
- } \
+	/* Relies on the reversed clean-up ordering: inner first */ \
+ guard(outer)(outer_lock); \
+ guard(inner)(inner_lock); \
}
/*
@@ -2632,21 +2567,21 @@ GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+GENERATE_2_CONTEXT_TESTCASE(raw_spinlock, &raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(spinlock, &lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(mutex, &mutex_A, inner, inner_lock)
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(raw_spinlock, &raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(spinlock, &lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B)
/* the outer context allows all kinds of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
/*
* the outer context only allows the preemption introduced by spinlock_t (which
@@ -2654,16 +2589,16 @@ GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
*/
#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
/* the outer context doesn't allow any kind of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
- dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, FAILURE, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
static void wait_context_tests(void)
{
@@ -2697,15 +2632,15 @@ static void wait_context_tests(void)
pr_cont("\n");
print_testname("in RAW_SPINLOCK context");
- DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock);
pr_cont("\n");
print_testname("in SPINLOCK context");
- DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(spinlock);
pr_cont("\n");
print_testname("in MUTEX context");
- DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+ DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex);
pr_cont("\n");
}
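
To make the expected results above concrete, a hedged reading of the wait-context rules under test: an inner lock's wait type must not exceed what the outer context permits, so a sleeping lock inside a spinlock_t region must fail while a raw_spinlock_t may nest there:

static void invalid_wait_context_example(void)
{
	spin_lock(&lock_A);
	mutex_lock(&mutex_B);	/* lockdep: invalid wait context */
	mutex_unlock(&mutex_B);
	spin_unlock(&lock_A);
}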
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index 07b4b9a1f54b..2ea564a40064 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -20,9 +20,6 @@
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);
-/* Consider a kernel general helper for this */
-#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
-
/**
* logic_pio_register_range - register logical PIO range for a host
* @new_range: pointer to the IO range to be registered.
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index bfffbb7cab26..ee1ff0c59fd7 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -75,6 +75,7 @@
#define MA_STATE_PREALLOC 4
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
+#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;
@@ -729,33 +730,6 @@ mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
}
/*
- * mas_logical_pivot() - Get the logical pivot of a given offset.
- * @mas: The maple state
- * @pivots: The pointer to the maple node pivots
- * @offset: The offset into the pivot array
- * @type: The maple node type
- *
- * When there is no value at a pivot (beyond the end of the data), then the
- * pivot is actually @mas->max.
- *
- * Return: the logical pivot of a given @offset.
- */
-static inline unsigned long
-mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
- unsigned char offset, enum maple_type type)
-{
- unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
-
- if (likely(lpiv))
- return lpiv;
-
- if (likely(offset))
- return mas->max;
-
- return lpiv;
-}
-
-/*
* mte_set_pivot() - Set a pivot to a value in an encoded maple node.
* @mn: The encoded maple node
* @piv: The pivot offset
@@ -804,6 +778,12 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
}
}
+static inline bool mt_write_locked(const struct maple_tree *mt)
+{
+ return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
+ lockdep_is_held(&mt->ma_lock);
+}
+
static inline bool mt_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_lock_is_held(mt) :
@@ -819,7 +799,7 @@ static inline void *mt_slot(const struct maple_tree *mt,
static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
unsigned char offset)
{
- return rcu_dereference_protected(slots[offset], mt_locked(mt));
+ return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
/*
* mas_slot_locked() - Get the slot value when holding the maple tree lock.
@@ -862,7 +842,7 @@ static inline void *mas_root(struct ma_state *mas)
static inline void *mt_root_locked(struct maple_tree *mt)
{
- return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
+ return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}
/*
@@ -1002,27 +982,9 @@ static inline void mat_add(struct ma_topiary *mat,
mat->tail = dead_enode;
}
-static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
-static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
-
-/*
- * mas_mat_free() - Free all nodes in a dead list.
- * @mas - the maple state
- * @mat - the ma_topiary linked list of dead nodes to free.
- *
- * Free walk a dead list.
- */
-static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
-{
- struct maple_enode *next;
-
- while (mat->head) {
- next = mte_to_mat(mat->head)->next;
- mas_free(mas, mat->head);
- mat->head = next;
- }
-}
-
+static void mt_free_walk(struct rcu_head *head);
+static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ bool free);
/*
* mas_mat_destroy() - Free all nodes and subtrees in a dead list.
* @mas - the maple state
@@ -1033,10 +995,15 @@ static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
struct maple_enode *next;
+ struct maple_node *node;
+ bool in_rcu = mt_in_rcu(mas->tree);
while (mat->head) {
next = mte_to_mat(mat->head)->next;
- mte_destroy_walk(mat->head, mat->mtree);
+ node = mte_to_node(mat->head);
+ mt_destroy_walk(mat->head, mas->tree, !in_rcu);
+ if (in_rcu)
+ call_rcu(&node->rcu, mt_free_walk);
mat->head = next;
}
}
@@ -1610,8 +1577,6 @@ ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
* mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
* @mas: The maple state.
*
- * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
- *
* Return: The gap value.
*/
static inline unsigned long mas_max_gap(struct ma_state *mas)
@@ -1628,9 +1593,6 @@ static inline unsigned long mas_max_gap(struct ma_state *mas)
node = mas_mn(mas);
MAS_BUG_ON(mas, mt != maple_arange_64);
offset = ma_meta_gap(node, mt);
- if (offset == MAPLE_ARANGE64_META_MAX)
- return 0;
-
gaps = ma_gaps(node, mt);
return gaps[offset];
}
@@ -1662,10 +1624,7 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
ascend:
MAS_BUG_ON(mas, pmt != maple_arange_64);
meta_offset = ma_meta_gap(pnode, pmt);
- if (meta_offset == MAPLE_ARANGE64_META_MAX)
- meta_gap = 0;
- else
- meta_gap = pgaps[meta_offset];
+ meta_gap = pgaps[meta_offset];
pgaps[offset] = new;
@@ -1678,7 +1637,6 @@ ascend:
ma_set_meta_gap(pnode, pmt, offset);
} else if (new < meta_gap) {
- meta_offset = 15;
new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
ma_set_meta_gap(pnode, pmt, meta_offset);
}
@@ -1731,7 +1689,7 @@ static inline void mas_adopt_children(struct ma_state *mas,
struct maple_enode *parent)
{
enum maple_type type = mte_node_type(parent);
- struct maple_node *node = mas_mn(mas);
+ struct maple_node *node = mte_to_node(parent);
void __rcu **slots = ma_slots(node, type);
unsigned long *pivots = ma_pivots(node, type);
struct maple_enode *child;
@@ -1745,53 +1703,54 @@ static inline void mas_adopt_children(struct ma_state *mas,
}
/*
- * mas_replace() - Replace a maple node in the tree with mas->node. Uses the
- * parent encoding to locate the maple node in the tree.
- * @mas - the ma_state to use for operations.
- * @advanced - boolean to adopt the child nodes and free the old node (false) or
- * leave the node (true) and handle the adoption and free elsewhere.
+ * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
+ * node as dead.
+ * @mas - the maple state with the new node
+ * @old_enode - The old maple encoded node to replace.
*/
-static inline void mas_replace(struct ma_state *mas, bool advanced)
+static inline void mas_put_in_tree(struct ma_state *mas,
+ struct maple_enode *old_enode)
__must_hold(mas->tree->ma_lock)
{
- struct maple_node *mn = mas_mn(mas);
- struct maple_enode *old_enode;
- unsigned char offset = 0;
- void __rcu **slots = NULL;
-
- if (ma_is_root(mn)) {
- old_enode = mas_root_locked(mas);
- } else {
- offset = mte_parent_slot(mas->node);
- slots = ma_slots(mte_parent(mas->node),
- mas_parent_type(mas, mas->node));
- old_enode = mas_slot_locked(mas, slots, offset);
- }
-
- if (!advanced && !mte_is_leaf(mas->node))
- mas_adopt_children(mas, mas->node);
+ unsigned char offset;
+ void __rcu **slots;
if (mte_is_root(mas->node)) {
- mn->parent = ma_parent_ptr(
- ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
mas_set_height(mas);
} else {
+
+ offset = mte_parent_slot(mas->node);
+ slots = ma_slots(mte_parent(mas->node),
+ mas_parent_type(mas, mas->node));
rcu_assign_pointer(slots[offset], mas->node);
}
- if (!advanced) {
- mte_set_node_dead(old_enode);
- mas_free(mas, old_enode);
- }
+ mte_set_node_dead(old_enode);
}
/*
- * mas_new_child() - Find the new child of a node.
- * @mas: the maple state
+ * mas_replace_node() - Replace a node by putting it in the tree, marking it
+ * dead, and freeing it. Uses the parent encoding to locate the maple node in
+ * the tree.
+ * @mas - the ma_state with @mas->node pointing to the new node.
+ * @old_enode - The old maple encoded node.
+ */
+static inline void mas_replace_node(struct ma_state *mas,
+ struct maple_enode *old_enode)
+ __must_hold(mas->tree->ma_lock)
+{
+ mas_put_in_tree(mas, old_enode);
+ mas_free(mas, old_enode);
+}
+
+/*
+ * mas_find_child() - Find a child who has the parent @mas->node.
+ * @mas: the maple state with the parent.
* @child: the maple state to store the child.
*/
-static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
+static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
__must_hold(mas->tree->ma_lock)
{
enum maple_type mt;
@@ -2076,7 +2035,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
end = j - 1;
if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
unsigned long max_gap = 0;
- unsigned char offset = 15;
+ unsigned char offset = 0;
gaps = ma_gaps(node, mt);
do {
@@ -2094,56 +2053,6 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
}
/*
- * mas_descend_adopt() - Descend through a sub-tree and adopt children.
- * @mas: the maple state with the maple encoded node of the sub-tree.
- *
- * Descend through a sub-tree and adopt children who do not have the correct
- * parents set. Follow the parents which have the correct parents as they are
- * the new entries which need to be followed to find other incorrectly set
- * parents.
- */
-static inline void mas_descend_adopt(struct ma_state *mas)
-{
- struct ma_state list[3], next[3];
- int i, n;
-
- /*
- * At each level there may be up to 3 correct parent pointers which indicates
- * the new nodes which need to be walked to find any new nodes at a lower level.
- */
-
- for (i = 0; i < 3; i++) {
- list[i] = *mas;
- list[i].offset = 0;
- next[i].offset = 0;
- }
- next[0] = *mas;
-
- while (!mte_is_leaf(list[0].node)) {
- n = 0;
- for (i = 0; i < 3; i++) {
- if (mas_is_none(&list[i]))
- continue;
-
- if (i && list[i-1].node == list[i].node)
- continue;
-
- while ((n < 3) && (mas_new_child(&list[i], &next[n])))
- n++;
-
- mas_adopt_children(&list[i], list[i].node);
- }
-
- while (n < 3)
- next[n++].node = MAS_NONE;
-
- /* descend by setting the list to the children */
- for (i = 0; i < 3; i++)
- list[i] = next[i];
- }
-}
-
-/*
* mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
* @mas: The maple state
* @end: The maple node end
@@ -2211,7 +2120,7 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
goto b_end;
/* Handle new range ending before old range ends */
- piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
+ piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
if (piv > mas->last) {
if (piv == ULONG_MAX)
mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
@@ -2333,98 +2242,6 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
}
/*
- * mas_topiary_range() - Add a range of slots to the topiary.
- * @mas: The maple state
- * @destroy: The topiary to add the slots (usually destroy)
- * @start: The starting slot inclusively
- * @end: The end slot inclusively
- */
-static inline void mas_topiary_range(struct ma_state *mas,
- struct ma_topiary *destroy, unsigned char start, unsigned char end)
-{
- void __rcu **slots;
- unsigned char offset;
-
- MAS_BUG_ON(mas, mte_is_leaf(mas->node));
-
- slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
- for (offset = start; offset <= end; offset++) {
- struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
-
- if (mte_dead_node(enode))
- continue;
-
- mat_add(destroy, enode);
- }
-}
-
-/*
- * mast_topiary() - Add the portions of the tree to the removal list; either to
- * be freed or discarded (destroy walk).
- * @mast: The maple_subtree_state.
- */
-static inline void mast_topiary(struct maple_subtree_state *mast)
-{
- MA_WR_STATE(wr_mas, mast->orig_l, NULL);
- unsigned char r_start, r_end;
- unsigned char l_start, l_end;
- void __rcu **l_slots, **r_slots;
-
- wr_mas.type = mte_node_type(mast->orig_l->node);
- mast->orig_l->index = mast->orig_l->last;
- mas_wr_node_walk(&wr_mas);
- l_start = mast->orig_l->offset + 1;
- l_end = mas_data_end(mast->orig_l);
- r_start = 0;
- r_end = mast->orig_r->offset;
-
- if (r_end)
- r_end--;
-
- l_slots = ma_slots(mas_mn(mast->orig_l),
- mte_node_type(mast->orig_l->node));
-
- r_slots = ma_slots(mas_mn(mast->orig_r),
- mte_node_type(mast->orig_r->node));
-
- if ((l_start < l_end) &&
- mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
- l_start++;
- }
-
- if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
- if (r_end)
- r_end--;
- }
-
- if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
- return;
-
- /* At the node where left and right sides meet, add the parts between */
- if (mast->orig_l->node == mast->orig_r->node) {
- return mas_topiary_range(mast->orig_l, mast->destroy,
- l_start, r_end);
- }
-
- /* mast->orig_r is different and consumed. */
- if (mte_is_leaf(mast->orig_r->node))
- return;
-
- if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
- l_end--;
-
-
- if (l_start <= l_end)
- mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
-
- if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
- r_start++;
-
- if (r_start <= r_end)
- mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
-}
-
-/*
* mast_rebalance_next() - Rebalance against the next node
* @mast: The maple subtree state
* @old_r: The encoded maple node to the right (next node).
@@ -2459,7 +2276,7 @@ static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
/*
* mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
* the node to the right. Checking the nodes to the right then the left at each
- * level upwards until root is reached. Free and destroy as needed.
+ * level upwards until root is reached.
* Data is copied into the @mast->bn.
* @mast: The maple_subtree_state.
*/
@@ -2468,8 +2285,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{
struct ma_state r_tmp = *mast->orig_r;
struct ma_state l_tmp = *mast->orig_l;
- struct maple_enode *ancestor = NULL;
- unsigned char start, end;
unsigned char depth = 0;
r_tmp = *mast->orig_r;
@@ -2478,87 +2293,25 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
depth++;
- if (!ancestor &&
- (mast->orig_r->node == mast->orig_l->node)) {
- ancestor = mast->orig_r->node;
- end = mast->orig_r->offset - 1;
- start = mast->orig_l->offset + 1;
- }
-
if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
- if (!ancestor) {
- ancestor = mast->orig_r->node;
- start = 0;
- }
-
mast->orig_r->offset++;
do {
mas_descend(mast->orig_r);
mast->orig_r->offset = 0;
- depth--;
- } while (depth);
+ } while (--depth);
mast_rebalance_next(mast);
- do {
- unsigned char l_off = 0;
- struct maple_enode *child = r_tmp.node;
-
- mas_ascend(&r_tmp);
- if (ancestor == r_tmp.node)
- l_off = start;
-
- if (r_tmp.offset)
- r_tmp.offset--;
-
- if (l_off < r_tmp.offset)
- mas_topiary_range(&r_tmp, mast->destroy,
- l_off, r_tmp.offset);
-
- if (l_tmp.node != child)
- mat_add(mast->free, child);
-
- } while (r_tmp.node != ancestor);
-
*mast->orig_l = l_tmp;
return true;
-
} else if (mast->orig_l->offset != 0) {
- if (!ancestor) {
- ancestor = mast->orig_l->node;
- end = mas_data_end(mast->orig_l);
- }
-
mast->orig_l->offset--;
do {
mas_descend(mast->orig_l);
mast->orig_l->offset =
mas_data_end(mast->orig_l);
- depth--;
- } while (depth);
+ } while (--depth);
mast_rebalance_prev(mast);
- do {
- unsigned char r_off;
- struct maple_enode *child = l_tmp.node;
-
- mas_ascend(&l_tmp);
- if (ancestor == l_tmp.node)
- r_off = end;
- else
- r_off = mas_data_end(&l_tmp);
-
- if (l_tmp.offset < r_off)
- l_tmp.offset++;
-
- if (l_tmp.offset < r_off)
- mas_topiary_range(&l_tmp, mast->destroy,
- l_tmp.offset, r_off);
-
- if (r_tmp.node != child)
- mat_add(mast->free, child);
-
- } while (l_tmp.node != ancestor);
-
*mast->orig_r = r_tmp;
return true;
}
@@ -2570,36 +2323,24 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
}
/*
- * mast_ascend_free() - Add current original maple state nodes to the free list
- * and ascend.
+ * mast_ascend() - Ascend the original left and right maple states.
* @mast: the maple subtree state.
*
- * Ascend the original left and right sides and add the previous nodes to the
- * free list. Set the slots to point to the correct location in the new nodes.
+ * Ascend the original left and right sides. Set the offsets to point to the
+ * data already in the new tree (@mast->l and @mast->r).
*/
-static inline void
-mast_ascend_free(struct maple_subtree_state *mast)
+static inline void mast_ascend(struct maple_subtree_state *mast)
{
MA_WR_STATE(wr_mas, mast->orig_r, NULL);
- struct maple_enode *left = mast->orig_l->node;
- struct maple_enode *right = mast->orig_r->node;
-
mas_ascend(mast->orig_l);
mas_ascend(mast->orig_r);
- mat_add(mast->free, left);
-
- if (left != right)
- mat_add(mast->free, right);
mast->orig_r->offset = 0;
mast->orig_r->index = mast->r->max;
/* last should be larger than or equal to index */
if (mast->orig_r->last < mast->orig_r->index)
mast->orig_r->last = mast->orig_r->index;
- /*
- * The node may not contain the value so set slot to ensure all
- * of the nodes contents are freed or destroyed.
- */
+
wr_mas.type = mte_node_type(mast->orig_r->node);
mas_wr_node_walk(&wr_mas);
/* Set up the left side of things */
@@ -2778,58 +2519,152 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
}
/*
- * mas_wmb_replace() - Write memory barrier and replace
- * @mas: The maple state
- * @free: the maple topiary list of nodes to free
- * @destroy: The maple topiary list of nodes to destroy (walk and free)
+ * mas_topiary_node() - Dispose of a single node
+ * @mas: The maple state for pushing nodes
+ * @enode: The encoded maple node
+ * @in_rcu: If the tree is in rcu mode
*
- * Updates gap as necessary.
+ * The node will either be RCU freed or pushed back on the maple state.
*/
-static inline void mas_wmb_replace(struct ma_state *mas,
- struct ma_topiary *free,
- struct ma_topiary *destroy)
+static inline void mas_topiary_node(struct ma_state *mas,
+ struct maple_enode *enode, bool in_rcu)
{
- /* All nodes must see old data as dead prior to replacing that data */
- smp_wmb(); /* Needed for RCU */
+ struct maple_node *tmp;
- /* Insert the new data in the tree */
- mas_replace(mas, true);
+ if (enode == MAS_NONE)
+ return;
+
+ tmp = mte_to_node(enode);
+ mte_set_node_dead(enode);
+ if (in_rcu)
+ ma_free_rcu(tmp);
+ else
+ mas_push_node(mas, tmp);
+}
+
+/*
+ * mas_topiary_replace() - Replace the data with new data, then repair the
+ * parent links within the new tree. Iterate over the dead sub-tree, collect
+ * the dead subtrees, and topiary the nodes that are no longer of use.
+ *
+ * The new tree will have up to three children with the correct parent. Keep
+ * track of the new entries as they need to be followed to find the next level
+ * of new entries.
+ *
+ * The old tree will have up to three children with the old parent. Keep track
+ * of the old entries as they may have more replaced nodes below them. Nodes
+ * within [index, last] are dead subtrees; others need to be freed and followed.
+ *
+ * @mas: The maple state pointing at the new data
+ * @old_enode: The maple encoded node being replaced
+ *
+ */
+static inline void mas_topiary_replace(struct ma_state *mas,
+ struct maple_enode *old_enode)
+{
+ struct ma_state tmp[3], tmp_next[3];
+ MA_TOPIARY(subtrees, mas->tree);
+ bool in_rcu;
+ int i, n;
- if (!mte_is_leaf(mas->node))
- mas_descend_adopt(mas);
+ /* Place data in tree & then mark node as old */
+ mas_put_in_tree(mas, old_enode);
- mas_mat_free(mas, free);
+ /* Update the parent pointers in the tree */
+ tmp[0] = *mas;
+ tmp[0].offset = 0;
+ tmp[1].node = MAS_NONE;
+ tmp[2].node = MAS_NONE;
+ while (!mte_is_leaf(tmp[0].node)) {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&tmp[i]))
+ continue;
- if (destroy)
- mas_mat_destroy(mas, destroy);
+ while (n < 3) {
+ if (!mas_find_child(&tmp[i], &tmp_next[n]))
+ break;
+ n++;
+ }
- if (mte_is_leaf(mas->node))
- return;
+ mas_adopt_children(&tmp[i], tmp[i].node);
+ }
- mas_update_gap(mas);
+ if (MAS_WARN_ON(mas, n == 0))
+ break;
+
+ while (n < 3)
+ tmp_next[n++].node = MAS_NONE;
+
+ for (i = 0; i < 3; i++)
+ tmp[i] = tmp_next[i];
+ }
+
+ /* Collect the old nodes that need to be discarded */
+ if (mte_is_leaf(old_enode))
+ return mas_free(mas, old_enode);
+
+ tmp[0] = *mas;
+ tmp[0].offset = 0;
+ tmp[0].node = old_enode;
+ tmp[1].node = MAS_NONE;
+ tmp[2].node = MAS_NONE;
+ in_rcu = mt_in_rcu(mas->tree);
+ do {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&tmp[i]))
+ continue;
+
+ while (n < 3) {
+ if (!mas_find_child(&tmp[i], &tmp_next[n]))
+ break;
+
+ if ((tmp_next[n].min >= tmp_next->index) &&
+ (tmp_next[n].max <= tmp_next->last)) {
+ mat_add(&subtrees, tmp_next[n].node);
+ tmp_next[n].node = MAS_NONE;
+ } else {
+ n++;
+ }
+ }
+ }
+
+ if (MAS_WARN_ON(mas, n == 0))
+ break;
+
+ while (n < 3)
+ tmp_next[n++].node = MAS_NONE;
+
+ for (i = 0; i < 3; i++) {
+ mas_topiary_node(mas, tmp[i].node, in_rcu);
+ tmp[i] = tmp_next[i];
+ }
+ } while (!mte_is_leaf(tmp[0].node));
+
+ for (i = 0; i < 3; i++)
+ mas_topiary_node(mas, tmp[i].node, in_rcu);
+
+ mas_mat_destroy(mas, &subtrees);
}
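
Editor's illustration of the walk above (not in the source): a split produces at most a left, middle, and right node per level, so tmp[] and tmp_next[] never need more than three states, whether descending the new tree to adopt children or the old tree to free or queue nodes.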
/*
- * mast_new_root() - Set a new tree root during subtree creation
- * @mast: The maple subtree state
+ * mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
+ * @old: The old maple encoded node that is being replaced.
+ *
+ * Updates gap as necessary.
*/
-static inline void mast_new_root(struct maple_subtree_state *mast,
- struct ma_state *mas)
+static inline void mas_wmb_replace(struct ma_state *mas,
+ struct maple_enode *old_enode)
{
- mas_mn(mast->l)->parent =
- ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
- if (!mte_dead_node(mast->orig_l->node) &&
- !mte_is_root(mast->orig_l->node)) {
- do {
- mast_ascend_free(mast);
- mast_topiary(mast);
- } while (!mte_is_root(mast->orig_l->node));
- }
- if ((mast->orig_l->node != mas->node) &&
- (mast->l->depth > mas_mt_height(mas))) {
- mat_add(mast->free, mas->node);
- }
+ /* Insert the new data in the tree */
+ mas_topiary_replace(mas, old_enode);
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ mas_update_gap(mas);
}
/*
@@ -3015,12 +2850,11 @@ static int mas_spanning_rebalance(struct ma_state *mas,
unsigned char split, mid_split;
unsigned char slot = 0;
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
+ struct maple_enode *old_enode;
MA_STATE(l_mas, mas->tree, mas->index, mas->index);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(m_mas, mas->tree, mas->index, mas->index);
- MA_TOPIARY(free, mas->tree);
- MA_TOPIARY(destroy, mas->tree);
/*
* The tree needs to be rebalanced and leaves need to be kept at the same level.
@@ -3029,8 +2863,6 @@ static int mas_spanning_rebalance(struct ma_state *mas,
mast->l = &l_mas;
mast->m = &m_mas;
mast->r = &r_mas;
- mast->free = &free;
- mast->destroy = &destroy;
l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
/* Check if this is not root and has sufficient data. */
@@ -3038,7 +2870,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
mast_spanning_rebalance(mast);
- mast->orig_l->depth = 0;
+ l_mas.depth = 0;
/*
* Each level of the tree is examined and balanced, pushing data to the left or
@@ -3049,7 +2881,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
* original tree and the partially new tree. To remedy the parent pointers in
* the old tree, the new data is swapped into the active tree and a walk down
* the tree is performed and the parent pointers are updated.
- * See mas_descend_adopt() for more information..
+ * See mas_topiary_replace() for more information.
*/
while (count--) {
mast->bn->b_end--;
@@ -3066,13 +2898,13 @@ static int mas_spanning_rebalance(struct ma_state *mas,
*/
memset(mast->bn, 0, sizeof(struct maple_big_node));
mast->bn->type = mte_node_type(left);
- mast->orig_l->depth++;
+ l_mas.depth++;
/* Root already stored in l->node. */
if (mas_is_root_limits(mast->l))
goto new_root;
- mast_ascend_free(mast);
+ mast_ascend(mast);
mast_combine_cp_left(mast);
l_mas.offset = mast->bn->b_end;
mab_set_b_end(mast->bn, &l_mas, left);
@@ -3081,7 +2913,6 @@ static int mas_spanning_rebalance(struct ma_state *mas,
/* Copy anything necessary out of the right node. */
mast_combine_cp_right(mast);
- mast_topiary(mast);
mast->orig_l->last = mast->orig_l->max;
if (mast_sufficient(mast))
@@ -3103,7 +2934,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
- mast->orig_l->depth++;
+ l_mas.depth++;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
mas_set_parent(mas, left, l_mas.node, slot);
if (middle)
@@ -3114,23 +2945,20 @@ static int mas_spanning_rebalance(struct ma_state *mas,
if (mas_is_root_limits(mast->l)) {
new_root:
- mast_new_root(mast, mas);
+ mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
+ while (!mte_is_root(mast->orig_l->node))
+ mast_ascend(mast);
} else {
mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
}
- if (!mte_dead_node(mast->orig_l->node))
- mat_add(&free, mast->orig_l->node);
-
- mas->depth = mast->orig_l->depth;
- *mast->orig_l = l_mas;
- mte_set_node_dead(mas->node);
-
- /* Set up mas for insertion. */
- mast->orig_l->depth = mas->depth;
- mast->orig_l->alloc = mas->alloc;
- *mas = *mast->orig_l;
- mas_wmb_replace(mas, &free, &destroy);
+ old_enode = mast->orig_l->node;
+ mas->depth = l_mas.depth;
+ mas->node = l_mas.node;
+ mas->min = l_mas.min;
+ mas->max = l_mas.max;
+ mas->offset = l_mas.offset;
+ mas_wmb_replace(mas, old_enode);
mtree_range_walk(mas);
return mast->bn->b_end;
}
@@ -3166,7 +2994,7 @@ static inline int mas_rebalance(struct ma_state *mas,
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
- mas_node_count(mas, 1 + empty_count * 3);
+ mas_node_count(mas, empty_count * 2 - 1);
if (mas_is_err(mas))
return 0;
@@ -3206,7 +3034,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
{
enum maple_type mt = mte_node_type(mas->node);
struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
- struct maple_enode *eparent;
+ struct maple_enode *eparent, *old_eparent;
unsigned char offset, tmp, split = mt_slots[mt] / 2;
void __rcu **l_slots, **slots;
unsigned long *l_pivs, *pivs, gap;
@@ -3248,7 +3076,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
l_mas.max = l_pivs[split];
mas->min = l_mas.max + 1;
- eparent = mt_mk_node(mte_parent(l_mas.node),
+ old_eparent = mt_mk_node(mte_parent(l_mas.node),
mas_parent_type(&l_mas, l_mas.node));
tmp += end;
if (!in_rcu) {
@@ -3264,7 +3092,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
memcpy(node, newnode, sizeof(struct maple_node));
ma_set_meta(node, mt, 0, tmp - 1);
- mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
+ mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
l_pivs[split]);
/* Remove data from l_pivs. */
@@ -3272,6 +3100,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
ma_set_meta(left, mt, 0, split);
+ eparent = old_eparent;
goto done;
}
@@ -3296,7 +3125,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
parent = mas_pop_node(mas);
slots = ma_slots(parent, mt);
pivs = ma_pivots(parent, mt);
- memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
+ memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
rcu_assign_pointer(slots[offset], mas->node);
rcu_assign_pointer(slots[offset - 1], l_mas.node);
pivs[offset - 1] = l_mas.max;
@@ -3308,8 +3137,10 @@ done:
mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
mas_ascend(mas);
- if (in_rcu)
- mas_replace(mas, false);
+ if (in_rcu) {
+ mas_replace_node(mas, old_eparent);
+ mas_adopt_children(mas, mas->node);
+ }
mas_update_gap(mas);
}
@@ -3358,7 +3189,6 @@ static inline void mast_fill_bnode(struct maple_subtree_state *mast,
unsigned char skip)
{
bool cp = true;
- struct maple_enode *old = mas->node;
unsigned char split;
memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
@@ -3370,7 +3200,6 @@ static inline void mast_fill_bnode(struct maple_subtree_state *mast,
cp = false;
} else {
mas_ascend(mas);
- mat_add(mast->free, old);
mas->offset = mte_parent_slot(mas->node);
}
@@ -3474,13 +3303,11 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
split = mt_slots[mast->bn->type] - 2;
if (left) {
/* Switch mas to prev node */
- mat_add(mast->free, mas->node);
*mas = tmp_mas;
/* Start using mast->l for the left side. */
tmp_mas.node = mast->l->node;
*mast->l = tmp_mas;
} else {
- mat_add(mast->free, tmp_mas.node);
tmp_mas.node = mast->r->node;
*mast->r = tmp_mas;
split = slot_total - split;
@@ -3507,6 +3334,7 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
struct maple_subtree_state mast;
int height = 0;
unsigned char mid_split, split = 0;
+ struct maple_enode *old;
/*
* Splitting is handled differently from any other B-tree; the Maple
@@ -3529,7 +3357,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
- MA_TOPIARY(mat, mas->tree);
trace_ma_op(__func__, mas);
mas->depth = mas_mt_height(mas);
@@ -3542,7 +3369,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
mast.r = &r_mas;
mast.orig_l = &prev_l_mas;
mast.orig_r = &prev_r_mas;
- mast.free = &mat;
mast.bn = b_node;
while (height++ <= mas->depth) {
@@ -3582,9 +3408,9 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
}
/* Set the original node as dead */
- mat_add(mast.free, mas->node);
+ old = mas->node;
mas->node = l_mas.node;
- mas_wmb_replace(mas, mast.free, NULL);
+ mas_wmb_replace(mas, old);
mtree_range_walk(mas);
return 1;
}
@@ -3626,11 +3452,13 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
struct maple_big_node *b_node, unsigned char end)
{
struct maple_node *node;
+ struct maple_enode *old_enode;
unsigned char b_end = b_node->b_end;
enum maple_type b_type = b_node->type;
+ old_enode = wr_mas->mas->node;
if ((b_end < mt_min_slots[b_type]) &&
- (!mte_is_root(wr_mas->mas->node)) &&
+ (!mte_is_root(old_enode)) &&
(mas_mt_height(wr_mas->mas) > 1))
return mas_rebalance(wr_mas->mas, b_node);
@@ -3648,7 +3476,7 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
node->parent = mas_mn(wr_mas->mas)->parent;
wr_mas->mas->node = mt_mk_node(node, b_type);
mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
- mas_replace(wr_mas->mas, false);
+ mas_replace_node(wr_mas->mas, old_enode);
reuse_node:
mas_update_gap(wr_mas->mas);
return 1;
@@ -3675,8 +3503,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
- node->parent = ma_parent_ptr(
- ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
if (mas->index) {
@@ -3692,7 +3519,8 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
mas->offset = slot;
pivots[slot] = mas->last;
if (mas->last != ULONG_MAX)
- slot++;
+ pivots[++slot] = ULONG_MAX;
+
mas->depth = 1;
mas_set_height(mas);
ma_set_meta(node, maple_leaf_64, 0, slot);
@@ -3918,6 +3746,7 @@ dead_node:
return NULL;
}
+static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
/*
* mas_new_root() - Create a new root node that only contains the entry passed
* in.
@@ -3951,8 +3780,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
- node->parent = ma_parent_ptr(
- ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
@@ -3985,7 +3813,6 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
/* Left and Right side of spanning store */
MA_STATE(l_mas, NULL, 0, 0);
MA_STATE(r_mas, NULL, 0, 0);
-
MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
@@ -4146,9 +3973,10 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
done:
mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
if (in_rcu) {
- mte_set_node_dead(mas->node);
+ struct maple_enode *old_enode = mas->node;
+
mas->node = mt_mk_node(newnode, wr_mas->type);
- mas_replace(mas, false);
+ mas_replace_node(mas, old_enode);
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
@@ -4167,23 +3995,35 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char offset = mas->offset;
+ void __rcu **slots = wr_mas->slots;
bool gap = false;
- if (wr_mas->offset_end - offset != 1)
- return false;
-
- gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
- gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
+ gap |= !mt_slot_locked(mas->tree, slots, offset);
+ gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
- if (mas->index == wr_mas->r_min) {
- /* Overwriting the range and over a part of the next range. */
- rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
- wr_mas->pivots[offset] = mas->last;
- } else {
- /* Overwriting a part of the range and over the next range */
- rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
+ if (wr_mas->offset_end - offset == 1) {
+ if (mas->index == wr_mas->r_min) {
+ /* Overwriting the range and a part of the next one */
+ rcu_assign_pointer(slots[offset], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->last;
+ } else {
+ /* Overwriting a part of the range and the next one */
+ rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ mas->offset++; /* Keep mas accurate. */
+ }
+ } else if (!mt_in_rcu(mas->tree)) {
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges
+ */
+ gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
+ rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
wr_mas->pivots[offset] = mas->index - 1;
+ wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
+ } else {
+ return false;
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
@@ -4197,18 +4037,6 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
return true;
}
-static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
-{
- while ((wr_mas->offset_end < wr_mas->node_end) &&
- (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
- wr_mas->offset_end++;
-
- if (wr_mas->offset_end < wr_mas->node_end)
- wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
- else
- wr_mas->end_piv = wr_mas->mas->max;
-}
-
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
@@ -4245,6 +4073,21 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
}
}
+static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
+{
+ while ((wr_mas->offset_end < wr_mas->node_end) &&
+ (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
+ wr_mas->offset_end++;
+
+ if (wr_mas->offset_end < wr_mas->node_end)
+ wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
+ else
+ wr_mas->end_piv = wr_mas->mas->max;
+
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+}
+
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
@@ -4263,39 +4106,63 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
/*
* mas_wr_append: Attempt to append
* @wr_mas: the maple write state
+ * @new_end: The end of the node after the modification
+ *
+ * This is currently unsafe in RCU mode since the end of the node may be
+ * cached by readers while the node contents are updated, which could result
+ * in inaccurate information.
*
* Return: True if appended, false otherwise
*/
-static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+ unsigned char new_end)
{
- unsigned char end = wr_mas->node_end;
- unsigned char new_end = end + 1;
- struct ma_state *mas = wr_mas->mas;
- unsigned char node_pivots = mt_pivots[wr_mas->type];
+ struct ma_state *mas;
+ void __rcu **slots;
+ unsigned char end;
+
+ mas = wr_mas->mas;
+ if (mt_in_rcu(mas->tree))
+ return false;
if (mas->offset != wr_mas->node_end)
return false;
- if (new_end < node_pivots) {
+ end = wr_mas->node_end;
+ if (mas->offset != end)
+ return false;
+
+ if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
- ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
+ ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
}
- if (mas->last == wr_mas->r_max) {
- /* Append to end of range */
- rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
- wr_mas->pivots[end] = mas->index - 1;
- mas->offset = new_end;
+ slots = wr_mas->slots;
+ if (new_end == end + 1) {
+ if (mas->last == wr_mas->r_max) {
+ /* Append to end of range */
+ rcu_assign_pointer(slots[new_end], wr_mas->entry);
+ wr_mas->pivots[end] = mas->index - 1;
+ mas->offset = new_end;
+ } else {
+ /* Append to start of range */
+ rcu_assign_pointer(slots[new_end], wr_mas->content);
+ wr_mas->pivots[end] = mas->last;
+ rcu_assign_pointer(slots[end], wr_mas->entry);
+ }
} else {
- /* Append to start of range */
- rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
- wr_mas->pivots[end] = mas->last;
- rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+ /* Append to the range without touching any boundaries. */
+ rcu_assign_pointer(slots[new_end], wr_mas->content);
+ wr_mas->pivots[end + 1] = mas->last;
+ rcu_assign_pointer(slots[end + 1], wr_mas->entry);
+ wr_mas->pivots[end] = mas->index - 1;
+ mas->offset = end + 1;
}
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
+ trace_ma_write(__func__, mas, new_end, wr_mas->entry);
return true;
}
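
A worked example of the three append shapes (editor's illustration, assuming slot `end` currently holds `old` over [10, 20], so r_max == 20):

	/* store new over [15, 20]: append at end, new_end == end + 1 */
	pivots[end] = 14;  slots[end + 1] = new;

	/* store new over [10, 14]: append at start, new_end == end + 1 */
	pivots[end] = 14;  slots[end] = new;  slots[end + 1] = old;

	/* store new over [13, 17]: middle, new_end == end + 2 */
	pivots[end] = 12;      slots[end + 1] = new;
	pivots[end + 1] = 17;  slots[end + 2] = old;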
@@ -4337,7 +4204,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
goto slow_path;
/* Attempt to append */
- if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
+ if (mas_wr_append(wr_mas, new_end))
return;
if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
@@ -4377,10 +4244,6 @@ static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
-
- if (!wr_mas->entry)
- mas_wr_extend_null(wr_mas);
-
/* New root for a single pointer */
if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
mas_new_root(mas, wr_mas->entry);
@@ -4920,7 +4783,7 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, offset);
data_end = ma_data_end(node, type, pivots, mas->max);
for (; offset <= data_end; offset++) {
- pivot = mas_logical_pivot(mas, pivots, offset, type);
+ pivot = mas_safe_pivot(mas, pivots, offset, type);
/* Not within lower bounds */
if (mas->index > pivot)
@@ -5431,19 +5294,34 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
+ if (mas_is_start(wr_mas->mas))
+ return;
+
if (unlikely(mas_is_paused(wr_mas->mas)))
- mas_reset(wr_mas->mas);
+ goto reset;
- if (!mas_is_start(wr_mas->mas)) {
- if (mas_is_none(wr_mas->mas)) {
- mas_reset(wr_mas->mas);
- } else {
- wr_mas->r_max = wr_mas->mas->max;
- wr_mas->type = mte_node_type(wr_mas->mas->node);
- if (mas_is_span_wr(wr_mas))
- mas_reset(wr_mas->mas);
- }
- }
+ if (unlikely(mas_is_none(wr_mas->mas)))
+ goto reset;
+
+ /*
+ * A less strict version of mas_is_span_wr() where we allow spanning
+ * writes within this node. This is to stop partial walks in
+ * mas_prealloc() from being reset.
+ */
+ if (wr_mas->mas->last > wr_mas->mas->max)
+ goto reset;
+
+ if (wr_mas->entry)
+ return;
+
+ if (mte_is_leaf(wr_mas->mas->node) &&
+ wr_mas->mas->last == wr_mas->mas->max)
+ goto reset;
+
+ return;
+
+reset:
+ mas_reset(wr_mas->mas);
}
/* Interface */
@@ -5535,15 +5413,58 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
/**
* mas_preallocate() - Preallocate enough nodes for a store operation
* @mas: The maple state
+ * @entry: The entry that will be stored
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -ENOMEM if memory could not be allocated.
*/
-int mas_preallocate(struct ma_state *mas, gfp_t gfp)
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
+ MA_WR_STATE(wr_mas, mas, entry);
+ unsigned char node_size;
+ int request = 1;
int ret;
- mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ goto ask_now;
+
+ mas_wr_store_setup(&wr_mas);
+ wr_mas.content = mas_start(mas);
+ /* Root expand */
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
+ goto ask_now;
+
+ if (unlikely(!mas_wr_walk(&wr_mas))) {
+ /* Spanning store, use worst case for now */
+ request = 1 + mas_mt_height(mas) * 3;
+ goto ask_now;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ /* Exact fit, no nodes needed. */
+ if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
+ return 0;
+
+ mas_wr_end_piv(&wr_mas);
+ node_size = mas_wr_new_end(&wr_mas);
+ if (node_size >= mt_slots[wr_mas.type]) {
+ /* Split, worst case for now. */
+ request = 1 + mas_mt_height(mas) * 2;
+ goto ask_now;
+ }
+
+	/* New root needs a single node */
+ if (unlikely(mte_is_root(mas->node)))
+ goto ask_now;
+
+ /* Potential spanning rebalance collapsing a node, use worst-case */
+ if (node_size - 1 <= mt_min_slots[wr_mas.type])
+ request = mas_mt_height(mas) * 2 - 1;
+
+ /* node store, slot store needs one node */
+ask_now:
+ mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;
if (likely(!mas_is_err(mas)))
return 0;
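
For reference, the usual caller pattern (a sketch, mirroring how mm callers use this API): preallocate while sleeping allocations are still allowed, after which the store itself cannot fail.

	MA_STATE(mas, &tree, index, last);

	if (mas_preallocate(&mas, entry, GFP_KERNEL))
		return -ENOMEM;
	/* ... take locks, do work that must not fail ... */
	mas_store_prealloc(&mas, entry);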
@@ -5749,7 +5670,11 @@ EXPORT_SYMBOL_GPL(mas_next_range);
* @index: The start index
* @max: The maximum index to check
*
- * Return: The entry at @index or higher, or %NULL if nothing is found.
+ * Takes the RCU read lock internally to protect the search; note that the
+ * returned pointer is no longer protected once the RCU read lock is dropped.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * Return: The entry higher than @index or %NULL if nothing is found.
*/
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
@@ -5855,7 +5780,11 @@ EXPORT_SYMBOL_GPL(mas_prev_range);
* @index: The start index
* @min: The minimum index to check
*
- * Return: The entry at @index or lower, or %NULL if nothing is found.
+ * Takes the RCU read lock internally to protect the search; note that the
+ * returned pointer is no longer protected once the RCU read lock is dropped.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * Return: The entry before @index or %NULL if nothing is found.
*/
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
@@ -6278,7 +6207,7 @@ int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
EXPORT_SYMBOL(mtree_store);
/**
- * mtree_insert_range() - Insert an entry at a give range if there is no value.
+ * mtree_insert_range() - Insert an entry at a given range if there is no value.
* @mt: The maple tree
* @first: The start of the range
* @last: The end of the range
@@ -6314,11 +6243,11 @@ retry:
EXPORT_SYMBOL(mtree_insert_range);
/**
- * mtree_insert() - Insert an entry at a give index if there is no value.
+ * mtree_insert() - Insert an entry at a given index if there is no value.
* @mt: The maple tree
* @index : The index to store the value
* @entry: The entry to store
- * @gfp: The FGP_FLAGS to use for allocations.
+ * @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
* request, -ENOMEM if memory could not be allocated.
@@ -6467,9 +6396,15 @@ EXPORT_SYMBOL(mtree_destroy);
* mt_find() - Search from the start up until an entry is found.
* @mt: The maple tree
* @index: Pointer which contains the start location of the search
- * @max: The maximum value to check
+ * @max: The maximum value of the search range
+ *
+ * Takes RCU read lock internally to protect the search, which does not
+ * protect the returned pointer after dropping RCU read lock.
+ * See also: Documentation/core-api/maple_tree.rst
*
- * Handles locking. @index will be incremented to one beyond the range.
+ * If an entry is found, @index is updated to point to the next possible
+ * entry, independent of whether the found entry occupies a single index
+ * or a range of indices.
*
* Return: The entry at or after the @index or %NULL
*/
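
Iteration sketch (essentially what the mt_for_each() helper is assumed to wrap, given a `struct maple_tree mt`): because @index advances past the returned entry's range, the loop terminates even for entries spanning many indices.

	unsigned long index = 0;
	void *entry;

	while ((entry = mt_find(&mt, &index, ULONG_MAX)) != NULL)
		pr_info("entry %p, next search starts at %lu\n", entry, index);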
@@ -6527,7 +6462,9 @@ EXPORT_SYMBOL(mt_find);
* @index: Pointer which contains the start location of the search
* @max: The maximum value to check
*
- * Handles locking, detects wrapping on index == 0
+ * Same as mt_find() except that it checks @index for 0 before
+ * searching. If @index == 0, the search is aborted. This covers a wrap
+ * around of @index to 0 in an iterator loop.
*
* Return: The entry at or after the @index or %NULL
*/
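A minimal iteration sketch combining the two helpers (tree and consumer names are hypothetical): mt_find() advances @index past each range it returns, and mt_find_after() terminates the loop once @index wraps around to 0.

	unsigned long index = 0;
	void *entry;

	entry = mt_find(&my_tree, &index, ULONG_MAX);
	while (entry) {
		consume(entry);	/* hypothetical consumer */
		entry = mt_find_after(&my_tree, &index, ULONG_MAX);
	}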
@@ -6632,78 +6569,6 @@ static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
offset);
}
-
-/*
- * mas_first_entry() - Go the first leaf and find the first entry.
- * @mas: the maple state.
- * @limit: the maximum index to check.
- * @*r_start: Pointer to set to the range start.
- *
- * Sets mas->offset to the offset of the entry, r_start to the range minimum.
- *
- * Return: The first entry or MAS_NONE.
- */
-static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
- unsigned long limit, enum maple_type mt)
-
-{
- unsigned long max;
- unsigned long *pivots;
- void __rcu **slots;
- void *entry = NULL;
-
- mas->index = mas->min;
- if (mas->index > limit)
- goto none;
-
- max = mas->max;
- mas->offset = 0;
- while (likely(!ma_is_leaf(mt))) {
- MAS_WARN_ON(mas, mte_dead_node(mas->node));
- slots = ma_slots(mn, mt);
- entry = mas_slot(mas, slots, 0);
- pivots = ma_pivots(mn, mt);
- if (unlikely(ma_dead_node(mn)))
- return NULL;
- max = pivots[0];
- mas->node = entry;
- mn = mas_mn(mas);
- mt = mte_node_type(mas->node);
- }
- MAS_WARN_ON(mas, mte_dead_node(mas->node));
-
- mas->max = max;
- slots = ma_slots(mn, mt);
- entry = mas_slot(mas, slots, 0);
- if (unlikely(ma_dead_node(mn)))
- return NULL;
-
- /* Slot 0 or 1 must be set */
- if (mas->index > limit)
- goto none;
-
- if (likely(entry))
- return entry;
-
- mas->offset = 1;
- entry = mas_slot(mas, slots, 1);
- pivots = ma_pivots(mn, mt);
- if (unlikely(ma_dead_node(mn)))
- return NULL;
-
- mas->index = pivots[0] + 1;
- if (mas->index > limit)
- goto none;
-
- if (likely(entry))
- return entry;
-
-none:
- if (likely(!ma_dead_node(mn)))
- mas->node = MAS_NONE;
- return NULL;
-}
-
/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
@@ -6838,11 +6703,27 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
int i;
pr_cont(" contents: ");
- for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
- pr_cont("%lu ", node->gap[i]);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%lx ", node->gap[i]);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_cont("%lu ", node->gap[i]);
+ }
+ }
pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
- for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
- pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ }
+ }
pr_cont("%p\n", node->slot[i]);
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
unsigned long last = max;
@@ -6926,15 +6807,16 @@ EXPORT_SYMBOL_GPL(mt_dump);
static void mas_validate_gaps(struct ma_state *mas)
{
struct maple_enode *mte = mas->node;
- struct maple_node *p_mn;
+ struct maple_node *p_mn, *node = mte_to_node(mte);
+ enum maple_type mt = mte_node_type(mas->node);
unsigned long gap = 0, max_gap = 0;
unsigned long p_end, p_start = mas->min;
- unsigned char p_slot;
+ unsigned char p_slot, offset;
unsigned long *gaps = NULL;
- unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
- int i;
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned int i;
- if (ma_is_dense(mte_node_type(mte))) {
+ if (ma_is_dense(mt)) {
for (i = 0; i < mt_slot_count(mte); i++) {
if (mas_get_slot(mas, i)) {
if (gap > max_gap)
@@ -6947,52 +6829,59 @@ static void mas_validate_gaps(struct ma_state *mas)
goto counted;
}
- gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
+ gaps = ma_gaps(node, mt);
for (i = 0; i < mt_slot_count(mte); i++) {
- p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
+ p_end = mas_safe_pivot(mas, pivots, i, mt);
if (!gaps) {
- if (mas_get_slot(mas, i)) {
- gap = 0;
- goto not_empty;
- }
-
- gap += p_end - p_start + 1;
+ if (!mas_get_slot(mas, i))
+ gap = p_end - p_start + 1;
} else {
void *entry = mas_get_slot(mas, i);
gap = gaps[i];
- if (!entry) {
- if (gap != p_end - p_start + 1) {
- pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
- mas_mn(mas), i,
- mas_get_slot(mas, i), gap,
- p_end, p_start);
- mt_dump(mas->tree, mt_dump_hex);
-
- MT_BUG_ON(mas->tree,
- gap != p_end - p_start + 1);
- }
- } else {
- if (gap > p_end - p_start + 1) {
- pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
- mas_mn(mas), i, gap, p_end, p_start,
- p_end - p_start + 1);
- MT_BUG_ON(mas->tree,
- gap > p_end - p_start + 1);
- }
+ MT_BUG_ON(mas->tree, !entry);
+
+ if (gap > p_end - p_start + 1) {
+ pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
+ mas_mn(mas), i, gap, p_end, p_start,
+ p_end - p_start + 1);
+ MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
}
}
if (gap > max_gap)
max_gap = gap;
-not_empty:
+
p_start = p_end + 1;
if (p_end >= mas->max)
break;
}
counted:
+ if (mt == maple_arange_64) {
+ offset = ma_meta_gap(node, mt);
+ if (offset > i) {
+ pr_err("gap offset %p[%u] is invalid\n", node, offset);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+	MT_BUG_ON(mas->tree, !gaps);
+	if (gaps[offset] != max_gap) {
+		pr_err("gap %p[%u] is not the largest gap %lu\n",
+		       node, offset, max_gap);
+		MT_BUG_ON(mas->tree, 1);
+	}
+
+	for (i++; i < mt_slot_count(mte); i++) {
+ if (gaps[i] != 0) {
+ pr_err("gap %p[%u] beyond node limit != 0\n",
+ node, i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+ }
+ }
+
if (mte_is_root(mte))
return;
@@ -7002,10 +6891,8 @@ counted:
if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
mt_dump(mas->tree, mt_dump_hex);
+ MT_BUG_ON(mas->tree, 1);
}
-
- MT_BUG_ON(mas->tree,
- ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap);
}
static void mas_validate_parent_slot(struct ma_state *mas)
@@ -7056,11 +6943,12 @@ static void mas_validate_child_slot(struct ma_state *mas)
for (i = 0; i < mt_slots[type]; i++) {
child = mas_slot(mas, slots, i);
- if (!pivots[i] || pivots[i] == mas->max)
- break;
- if (!child)
- break;
+ if (!child) {
+ pr_err("Non-leaf node lacks child at %p[%u]\n",
+ mas_mn(mas), i);
+ MT_BUG_ON(mas->tree, 1);
+ }
if (mte_parent_slot(child) != i) {
pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
@@ -7075,11 +6963,16 @@ static void mas_validate_child_slot(struct ma_state *mas)
mte_to_node(mas->node));
MT_BUG_ON(mas->tree, 1);
}
+
+ if (i < mt_pivots[type] && pivots[i] == mas->max)
+ break;
}
}
/*
- * Validate all pivots are within mas->min and mas->max.
+ * Validate that all pivots are within mas->min and mas->max, check that the
+ * metadata ends where the maximum ends, and ensure no slots or pivots are
+ * set outside of the end of the data.
*/
static void mas_validate_limits(struct ma_state *mas)
{
@@ -7089,26 +6982,15 @@ static void mas_validate_limits(struct ma_state *mas)
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
- /* all limits are fine here. */
- if (mte_is_root(mas->node))
- return;
-
for (i = 0; i < mt_slots[type]; i++) {
unsigned long piv;
piv = mas_safe_pivot(mas, pivots, i, type);
- if (!piv && (i != 0))
- break;
-
- if (!mte_is_leaf(mas->node)) {
- void *entry = mas_slot(mas, slots, i);
-
- if (!entry)
- pr_err("%p[%u] cannot be null\n",
- mas_mn(mas), i);
-
- MT_BUG_ON(mas->tree, !entry);
+ if (!piv && (i != 0)) {
+ pr_err("Missing node limit pivot at %p[%u]",
+ mas_mn(mas), i);
+ MAS_WARN_ON(mas, 1);
}
if (prev_piv > piv) {
@@ -7131,6 +7013,13 @@ static void mas_validate_limits(struct ma_state *mas)
if (piv == mas->max)
break;
}
+
+ if (mas_data_end(mas) != i) {
+ pr_err("node%p: data_end %u != the last slot offset %u\n",
+ mas_mn(mas), mas_data_end(mas), i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
for (i += 1; i < mt_slots[type]; i++) {
void *entry = mas_slot(mas, slots, i);
@@ -7205,21 +7094,20 @@ void mt_validate(struct maple_tree *mt)
if (!mas_searchable(&mas))
goto done;
- mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
while (!mas_is_none(&mas)) {
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
- if (!mte_is_root(mas.node)) {
- end = mas_data_end(&mas);
- if (MAS_WARN_ON(&mas,
- (end < mt_min_slot_count(mas.node)) &&
- (mas.max != ULONG_MAX))) {
- pr_err("Invalid size %u of %p\n", end,
- mas_mn(&mas));
- }
+ end = mas_data_end(&mas);
+ if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
+ (mas.max != ULONG_MAX))) {
+ pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
}
+
mas_validate_parent_slot(&mas);
- mas_validate_child_slot(&mas);
mas_validate_limits(&mas);
+ mas_validate_child_slot(&mas);
if (mt_is_alloc(mt))
mas_validate_gaps(&mas);
mas_dfs_postorder(&mas, ULONG_MAX);
diff --git a/lib/math/Makefile b/lib/math/Makefile
index bfac26ddfc22..91fcdb0c9efe 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o
+obj-y += div64.o gcd.o lcm.o int_log.o int_pow.o int_sqrt.o reciprocal_div.o
obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
diff --git a/lib/math/int_log.c b/lib/math/int_log.c
new file mode 100644
index 000000000000..8f9da3a2ad39
--- /dev/null
+++ b/lib/math/int_log.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * Provides fixed-point logarithm operations.
+ *
+ * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/int_log.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
+
+static const unsigned short logtable[256] = {
+ 0x0000, 0x0171, 0x02e0, 0x044e, 0x05ba, 0x0725, 0x088e, 0x09f7,
+ 0x0b5d, 0x0cc3, 0x0e27, 0x0f8a, 0x10eb, 0x124b, 0x13aa, 0x1508,
+ 0x1664, 0x17bf, 0x1919, 0x1a71, 0x1bc8, 0x1d1e, 0x1e73, 0x1fc6,
+ 0x2119, 0x226a, 0x23ba, 0x2508, 0x2656, 0x27a2, 0x28ed, 0x2a37,
+ 0x2b80, 0x2cc8, 0x2e0f, 0x2f54, 0x3098, 0x31dc, 0x331e, 0x345f,
+ 0x359f, 0x36de, 0x381b, 0x3958, 0x3a94, 0x3bce, 0x3d08, 0x3e41,
+ 0x3f78, 0x40af, 0x41e4, 0x4319, 0x444c, 0x457f, 0x46b0, 0x47e1,
+ 0x4910, 0x4a3f, 0x4b6c, 0x4c99, 0x4dc5, 0x4eef, 0x5019, 0x5142,
+ 0x526a, 0x5391, 0x54b7, 0x55dc, 0x5700, 0x5824, 0x5946, 0x5a68,
+ 0x5b89, 0x5ca8, 0x5dc7, 0x5ee5, 0x6003, 0x611f, 0x623a, 0x6355,
+ 0x646f, 0x6588, 0x66a0, 0x67b7, 0x68ce, 0x69e4, 0x6af8, 0x6c0c,
+ 0x6d20, 0x6e32, 0x6f44, 0x7055, 0x7165, 0x7274, 0x7383, 0x7490,
+ 0x759d, 0x76aa, 0x77b5, 0x78c0, 0x79ca, 0x7ad3, 0x7bdb, 0x7ce3,
+ 0x7dea, 0x7ef0, 0x7ff6, 0x80fb, 0x81ff, 0x8302, 0x8405, 0x8507,
+ 0x8608, 0x8709, 0x8809, 0x8908, 0x8a06, 0x8b04, 0x8c01, 0x8cfe,
+ 0x8dfa, 0x8ef5, 0x8fef, 0x90e9, 0x91e2, 0x92db, 0x93d2, 0x94ca,
+ 0x95c0, 0x96b6, 0x97ab, 0x98a0, 0x9994, 0x9a87, 0x9b7a, 0x9c6c,
+ 0x9d5e, 0x9e4f, 0x9f3f, 0xa02e, 0xa11e, 0xa20c, 0xa2fa, 0xa3e7,
+ 0xa4d4, 0xa5c0, 0xa6ab, 0xa796, 0xa881, 0xa96a, 0xaa53, 0xab3c,
+ 0xac24, 0xad0c, 0xadf2, 0xaed9, 0xafbe, 0xb0a4, 0xb188, 0xb26c,
+ 0xb350, 0xb433, 0xb515, 0xb5f7, 0xb6d9, 0xb7ba, 0xb89a, 0xb97a,
+ 0xba59, 0xbb38, 0xbc16, 0xbcf4, 0xbdd1, 0xbead, 0xbf8a, 0xc065,
+ 0xc140, 0xc21b, 0xc2f5, 0xc3cf, 0xc4a8, 0xc580, 0xc658, 0xc730,
+ 0xc807, 0xc8de, 0xc9b4, 0xca8a, 0xcb5f, 0xcc34, 0xcd08, 0xcddc,
+ 0xceaf, 0xcf82, 0xd054, 0xd126, 0xd1f7, 0xd2c8, 0xd399, 0xd469,
+ 0xd538, 0xd607, 0xd6d6, 0xd7a4, 0xd872, 0xd93f, 0xda0c, 0xdad9,
+ 0xdba5, 0xdc70, 0xdd3b, 0xde06, 0xded0, 0xdf9a, 0xe063, 0xe12c,
+ 0xe1f5, 0xe2bd, 0xe385, 0xe44c, 0xe513, 0xe5d9, 0xe69f, 0xe765,
+ 0xe82a, 0xe8ef, 0xe9b3, 0xea77, 0xeb3b, 0xebfe, 0xecc1, 0xed83,
+ 0xee45, 0xef06, 0xefc8, 0xf088, 0xf149, 0xf209, 0xf2c8, 0xf387,
+ 0xf446, 0xf505, 0xf5c3, 0xf680, 0xf73e, 0xf7fb, 0xf8b7, 0xf973,
+ 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47,
+};
+
+unsigned int intlog2(u32 value)
+{
+	/*
+	 * Returns log2(value) * 2^24; the result is wrong for
+	 * value == 0 (log2(0) is undefined).
+	 */
+ unsigned int msb;
+ unsigned int logentry;
+ unsigned int significand;
+ unsigned int interpolation;
+
+ if (unlikely(value == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ /* first detect the msb (count begins at 0) */
+ msb = fls(value) - 1;
+
+	/*
+	 * Now we use the log table with the following method:
+	 *
+	 *   log2(2^x * y) * 2^24 = x * 2^24 + log2(y) * 2^24
+	 *
+	 * where x = msb and therefore 1 <= y < 2.
+	 * First, y is determined by shifting the value left so that the
+	 * msb becomes bit 31, e.g. 0x00231f56 -> 0x8C7D5800;
+	 * the result is y * 2^31, the "significand".
+	 * The highest nine bits are then used for the table lookup, with
+	 * the highest bit discarded because it is always set.
+	 * The highest nine bits in our example are 100011000,
+	 * so we would use entry 0x18.
+	 */
+ significand = value << (31 - msb);
+ logentry = (significand >> 23) % ARRAY_SIZE(logtable);
+
+	/*
+	 * The last step is interpolation, needed because of the limited
+	 * resolution of the log table. The error is the part of the
+	 * significand that was not used for the lookup; we compute the
+	 * ratio between the error and the step to the next table entry,
+	 * and interpolate between the table entry used and the next one.
+	 * The biggest possible error is 0x7fffff (0x7D5800 in our
+	 * example), and the value needed to reach the next table entry
+	 * is 0x800000, so the interpolation is:
+	 *
+	 *   (error / 0x800000) * (logtable_next - logtable_current)
+	 *
+	 * In the implementation the division is moved to the end for
+	 * better accuracy; there is also an overflow correction for when
+	 * logtable_next would be entry 256.
+	 */
+ interpolation = ((significand & 0x7fffff) *
+ ((logtable[(logentry + 1) % ARRAY_SIZE(logtable)] -
+ logtable[logentry]) & 0xffff)) >> 15;
+
+ /* now we return the result */
+ return ((msb << 24) + (logtable[logentry] << 8) + interpolation);
+}
+EXPORT_SYMBOL(intlog2);
+
+unsigned int intlog10(u32 value)
+{
+	/*
+	 * Returns log10(value) * 2^24; the result is wrong for
+	 * value == 0 (log10(0) is undefined).
+	 */
+ u64 log;
+
+ if (unlikely(value == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ log = intlog2(value);
+
+	/*
+	 * We use the identity log10(x) = log2(x) * log10(2);
+	 * the constant 646456993 is log10(2) * 2^31, rounded.
+	 */
+
+ return (log * 646456993) >> 31;
+}
+EXPORT_SYMBOL(intlog10);
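Both helpers return the logarithm as a Q8.24 fixed-point value, i.e. scaled by 2^24. A quick sanity check, assuming the lookup behaves as described above:

	u32 l2 = intlog2(8);		/* exactly 3 << 24: msb = 3, zero interpolation */
	u32 l10 = intlog10(1000);	/* approximately 3 << 24, small interpolation error */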
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 887926f04731..440aee705ccc 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -551,10 +551,10 @@ static void strtomem_test(struct kunit *test)
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
- KUNIT_CASE(memcpy_large_test),
- KUNIT_CASE(memmove_test),
- KUNIT_CASE(memmove_large_test),
- KUNIT_CASE(memmove_overlap_test),
+ KUNIT_CASE_SLOW(memcpy_large_test),
+ KUNIT_CASE_SLOW(memmove_test),
+ KUNIT_CASE_SLOW(memmove_large_test),
+ KUNIT_CASE_SLOW(memmove_overlap_test),
KUNIT_CASE(strtomem_test),
{}
};
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 489e15bde5c1..7a2b6c38fd59 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -355,6 +355,12 @@ static int nla_validate_mask(const struct nla_policy *pt,
case NLA_U64:
value = nla_get_u64(nla);
break;
+ case NLA_BE16:
+ value = ntohs(nla_get_be16(nla));
+ break;
+ case NLA_BE32:
+ value = ntohl(nla_get_be32(nla));
+ break;
default:
return -EINVAL;
}
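With these cases in place, NLA_POLICY_MASK() also works for big-endian attribute types; per the ntohs()/ntohl() conversions above, the mask is checked against the host-order value. A hypothetical policy entry:

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_FLAGS] = NLA_POLICY_MASK(NLA_BE32, 0x00ffffff),
	};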
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 5274bbb026d7..33c154264bfe 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -34,7 +34,7 @@ static unsigned long backtrace_flag;
* they are passed being updated as a side effect of this call.
*/
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self,
+ int exclude_cpu,
void (*raise)(cpumask_t *mask))
{
int i, this_cpu = get_cpu();
@@ -49,8 +49,8 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
}
cpumask_copy(to_cpumask(backtrace_mask), mask);
- if (exclude_self)
- cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+ if (exclude_cpu != -1)
+ cpumask_clear_cpu(exclude_cpu, to_cpumask(backtrace_mask));
/*
* Don't try to send an NMI to this cpu; it may work on some
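Callers now pass a CPU number instead of a bool: -1 excludes no CPU, while a valid CPU id excludes exactly that CPU. A sketch with a hypothetical raise callback:

	/* Backtrace every CPU in the mask. */
	nmi_trigger_cpumask_backtrace(cpu_online_mask, -1, my_raise);

	/* Backtrace every CPU in the mask except the current one. */
	nmi_trigger_cpumask_backtrace(cpu_online_mask, smp_processor_id(), my_raise);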
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 2b24ea6c9497..954c3412d22d 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -83,9 +83,6 @@ static int __init err_inject_init(void)
notifier_err_inject_dir =
debugfs_create_dir("notifier-error-inject", NULL);
- if (!notifier_err_inject_dir)
- return -ENOMEM;
-
return 0;
}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1a31065b2036..976b9bd02a1b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1136,7 +1136,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
- slot++;
iter->index = __radix_tree_iter_add(iter, 1);
iter->next_index = iter->index;
iter->tags = 0;
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index f02e10fa6238..3be03793237c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -56,7 +56,9 @@ int main(int argc, char *argv[])
uint8_t v;
uint8_t exptbl[256], invtbl[256];
+ printf("#ifdef __KERNEL__\n");
printf("#include <linux/export.h>\n");
+ printf("#endif\n");
printf("#include <linux/raid/pq.h>\n");
/* Compute multiplication table */
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index e49d519de6cb..a7c1b2bbe40d 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -13,7 +13,6 @@
* the syndrome.)
*/
-#include <linux/export.h>
#include <linux/raid/pq.h>
/* Recover two failed data blocks. */
diff --git a/lib/raid6/test/.gitignore b/lib/raid6/test/.gitignore
new file mode 100644
index 000000000000..1b68a77f348f
--- /dev/null
+++ b/lib/raid6/test/.gitignore
@@ -0,0 +1,3 @@
+/int.uc
+/neon.uc
+/raid6test
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 4fb7700a741b..1f693ea3b980 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -6,14 +6,15 @@
pound := \#
-CC = gcc
-OPTFLAGS = -O2 # Adjust as desired
-CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
-LD = ld
-AWK = awk -f
-AR = ar
-RANLIB = ranlib
-OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
+# Adjust as desired
+CC = gcc
+OPTFLAGS = -O2
+CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
+LD = ld
+AWK = awk -f
+AR = ar
+RANLIB = ranlib
+OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
 ARCH := $(shell uname -m 2>/dev/null | sed -e s/i.86/i386/)
ifeq ($(ARCH),i386)
@@ -34,24 +35,25 @@ ifeq ($(ARCH),aarch64)
HAS_NEON = yes
endif
+ifeq ($(findstring ppc,$(ARCH)),ppc)
+ CFLAGS += -I../../../arch/powerpc/include
+ HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
+ gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
+endif
+
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += -DCONFIG_X86
- CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >/dev/null 2>&1 && \
- rm ./-.o && echo -DCONFIG_AS_AVX512=1)
+ CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
-else
- HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
- gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
- ifeq ($(HAS_ALTIVEC),yes)
- CFLAGS += -I../../../arch/powerpc/include
- CFLAGS += -DCONFIG_ALTIVEC
- OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
- vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
- endif
+else ifeq ($(HAS_ALTIVEC),yes)
+ CFLAGS += -DCONFIG_ALTIVEC
+ OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
endif
.c.o:
@@ -63,12 +65,12 @@ endif
%.uc: ../%.uc
cp -f $< $@
-all: raid6.a raid6test
+all: raid6.a raid6test
raid6.a: $(OBJS)
- rm -f $@
- $(AR) cq $@ $^
- $(RANLIB) $@
+ rm -f $@
+ $(AR) cq $@ $^
+ $(RANLIB) $@
raid6test: test.c raid6.a
$(CC) $(CFLAGS) -o raid6test $^
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index eff4e42c425a..d0a5081dfd12 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
- int i, wake_index;
+ int i, wake_index, woken;
if (!atomic_read(&sbq->ws_active))
return;
@@ -567,13 +567,12 @@ static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
*/
wake_index = sbq_index_inc(wake_index);
- /*
- * It is sufficient to wake up at least one waiter to
- * guarantee forward progress.
- */
- if (waitqueue_active(&ws->wait) &&
- wake_up_nr(&ws->wait, nr))
- break;
+ if (waitqueue_active(&ws->wait)) {
+ woken = wake_up_nr(&ws->wait, nr);
+ if (woken == nr)
+ break;
+ nr -= woken;
+ }
}
if (wake_index != atomic_read(&sbq->wake_index))
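For example, with nr == 3 and only two waiters on the first active waitqueue, the loop now wakes those two and carries nr = 1 into the next waitqueue instead of stopping after the first successful wakeup.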
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e86231a44c3d..c65566b4dc66 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1148,7 +1148,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
failed:
while (sgtable->nents > sgtable->orig_nents)
- put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+ unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
return res;
}
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 187f5b2db4cf..f2ea9f30c7c5 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -1161,6 +1161,10 @@ static void __init test_bitmap_print_buf(void)
}
}
+/*
+ * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled.
+ * To work around it, GCOV is force-disabled in the Makefile for this configuration.
+ */
static void __init test_bitmap_const_eval(void)
{
DECLARE_BITMAP(bitmap, BITS_PER_LONG);
@@ -1186,11 +1190,7 @@ static void __init test_bitmap_const_eval(void)
* the compiler is fixed.
*/
bitmap_clear(bitmap, 0, BITS_PER_LONG);
-#if defined(__s390__) && defined(__clang__)
- if (!const_test_bit(7, bitmap))
-#else
if (!test_bit(7, bitmap))
-#endif
bitmap_set(bitmap, 5, 2);
/* Equals to `unsigned long bitopvar = BIT(20)` */
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index fa0833410ac1..ecde4216201e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -596,8 +596,8 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
{
static const s64 regs[] = {
0x0123456789abcdefLL, /* dword > 0, word < 0 */
- 0xfedcba9876543210LL, /* dowrd < 0, word > 0 */
- 0xfedcba0198765432LL, /* dowrd < 0, word < 0 */
+ 0xfedcba9876543210LL, /* dword < 0, word > 0 */
+ 0xfedcba0198765432LL, /* dword < 0, word < 0 */
0x0123458967abcdefLL, /* dword > 0, word > 0 */
};
int bits = alu32 ? 32 : 64;
@@ -14381,25 +14381,15 @@ static void *generate_test_data(struct bpf_test *test, int sub)
* single fragment to the skb, filled with
* test->frag_data.
*/
- void *ptr;
-
page = alloc_page(GFP_KERNEL);
-
if (!page)
goto err_kfree_skb;
- ptr = kmap(page);
- if (!ptr)
- goto err_free_page;
- memcpy(ptr, test->frag_data, MAX_DATA);
- kunmap(page);
+ memcpy(page_address(page), test->frag_data, MAX_DATA);
skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
}
return skb;
-
-err_free_page:
- __free_page(page);
err_kfree_skb:
kfree_skb(skb);
return NULL;
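This works because alloc_page(GFP_KERNEL) never returns a highmem page, so page_address() is always valid here and the kmap()/kunmap() pair, together with its error path, can be dropped.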
@@ -14577,8 +14567,10 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
if (ret == test->test[i].result) {
pr_cont("%lld ", duration);
} else {
- pr_cont("ret %d != %d ", ret,
- test->test[i].result);
+ s32 res = test->test[i].result;
+
+ pr_cont("ret %d != %d (%#x != %#x)",
+ ret, res, ret, res);
err_cnt++;
}
}
@@ -15055,7 +15047,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
struct bpf_array *progs;
int which, err;
- /* Allocate the table of programs to be used for tall calls */
+ /* Allocate the table of programs to be used for tail calls */
progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
if (!progs)
goto out_nomem;
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 67e6f83fe0f8..717dcb830127 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -368,16 +368,13 @@ static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
void *entry;
struct page *page;
- void *tmp;
entry = xa_load(&dmirror->pt, pfn);
page = xa_untag_pointer(entry);
if (!page)
return -ENOENT;
- tmp = kmap(page);
- memcpy(ptr, tmp, PAGE_SIZE);
- kunmap(page);
+ memcpy_from_page(ptr, page, 0, PAGE_SIZE);
ptr += PAGE_SIZE;
bounce->cpages++;
@@ -437,16 +434,13 @@ static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
void *entry;
struct page *page;
- void *tmp;
entry = xa_load(&dmirror->pt, pfn);
page = xa_untag_pointer(entry);
if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
return -ENOENT;
- tmp = kmap(page);
- memcpy(tmp, ptr, PAGE_SIZE);
- kunmap(page);
+ memcpy_to_page(page, 0, ptr, PAGE_SIZE);
ptr += PAGE_SIZE;
bounce->cpages++;
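memcpy_from_page() and memcpy_to_page() are the highmem.h helpers that wrap exactly this map/copy/unmap sequence (using kmap_local_page() internally).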
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 9939be34e516..0674aebd4423 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -44,6 +44,8 @@ atomic_t maple_tree_tests_passed;
/* #define BENCH_WALK */
/* #define BENCH_MT_FOR_EACH */
/* #define BENCH_FORK */
+/* #define BENCH_MAS_FOR_EACH */
+/* #define BENCH_MAS_PREV */
#ifdef __KERNEL__
#define mt_set_non_kernel(x) do {} while (0)
@@ -1157,6 +1159,71 @@ static noinline void __init check_ranges(struct maple_tree *mt)
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
+ /* Check in-place modifications */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	/* Append to the start of the last range */
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i * 5 + 1;
+ val2 = val + 4;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Append to the last range without touching any boundaries */
+ for (i = 0; i < 10; i++) {
+ val = val2 + 5;
+ val2 = val + 4;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+	/* Append to the end of the last range */
+ val = val2;
+ for (i = 0; i < 10; i++) {
+ val += 5;
+ MT_BUG_ON(mt, mtree_test_store_range(mt, val, ULONG_MAX,
+ xa_mk_value(val)) != 0);
+ }
+
+ /* Overwriting the range and over a part of the next range */
+ for (i = 10; i < 30; i += 2) {
+ val = i * 5 + 1;
+ val2 = val + 5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Overwriting a part of the range and over the next range */
+ for (i = 50; i < 70; i += 2) {
+ val2 = i * 5;
+ val = val2 - 5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges
+ */
+ for (i = 100; i < 130; i += 3) {
+ val = i * 5 - 5;
+ val2 = i * 5 + 1;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges, in RCU mode
+ */
+ mt_set_in_rcu(mt);
+ for (i = 150; i < 180; i += 3) {
+ val = i * 5 - 5;
+ val2 = i * 5 + 1;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ MT_BUG_ON(mt, !mt_height(mt));
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
/* Test rebalance gaps */
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
mt_set_non_kernel(50);
@@ -1705,6 +1772,66 @@ static noinline void __init bench_mt_for_each(struct maple_tree *mt)
}
#endif
+#if defined(BENCH_MAS_FOR_EACH)
+static noinline void __init bench_mas_for_each(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500;
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i < max; i += 5) {
+ int gap = 4;
+
+ if (i % 30 == 0)
+ gap = 3;
+ mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ unsigned long j = 0;
+
+ mas_for_each(&mas, entry, max) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j += 5;
+ }
+ mas_set(&mas, 0);
+ }
+ rcu_read_unlock();
+}
+#endif
+#if defined(BENCH_MAS_PREV)
+static noinline void __init bench_mas_prev(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500;
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i < max; i += 5) {
+ int gap = 4;
+
+ if (i % 30 == 0)
+ gap = 3;
+ mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ unsigned long j = 2495;
+
+ mas_set(&mas, ULONG_MAX);
+ while ((entry = mas_prev(&mas, 0)) != NULL) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j -= 5;
+ }
+ }
+ rcu_read_unlock();
+}
+#endif
/* check_forking - simulate the kernel forking sequence with the tree. */
static noinline void __init check_forking(struct maple_tree *mt)
{
@@ -1898,13 +2025,16 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
725};
static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
1760, 1765};
+ unsigned long last_index;
if (MAPLE_32BIT) {
nr_entries = 500;
level2 = level2_32;
+ last_index = 0x138e;
} else {
nr_entries = 200;
level2 = level2_64;
+ last_index = 0x7d6;
}
for (i = 0; i <= nr_entries; i++)
@@ -2011,7 +2141,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
val = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, val != NULL);
- MT_BUG_ON(mt, mas.index != 0x7d6);
+ MT_BUG_ON(mt, mas.index != last_index);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
val = mas_prev(&mas, 0);
@@ -3430,6 +3560,20 @@ static int __init maple_tree_seed(void)
mtree_destroy(&tree);
goto skip;
#endif
+#if defined(BENCH_MAS_FOR_EACH)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mas_for_each(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MAS_PREV)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mas_prev(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_iteration(&tree);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 60e1984c060f..0ae35223d773 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
int failures = 0, num_tests = 0;
int i;
- for (i = 0; i < 10; i++)
+ for (i = 0; i <= MAX_ORDER; i++)
num_tests += do_alloc_pages_order(i, &failures);
REPORT_FAILURES_IN_FN();
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 7677ebccf3c3..69b6a5e177f2 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -12,6 +12,7 @@
#include <linux/random.h>
#include <linux/rtc.h>
#include <linux/slab.h>
+#include <linux/sprintf.h>
#include <linux/string.h>
#include <linux/bitmap.h>
@@ -41,8 +42,6 @@ KSTM_MODULE_GLOBALS();
static char *test_buffer __initdata;
static char *alloced_buffer __initdata;
-extern bool no_hash_pointers;
-
static int __printf(4, 0) __init
do_test(int bufsize, const char *expect, int elen,
const char *fmt, va_list ap)
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index c8ecbf74ef29..e5f30f9177df 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -55,6 +55,24 @@ struct ts_bm
unsigned int good_shift[];
};
+static unsigned int matchpat(const u8 *pattern, unsigned int patlen,
+ const u8 *text, bool icase)
+{
+ unsigned int i;
+
+ for (i = 0; i < patlen; i++) {
+ u8 t = *(text-i);
+
+ if (icase)
+ t = toupper(t);
+
+ if (t != *(pattern-i))
+ break;
+ }
+
+ return i;
+}
+
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_bm *bm = ts_config_priv(conf);
@@ -72,19 +90,18 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
break;
while (shift < text_len) {
- DEBUGP("Searching in position %d (%c)\n",
- shift, text[shift]);
- for (i = 0; i < bm->patlen; i++)
- if ((icase ? toupper(text[shift-i])
- : text[shift-i])
- != bm->pattern[bm->patlen-1-i])
- goto next;
-
- /* London calling... */
- DEBUGP("found!\n");
- return consumed + (shift-(bm->patlen-1));
-
-next: bs = bm->bad_shift[text[shift-i]];
+ DEBUGP("Searching in position %d (%c)\n",
+ shift, text[shift]);
+
+ i = matchpat(&bm->pattern[bm->patlen-1], bm->patlen,
+ &text[shift], icase);
+ if (i == bm->patlen) {
+ /* London calling... */
+ DEBUGP("found!\n");
+ return consumed + (shift-(bm->patlen-1));
+ }
+
+ bs = bm->bad_shift[text[shift-i]];
/* Now jumping to... */
shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
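matchpat() compares backwards from the end of the pattern, which is why bm_find() passes the last byte of the pattern and the candidate end position in the text. A small worked sketch with hypothetical buffers:

	static const u8 pat[] = "abc";
	static const u8 txt[] = "xxabc";

	/* compares txt[4]/pat[2] ('c'), then txt[3]/pat[1] ('b'), ... */
	unsigned int i = matchpat(&pat[2], 3, &txt[4], false);
	/* i == 3 == patlen: full match ending at txt[4] */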
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 40f560959b16..afb88b24fa74 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -34,6 +34,7 @@
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/rtc.h>
+#include <linux/sprintf.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>