Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig                  |   5
-rw-r--r-- | lib/Kconfig.debug            |  16
-rw-r--r-- | lib/Kconfig.kcsan            |  20
-rw-r--r-- | lib/Makefile                 |   4
-rw-r--r-- | lib/asn1_encoder.c           |   2
-rw-r--r-- | lib/atomic64.c               |   2
-rw-r--r-- | lib/crypto/Kconfig           |  23
-rw-r--r-- | lib/crypto/Makefile          |   9
-rw-r--r-- | lib/crypto/blake2s-generic.c |   6
-rw-r--r-- | lib/crypto/blake2s.c         |   6
-rw-r--r-- | lib/kunit/test.c             |  25
-rw-r--r-- | lib/locking-selftest.c       | 172
-rw-r--r-- | lib/logic_iomem.c            |  23
-rw-r--r-- | lib/mpi/mpi-mod.c            |   2
-rw-r--r-- | lib/objagg.c                 |   7
-rw-r--r-- | lib/ref_tracker.c            | 140
-rw-r--r-- | lib/siphash.c                |  12
-rw-r--r-- | lib/test_bpf.c               |   4
-rw-r--r-- | lib/test_ref_tracker.c       | 115
-rw-r--r-- | lib/vsprintf.c               |   4
20 files changed, 472 insertions, 125 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 5e7165e6a346..655b0e43f260 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -680,6 +680,11 @@ config STACK_HASH_ORDER Select the hash size as a power of 2 for the stackdepot hash table. Choose a lower value to reduce the memory impact. +config REF_TRACKER + bool + depends on STACKTRACE_SUPPORT + select STACKDEPOT + config SBITMAP bool diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5c12bde10996..c77fe36bb3d8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -316,6 +316,7 @@ config DEBUG_INFO_BTF bool "Generate BTF typeinfo" depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST + depends on BPF_SYSCALL help Generate deduplicated BTF type information from DWARF debug info. Turning this on expects presence of pahole tool, which will convert @@ -598,6 +599,11 @@ config DEBUG_MISC Say Y here if you need to enable miscellaneous debug code that should be under a more specific debug option but isn't. +menu "Networking Debugging" + +source "net/Kconfig.debug" + +endmenu # "Networking Debugging" menu "Memory Debugging" @@ -2106,6 +2112,16 @@ config BACKTRACE_SELF_TEST Say N if you are unsure. +config TEST_REF_TRACKER + tristate "Self test for reference tracker" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select REF_TRACKER + help + This option provides a kernel module performing tests + using reference tracker infrastructure. + + Say N if you are unsure. + config RBTREE_TEST tristate "Red-Black tree test" depends on DEBUG_KERNEL diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index e0a93ffdef30..63b70b8c5551 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -191,6 +191,26 @@ config KCSAN_STRICT closely aligns with the rules defined by the Linux-kernel memory consistency model (LKMM). +config KCSAN_WEAK_MEMORY + bool "Enable weak memory modeling to detect missing memory barriers" + default y + depends on KCSAN_STRICT + # We can either let objtool nop __tsan_func_{entry,exit}() and builtin + # atomics instrumentation in .noinstr.text, or use a compiler that can + # implement __no_kcsan to really remove all instrumentation. + depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000 + help + Enable support for modeling a subset of weak memory, which allows + detecting a subset of data races due to missing memory barriers. + + Depends on KCSAN_STRICT, because the options strenghtening certain + plain accesses by default (depending on !KCSAN_STRICT) reduce the + ability to detect any data races invoving reordered accesses, in + particular reordered writes. + + Weak memory modeling relies on additional instrumentation and may + affect performance. + config KCSAN_REPORT_VALUE_CHANGE_ONLY bool "Only report races where watcher observed a data value change" default y diff --git a/lib/Makefile b/lib/Makefile index 364c23f15578..b213a7bbf3fd 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -101,7 +101,7 @@ obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o obj-$(CONFIG_TEST_HMM) += test_hmm.o obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o - +obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o # # CFLAGS for compiling floating point code inside the kernel. 
x86/Makefile turns # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS @@ -270,6 +270,8 @@ obj-$(CONFIG_STACKDEPOT) += stackdepot.o KASAN_SANITIZE_stackdepot.o := n KCOV_INSTRUMENT_stackdepot.o := n +obj-$(CONFIG_REF_TRACKER) += ref_tracker.o + libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ fdt_empty_tree.o fdt_addresses.o $(foreach file, $(libfdt_files), \ diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c index 27bbe891714f..0fd3c454a468 100644 --- a/lib/asn1_encoder.c +++ b/lib/asn1_encoder.c @@ -164,8 +164,6 @@ asn1_encode_oid(unsigned char *data, const unsigned char *end_data, data_len -= 3; - ret = 0; - for (i = 2; i < oid_len; i++) { ret = asn1_encode_oid_digit(&d, &data_len, oid[i]); if (ret < 0) diff --git a/lib/atomic64.c b/lib/atomic64.c index 3df653994177..caf895789a1e 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=) #undef ATOMIC64_OPS #define ATOMIC64_OPS(op, c_op) \ ATOMIC64_OP(op, c_op) \ - ATOMIC64_OP_RETURN(op, c_op) \ ATOMIC64_FETCH_OP(op, c_op) ATOMIC64_OPS(and, &=) @@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP -#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP s64 generic_atomic64_dec_if_positive(atomic64_t *v) diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 545ccbddf6a1..8620f38e117c 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -1,7 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -comment "Crypto library routines" - config CRYPTO_LIB_AES tristate @@ -9,14 +7,14 @@ config CRYPTO_LIB_ARC4 tristate config CRYPTO_ARCH_HAVE_LIB_BLAKE2S - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the Blake2s library interface, either builtin or as a module. config CRYPTO_LIB_BLAKE2S_GENERIC - tristate + def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S help This symbol can be depended upon by arch implementations of the Blake2s library interface that require the generic code as a @@ -24,15 +22,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_BLAKE2S. -config CRYPTO_LIB_BLAKE2S - tristate "BLAKE2s hash function library" - depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S - select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n - help - Enable the Blake2s library interface. This interface may be fulfilled - by either the generic implementation or an arch-specific one, if one - is available and enabled. - config CRYPTO_ARCH_HAVE_LIB_CHACHA tristate help @@ -51,7 +40,7 @@ config CRYPTO_LIB_CHACHA_GENERIC of CRYPTO_LIB_CHACHA. config CRYPTO_LIB_CHACHA - tristate "ChaCha library interface" + tristate depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n help @@ -76,7 +65,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC of CRYPTO_LIB_CURVE25519. config CRYPTO_LIB_CURVE25519 - tristate "Curve25519 scalar multiplication library" + tristate depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n help @@ -111,7 +100,7 @@ config CRYPTO_LIB_POLY1305_GENERIC of CRYPTO_LIB_POLY1305. 
config CRYPTO_LIB_POLY1305 - tristate "Poly1305 library interface" + tristate depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n help @@ -120,7 +109,7 @@ config CRYPTO_LIB_POLY1305 is available and enabled. config CRYPTO_LIB_CHACHA20POLY1305 - tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" + tristate depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 select CRYPTO_LIB_CHACHA diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 73205ed269ba..ed43a41f2dcc 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -10,11 +10,10 @@ libaes-y := aes.o obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o libarc4-y := arc4.o -obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o -libblake2s-generic-y += blake2s-generic.o - -obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o -libblake2s-y += blake2s.o +# blake2s is used by the /dev/random driver which is always builtin +obj-y += libblake2s.o +libblake2s-y := blake2s.o +libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o libchacha20poly1305-y += chacha20poly1305.o diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c index 04ff8df24513..75ccb3e633e6 100644 --- a/lib/crypto/blake2s-generic.c +++ b/lib/crypto/blake2s-generic.c @@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state, state->t[1] += (state->t[0] < inc); } -void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, +void blake2s_compress(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc) + __weak __alias(blake2s_compress_generic); + +void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, size_t nblocks, const u32 inc) { u32 m[16]; diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 4055aa593ec4..93f2ae051370 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -16,12 +16,6 @@ #include <linux/init.h> #include <linux/bug.h> -#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S) -# define blake2s_compress blake2s_compress_arch -#else -# define blake2s_compress blake2s_compress_generic -#endif - void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { __blake2s_update(state, in, inlen, blake2s_compress); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 3bd741e50a2d..c7ed4aabec04 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -504,25 +504,28 @@ int kunit_run_tests(struct kunit_suite *suite) struct kunit_result_stats param_stats = { 0 }; test_case->status = KUNIT_SKIPPED; - if (test_case->generate_params) { + if (!test_case->generate_params) { + /* Non-parameterised test. */ + kunit_run_case_catch_errors(suite, test_case, &test); + kunit_update_stats(¶m_stats, test.status); + } else { /* Get initial param. 
*/ param_desc[0] = '\0'; test.param_value = test_case->generate_params(NULL, param_desc); - } + kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT + "# Subtest: %s", test_case->name); - do { - kunit_run_case_catch_errors(suite, test_case, &test); + while (test.param_value) { + kunit_run_case_catch_errors(suite, test_case, &test); - if (test_case->generate_params) { if (param_desc[0] == '\0') { snprintf(param_desc, sizeof(param_desc), "param-%d", test.param_index); } kunit_log(KERN_INFO, &test, - KUNIT_SUBTEST_INDENT - "# %s: %s %d - %s", - test_case->name, + KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT + "%s %d - %s", kunit_status_to_ok_not_ok(test.status), test.param_index + 1, param_desc); @@ -530,11 +533,11 @@ int kunit_run_tests(struct kunit_suite *suite) param_desc[0] = '\0'; test.param_value = test_case->generate_params(test.param_value, param_desc); test.param_index++; - } - kunit_update_stats(¶m_stats, test.status); + kunit_update_stats(¶m_stats, test.status); + } + } - } while (test.param_value); kunit_print_test_stats(&test, param_stats); diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 71652e1c397c..8d24279fad05 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -26,6 +26,12 @@ #include <linux/rtmutex.h> #include <linux/local_lock.h> +#ifdef CONFIG_PREEMPT_RT +# define NON_RT(...) +#else +# define NON_RT(...) __VA_ARGS__ +#endif + /* * Change this to 1 if you want to see the failure printouts: */ @@ -139,7 +145,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2); #endif -static local_lock_t local_A = INIT_LOCAL_LOCK(local_A); +static DEFINE_PER_CPU(local_lock_t, local_A); /* * non-inlined runtime initializers, to let separate locks share @@ -712,12 +718,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex); #undef E +#ifdef CONFIG_PREEMPT_RT +# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); } +#else +# define RT_PREPARE_DBL_UNLOCK() +#endif /* * Double unlock: */ #define E() \ \ LOCK(A); \ + RT_PREPARE_DBL_UNLOCK(); \ UNLOCK(A); \ UNLOCK(A); /* fail */ @@ -802,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin) @@ -810,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) +#endif #undef E1 #undef E2 +#ifndef CONFIG_PREEMPT_RT /* * Enabling hardirqs with a softirq-safe lock held: */ @@ -846,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #undef E1 #undef E2 +#endif + /* * Enabling irqs with an irq-safe lock held: */ @@ -875,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin) @@ -883,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) +#endif #undef E1 #undef E2 @@ -921,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin) @@ -929,6 
+949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) +#endif #undef E1 #undef E2 @@ -969,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin) @@ -977,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) +#endif #undef E1 #undef E2 @@ -1031,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin) @@ -1039,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) +#endif #undef E1 #undef E2 @@ -1206,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-softirq.h" #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock) +#endif #undef E1 #undef E2 @@ -1252,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-softirq.h" #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock) +#endif #undef E1 #undef E2 @@ -1306,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-softirq.h" #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock) +#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) @@ -1320,7 +1351,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock) # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) # define I_WW(x) lockdep_reset_lock(&x.dep_map) -# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map) +# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map)) #ifdef CONFIG_RT_MUTEXES # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map) #endif @@ -1380,7 +1411,7 @@ static void reset_locks(void) init_shared_classes(); raw_spin_lock_init(&raw_lock_A); raw_spin_lock_init(&raw_lock_B); - local_lock_init(&local_A); + local_lock_init(this_cpu_ptr(&local_A)); ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep); memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2)); @@ -1398,7 
+1429,13 @@ static int unexpected_testcase_failures; static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) { - unsigned long saved_preempt_count = preempt_count(); + int saved_preempt_count = preempt_count(); +#ifdef CONFIG_PREEMPT_RT +#ifdef CONFIG_SMP + int saved_mgd_count = current->migration_disabled; +#endif + int saved_rcu_count = current->rcu_read_lock_nesting; +#endif WARN_ON(irqs_disabled()); @@ -1432,6 +1469,18 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) * count, so restore it: */ preempt_count_set(saved_preempt_count); + +#ifdef CONFIG_PREEMPT_RT +#ifdef CONFIG_SMP + while (current->migration_disabled > saved_mgd_count) + migrate_enable(); +#endif + + while (current->rcu_read_lock_nesting > saved_rcu_count) + rcu_read_unlock(); + WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count); +#endif + #ifdef CONFIG_TRACE_IRQFLAGS if (softirq_count()) current->softirqs_enabled = 0; @@ -1499,7 +1548,7 @@ static inline void print_testname(const char *testname) #define DO_TESTCASE_2x2RW(desc, name, nr) \ DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \ - DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \ + NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \ #define DO_TESTCASE_6x2x2RW(desc, name) \ DO_TESTCASE_2x2RW(desc, name, 123); \ @@ -1547,19 +1596,19 @@ static inline void print_testname(const char *testname) #define DO_TESTCASE_2I(desc, name, nr) \ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_1("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_2IB(desc, name, nr) \ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_1B("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_6I(desc, name, nr) \ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_3("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_6IRW(desc, name, nr) \ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_3RW("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_2x3(desc, name) \ DO_TESTCASE_3(desc, name, 12); \ @@ -1651,6 +1700,22 @@ static void ww_test_fail_acquire(void) #endif } +#ifdef CONFIG_PREEMPT_RT +#define ww_mutex_base_lock(b) rt_mutex_lock(b) +#define ww_mutex_base_trylock(b) rt_mutex_trylock(b) +#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2) +#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b) +#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b) +#define ww_mutex_base_unlock(b) rt_mutex_unlock(b) +#else +#define ww_mutex_base_lock(b) mutex_lock(b) +#define ww_mutex_base_trylock(b) mutex_trylock(b) +#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2) +#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b) +#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b) +#define ww_mutex_base_unlock(b) mutex_unlock(b) +#endif + static void ww_test_normal(void) { int ret; @@ -1665,50 +1730,50 @@ static void ww_test_normal(void) /* mutex_lock (and indirectly, mutex_lock_nested) */ o.ctx = (void *)~0UL; - mutex_lock(&o.base); - mutex_unlock(&o.base); + ww_mutex_base_lock(&o.base); + ww_mutex_base_unlock(&o.base); WARN_ON(o.ctx != (void *)~0UL); /* mutex_lock_interruptible (and *_nested) */ o.ctx = (void *)~0UL; - ret = mutex_lock_interruptible(&o.base); + ret = 
ww_mutex_base_lock_interruptible(&o.base); if (!ret) - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); else WARN_ON(1); WARN_ON(o.ctx != (void *)~0UL); /* mutex_lock_killable (and *_nested) */ o.ctx = (void *)~0UL; - ret = mutex_lock_killable(&o.base); + ret = ww_mutex_base_lock_killable(&o.base); if (!ret) - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); else WARN_ON(1); WARN_ON(o.ctx != (void *)~0UL); /* trylock, succeeding */ o.ctx = (void *)~0UL; - ret = mutex_trylock(&o.base); + ret = ww_mutex_base_trylock(&o.base); WARN_ON(!ret); if (ret) - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); else WARN_ON(1); WARN_ON(o.ctx != (void *)~0UL); /* trylock, failing */ o.ctx = (void *)~0UL; - mutex_lock(&o.base); - ret = mutex_trylock(&o.base); + ww_mutex_base_lock(&o.base); + ret = ww_mutex_base_trylock(&o.base); WARN_ON(ret); - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); WARN_ON(o.ctx != (void *)~0UL); /* nest_lock */ o.ctx = (void *)~0UL; - mutex_lock_nest_lock(&o.base, &t); - mutex_unlock(&o.base); + ww_mutex_base_lock_nest_lock(&o.base, &t); + ww_mutex_base_unlock(&o.base); WARN_ON(o.ctx != (void *)~0UL); } @@ -1721,7 +1786,7 @@ static void ww_test_two_contexts(void) static void ww_test_diff_class(void) { WWAI(&t); -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES t.ww_class = NULL; #endif WWL(&o, &t); @@ -1785,7 +1850,7 @@ static void ww_test_edeadlk_normal(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); o2.ctx = &t2; mutex_release(&o2.base.dep_map, _THIS_IP_); @@ -1801,7 +1866,7 @@ static void ww_test_edeadlk_normal(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); WWU(&o); WWL(&o2, &t); @@ -1811,7 +1876,7 @@ static void ww_test_edeadlk_normal_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1827,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); WWU(&o); ww_mutex_lock_slow(&o2, &t); @@ -1837,7 +1902,7 @@ static void ww_test_edeadlk_no_unlock(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); o2.ctx = &t2; mutex_release(&o2.base.dep_map, _THIS_IP_); @@ -1853,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); WWL(&o2, &t); } @@ -1862,7 +1927,7 @@ static void ww_test_edeadlk_no_unlock_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1878,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_slow(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); ww_mutex_lock_slow(&o2, &t); } @@ -1887,7 +1952,7 @@ static void ww_test_edeadlk_acquire_more(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1908,7 +1973,7 @@ static void ww_test_edeadlk_acquire_more_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1929,11 +1994,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); 
mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; - mutex_lock(&o3.base); + ww_mutex_base_lock(&o3.base); mutex_release(&o3.base.dep_map, _THIS_IP_); o3.ctx = &t2; @@ -1955,11 +2020,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; - mutex_lock(&o3.base); + ww_mutex_base_lock(&o3.base); mutex_release(&o3.base.dep_map, _THIS_IP_); o3.ctx = &t2; @@ -1980,7 +2045,7 @@ static void ww_test_edeadlk_acquire_wrong(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -2005,7 +2070,7 @@ static void ww_test_edeadlk_acquire_wrong_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -2646,8 +2711,8 @@ static void wait_context_tests(void) static void local_lock_2(void) { - local_lock_acquire(&local_A); /* IRQ-ON */ - local_lock_release(&local_A); + local_lock(&local_A); /* IRQ-ON */ + local_unlock(&local_A); HARDIRQ_ENTER(); spin_lock(&lock_A); /* IN-IRQ */ @@ -2656,18 +2721,18 @@ static void local_lock_2(void) HARDIRQ_DISABLE(); spin_lock(&lock_A); - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */ - local_lock_release(&local_A); + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */ + local_unlock(&local_A); spin_unlock(&lock_A); HARDIRQ_ENABLE(); } static void local_lock_3A(void) { - local_lock_acquire(&local_A); /* IRQ-ON */ + local_lock(&local_A); /* IRQ-ON */ spin_lock(&lock_B); /* IRQ-ON */ spin_unlock(&lock_B); - local_lock_release(&local_A); + local_unlock(&local_A); HARDIRQ_ENTER(); spin_lock(&lock_A); /* IN-IRQ */ @@ -2676,18 +2741,18 @@ static void local_lock_3A(void) HARDIRQ_DISABLE(); spin_lock(&lock_A); - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ - local_lock_release(&local_A); + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ + local_unlock(&local_A); spin_unlock(&lock_A); HARDIRQ_ENABLE(); } static void local_lock_3B(void) { - local_lock_acquire(&local_A); /* IRQ-ON */ + local_lock(&local_A); /* IRQ-ON */ spin_lock(&lock_B); /* IRQ-ON */ spin_unlock(&lock_B); - local_lock_release(&local_A); + local_unlock(&local_A); HARDIRQ_ENTER(); spin_lock(&lock_A); /* IN-IRQ */ @@ -2696,8 +2761,8 @@ static void local_lock_3B(void) HARDIRQ_DISABLE(); spin_lock(&lock_A); - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ - local_lock_release(&local_A); + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ + local_unlock(&local_A); spin_unlock(&lock_A); HARDIRQ_ENABLE(); @@ -2812,7 +2877,7 @@ void locking_selftest(void) printk("------------------------\n"); printk("| Locking API testsuite:\n"); printk("----------------------------------------------------------------------------\n"); - printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n"); + printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n"); printk(" --------------------------------------------------------------------------\n"); init_shared_classes(); @@ -2885,12 +2950,11 @@ void locking_selftest(void) DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1); printk(" --------------------------------------------------------------------------\n"); - /* * irq-context testcases: */ DO_TESTCASE_2x6("irqs-on + irq-safe-A", 
irqsafe1); - DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); + NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A)); DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3); DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4); diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c index 9bdfde0c0f86..8c3365f26e51 100644 --- a/lib/logic_iomem.c +++ b/lib/logic_iomem.c @@ -21,15 +21,15 @@ struct logic_iomem_area { #define AREA_SHIFT 24 #define MAX_AREA_SIZE (1 << AREA_SHIFT) -#define MAX_AREAS ((1ULL<<32) / MAX_AREA_SIZE) +#define MAX_AREAS ((1U << 31) / MAX_AREA_SIZE) #define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT) #define AREA_MASK (MAX_AREA_SIZE - 1) #ifdef CONFIG_64BIT #define IOREMAP_BIAS 0xDEAD000000000000UL #define IOREMAP_MASK 0xFFFFFFFF00000000UL #else -#define IOREMAP_BIAS 0 -#define IOREMAP_MASK 0 +#define IOREMAP_BIAS 0x80000000UL +#define IOREMAP_MASK 0x80000000UL #endif static DEFINE_MUTEX(regions_mtx); @@ -76,10 +76,10 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size) return NULL; } -static void real_iounmap(void __iomem *addr) +static void real_iounmap(volatile void __iomem *addr) { WARN(1, "invalid iounmap for addr 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); } #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ @@ -149,7 +149,7 @@ get_area(const volatile void __iomem *addr) return NULL; } -void iounmap(void __iomem *addr) +void iounmap(volatile void __iomem *addr) { struct logic_iomem_area *area = get_area(addr); @@ -173,7 +173,7 @@ EXPORT_SYMBOL(iounmap); static u##sz real_raw_read ## op(const volatile void __iomem *addr) \ { \ WARN(1, "Invalid read" #op " at address %llx\n", \ - (unsigned long long __force)addr); \ + (unsigned long long)(uintptr_t __force)addr); \ return (u ## sz)~0ULL; \ } \ \ @@ -181,7 +181,8 @@ static void real_raw_write ## op(u ## sz val, \ volatile void __iomem *addr) \ { \ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \ - (unsigned long long)val, (unsigned long long __force)addr);\ + (unsigned long long)val, \ + (unsigned long long)(uintptr_t __force)addr);\ } \ MAKE_FALLBACK(b, 8); @@ -194,14 +195,14 @@ MAKE_FALLBACK(q, 64); static void real_memset_io(volatile void __iomem *addr, int value, size_t size) { WARN(1, "Invalid memset_io at address 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); } static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) { WARN(1, "Invalid memcpy_fromio at address 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); memset(buffer, 0xff, size); } @@ -210,7 +211,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { WARN(1, "Invalid memcpy_toio at address 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); } #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ diff --git a/lib/mpi/mpi-mod.c b/lib/mpi/mpi-mod.c index 47bc59edd4ff..54fcc01564d9 100644 --- a/lib/mpi/mpi-mod.c +++ b/lib/mpi/mpi-mod.c @@ -40,6 +40,8 @@ mpi_barrett_t mpi_barrett_init(MPI m, int copy) mpi_normalize(m); ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; if (copy) { ctx->m = mpi_copy(m); diff --git a/lib/objagg.c b/lib/objagg.c index 5e1676ccdadd..1e248629ed64 100644 --- a/lib/objagg.c +++ b/lib/objagg.c @@ -781,7 +781,6 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg) 
struct objagg_tmp_node *node; struct objagg_tmp_node *pnode; struct objagg_obj *objagg_obj; - size_t alloc_size; int i, j; graph = kzalloc(sizeof(*graph), GFP_KERNEL); @@ -793,9 +792,7 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg) goto err_nodes_alloc; graph->nodes_count = nodes_count; - alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) * - sizeof(unsigned long); - graph->edges = kzalloc(alloc_size, GFP_KERNEL); + graph->edges = bitmap_zalloc(nodes_count * nodes_count, GFP_KERNEL); if (!graph->edges) goto err_edges_alloc; @@ -833,7 +830,7 @@ err_nodes_alloc: static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph) { - kfree(graph->edges); + bitmap_free(graph->edges); kfree(graph->nodes); kfree(graph); } diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c new file mode 100644 index 000000000000..0ae2e66dcf0f --- /dev/null +++ b/lib/ref_tracker.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/export.h> +#include <linux/ref_tracker.h> +#include <linux/slab.h> +#include <linux/stacktrace.h> +#include <linux/stackdepot.h> + +#define REF_TRACKER_STACK_ENTRIES 16 + +struct ref_tracker { + struct list_head head; /* anchor into dir->list or dir->quarantine */ + bool dead; + depot_stack_handle_t alloc_stack_handle; + depot_stack_handle_t free_stack_handle; +}; + +void ref_tracker_dir_exit(struct ref_tracker_dir *dir) +{ + struct ref_tracker *tracker, *n; + unsigned long flags; + bool leak = false; + + spin_lock_irqsave(&dir->lock, flags); + list_for_each_entry_safe(tracker, n, &dir->quarantine, head) { + list_del(&tracker->head); + kfree(tracker); + dir->quarantine_avail++; + } + list_for_each_entry_safe(tracker, n, &dir->list, head) { + pr_err("leaked reference.\n"); + if (tracker->alloc_stack_handle) + stack_depot_print(tracker->alloc_stack_handle); + leak = true; + list_del(&tracker->head); + kfree(tracker); + } + spin_unlock_irqrestore(&dir->lock, flags); + WARN_ON_ONCE(leak); + WARN_ON_ONCE(refcount_read(&dir->untracked) != 1); +} +EXPORT_SYMBOL(ref_tracker_dir_exit); + +void ref_tracker_dir_print(struct ref_tracker_dir *dir, + unsigned int display_limit) +{ + struct ref_tracker *tracker; + unsigned long flags; + unsigned int i = 0; + + spin_lock_irqsave(&dir->lock, flags); + list_for_each_entry(tracker, &dir->list, head) { + if (i < display_limit) { + pr_err("leaked reference.\n"); + if (tracker->alloc_stack_handle) + stack_depot_print(tracker->alloc_stack_handle); + i++; + } else { + break; + } + } + spin_unlock_irqrestore(&dir->lock, flags); +} +EXPORT_SYMBOL(ref_tracker_dir_print); + +int ref_tracker_alloc(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp, + gfp_t gfp) +{ + unsigned long entries[REF_TRACKER_STACK_ENTRIES]; + struct ref_tracker *tracker; + unsigned int nr_entries; + unsigned long flags; + + *trackerp = tracker = kzalloc(sizeof(*tracker), gfp | __GFP_NOFAIL); + if (unlikely(!tracker)) { + pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); + refcount_inc(&dir->untracked); + return -ENOMEM; + } + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + nr_entries = filter_irq_stacks(entries, nr_entries); + tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp); + + spin_lock_irqsave(&dir->lock, flags); + list_add(&tracker->head, &dir->list); + spin_unlock_irqrestore(&dir->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(ref_tracker_alloc); + +int ref_tracker_free(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp) +{ + 
unsigned long entries[REF_TRACKER_STACK_ENTRIES]; + struct ref_tracker *tracker = *trackerp; + depot_stack_handle_t stack_handle; + unsigned int nr_entries; + unsigned long flags; + + if (!tracker) { + refcount_dec(&dir->untracked); + return -EEXIST; + } + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + nr_entries = filter_irq_stacks(entries, nr_entries); + stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC); + + spin_lock_irqsave(&dir->lock, flags); + if (tracker->dead) { + pr_err("reference already released.\n"); + if (tracker->alloc_stack_handle) { + pr_err("allocated in:\n"); + stack_depot_print(tracker->alloc_stack_handle); + } + if (tracker->free_stack_handle) { + pr_err("freed in:\n"); + stack_depot_print(tracker->free_stack_handle); + } + spin_unlock_irqrestore(&dir->lock, flags); + WARN_ON_ONCE(1); + return -EINVAL; + } + tracker->dead = true; + + tracker->free_stack_handle = stack_handle; + + list_move_tail(&tracker->head, &dir->quarantine); + if (!dir->quarantine_avail) { + tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head); + list_del(&tracker->head); + } else { + dir->quarantine_avail--; + tracker = NULL; + } + spin_unlock_irqrestore(&dir->lock, flags); + + kfree(tracker); + return 0; +} +EXPORT_SYMBOL_GPL(ref_tracker_free); diff --git a/lib/siphash.c b/lib/siphash.c index a90112ee72a1..72b9068ab57b 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -49,6 +49,7 @@ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_unaligned); -#endif /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 @@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32); HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); -#endif /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 @@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32); HSIPROUND; \ return v1 ^ v3; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); @@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -461,7 +462,6 @@ u32 
__hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); -#endif /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 diff --git a/lib/test_bpf.c b/lib/test_bpf.c index adae39567264..0c5cb2d6436a 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -14683,7 +14683,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, - .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, }, { "Tail call count preserved across function calls", @@ -14705,7 +14705,7 @@ static struct tail_call_test tail_call_tests[] = { }, .stack_depth = 8, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, - .result = (MAX_TAIL_CALL_CNT + 1 + 1) * MAX_TESTRUNS, + .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, }, { "Tail call error path, NULL target", diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c new file mode 100644 index 000000000000..19d7dec70cc6 --- /dev/null +++ b/lib/test_ref_tracker.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Referrence tracker self test. + * + * Copyright (c) 2021 Eric Dumazet <edumazet@google.com> + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/ref_tracker.h> +#include <linux/slab.h> +#include <linux/timer.h> + +static struct ref_tracker_dir ref_dir; +static struct ref_tracker *tracker[20]; + +#define TRT_ALLOC(X) static noinline void \ + alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir, \ + struct ref_tracker **trackerp) \ + { \ + ref_tracker_alloc(dir, trackerp, GFP_KERNEL); \ + } + +TRT_ALLOC(1) +TRT_ALLOC(2) +TRT_ALLOC(3) +TRT_ALLOC(4) +TRT_ALLOC(5) +TRT_ALLOC(6) +TRT_ALLOC(7) +TRT_ALLOC(8) +TRT_ALLOC(9) +TRT_ALLOC(10) +TRT_ALLOC(11) +TRT_ALLOC(12) +TRT_ALLOC(13) +TRT_ALLOC(14) +TRT_ALLOC(15) +TRT_ALLOC(16) +TRT_ALLOC(17) +TRT_ALLOC(18) +TRT_ALLOC(19) + +#undef TRT_ALLOC + +static noinline void +alloctest_ref_tracker_free(struct ref_tracker_dir *dir, + struct ref_tracker **trackerp) +{ + ref_tracker_free(dir, trackerp); +} + + +static struct timer_list test_ref_tracker_timer; +static atomic_t test_ref_timer_done = ATOMIC_INIT(0); + +static void test_ref_tracker_timer_func(struct timer_list *t) +{ + ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC); + atomic_set(&test_ref_timer_done, 1); +} + +static int __init test_ref_tracker_init(void) +{ + int i; + + ref_tracker_dir_init(&ref_dir, 100); + + timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0); + mod_timer(&test_ref_tracker_timer, jiffies + 1); + + alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]); + alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]); + alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]); + alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]); + alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]); + alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]); + alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]); + alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]); + alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]); + alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]); + alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]); + alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]); + alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]); + alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]); + alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]); + alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]); + 
alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]); + alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]); + alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]); + + /* free all trackers but first 0 and 1. */ + for (i = 2; i < ARRAY_SIZE(tracker); i++) + alloctest_ref_tracker_free(&ref_dir, &tracker[i]); + + /* Attempt to free an already freed tracker. */ + alloctest_ref_tracker_free(&ref_dir, &tracker[2]); + + while (!atomic_read(&test_ref_timer_done)) + msleep(1); + + /* This should warn about tracker[0] & tracker[1] being not freed. */ + ref_tracker_dir_exit(&ref_dir); + + return 0; +} + +static void __exit test_ref_tracker_exit(void) +{ +} + +module_init(test_ref_tracker_init); +module_exit(test_ref_tracker_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 58d5e567f836..53d6081f9e8b 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -3564,7 +3564,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) ++fmt; for ( ; *fmt && *fmt != ']'; ++fmt, ++len) - set_bit((u8)*fmt, set); + __set_bit((u8)*fmt, set); /* no ']' or no character set found */ if (!*fmt || !len) @@ -3574,7 +3574,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) if (negate) { bitmap_complement(set, set, 256); /* exclude null '\0' byte */ - clear_bit(0, set); + __clear_bit(0, set); } /* match must be non-empty */ |
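The final vsprintf.c hunk switches vsscanf()'s character-set parsing from set_bit()/clear_bit() to the non-atomic __set_bit()/__clear_bit(), which is safe because the bitmap lives entirely on the caller's stack. A minimal sketch of that distinction, with a hypothetical build_charset() helper standing in for the vsscanf() code:

#include <linux/bitmap.h>
#include <linux/types.h>

static void build_charset(const char *fmt)
{
	DECLARE_BITMAP(set, 256);

	bitmap_zero(set, 256);

	/* Non-atomic variants: 'set' is private to this function, so no
	 * other CPU or interrupt handler can race with these updates. */
	for (; *fmt && *fmt != ']'; fmt++)
		__set_bit((u8)*fmt, set);

	/* The atomic set_bit()/clear_bit() would only be needed if the
	 * bitmap were shared and modified concurrently. */
	__clear_bit(0, set);
}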
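Earlier in the series, lib/crypto/blake2s-generic.c gains a __weak __alias() declaration so that blake2s_compress() resolves to the generic compressor unless an architecture object supplies a strong override, which lets blake2s.c drop its #ifdef dispatch. The pattern in isolation, with hypothetical my_compress*() names:

#include <linux/compiler_attributes.h>	/* __weak, __alias */
#include <linux/types.h>

void my_compress_generic(const u8 *block, size_t nblocks)
{
	/* portable fallback implementation */
}

/*
 * Weak alias: my_compress() resolves to my_compress_generic() unless some
 * other object file in the image defines a strong my_compress() symbol
 * (e.g. an arch-optimized version), in which case the linker prefers it.
 */
void my_compress(const u8 *block, size_t nblocks)
	__weak __alias(my_compress_generic);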
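The largest addition is lib/ref_tracker.c, a small infrastructure for hunting reference-count leaks: ref_tracker_alloc() records the caller's stack in stackdepot, ref_tracker_free() quarantines the tracker so double releases can be reported with both stacks, and ref_tracker_dir_exit() dumps the allocation stack of every reference still outstanding. A minimal usage sketch, assuming ref_tracker_dir_init(dir, quarantine_count) is exposed by the header as the self-test above uses it; struct foo and its helpers are hypothetical, only the ref_tracker calls come from this patch:

#include <linux/ref_tracker.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;
	struct ref_tracker_dir refcnt_tracker;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	refcount_set(&f->refcnt, 1);
	/* keep up to 16 released trackers quarantined so a double free can
	 * be reported with both the allocation and the first-free stacks */
	ref_tracker_dir_init(&f->refcnt_tracker, 16);
	return f;
}

/* Take a reference; *trackerp records the caller's stack via stackdepot. */
static int foo_hold(struct foo *f, struct ref_tracker **trackerp, gfp_t gfp)
{
	refcount_inc(&f->refcnt);
	return ref_tracker_alloc(&f->refcnt_tracker, trackerp, gfp);
}

/* Drop a reference; freeing the same tracker twice triggers a report. */
static void foo_put(struct foo *f, struct ref_tracker **trackerp)
{
	ref_tracker_free(&f->refcnt_tracker, trackerp);
	refcount_dec(&f->refcnt);
}

static void foo_destroy(struct foo *f)
{
	/* Warns and prints the allocation stack of any reference still held. */
	ref_tracker_dir_exit(&f->refcnt_tracker);
	kfree(f);
}

The test_ref_tracker.c module added above exercises exactly this flow: it allocates twenty trackers from distinct call sites, frees all but two, attempts one double free, and then relies on ref_tracker_dir_exit() to warn about the two deliberately leaked references.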