Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_compress.c                |   2
-rw-r--r--  lib/Kconfig                           |   1
-rw-r--r--  lib/Kconfig.debug                     |  17
-rw-r--r--  lib/Kconfig.ubsan                     |   3
-rw-r--r--  lib/Makefile                          |   2
-rw-r--r--  lib/alloc_tag.c                       |   3
-rw-r--r--  lib/atomic64.c                        |  78
-rw-r--r--  lib/crypto/Kconfig                    |  37
-rw-r--r--  lib/dynamic_queue_limits.c            |   2
-rw-r--r--  lib/group_cpus.c                      |   9
-rw-r--r--  lib/iov_iter.c                        |   5
-rw-r--r--  lib/kunit/static_stub.c               |   2
-rw-r--r--  lib/longest_symbol_kunit.c            |  82
-rw-r--r--  lib/lzo/Makefile                      |   2
-rw-r--r--  lib/lzo/lzo1x_compress.c              | 102
-rw-r--r--  lib/lzo/lzo1x_compress_safe.c         |  18
-rw-r--r--  lib/maple_tree.c                      |  28
-rw-r--r--  lib/rcuref.c                          |   5
-rw-r--r--  lib/rhashtable.c                      |  12
-rw-r--r--  lib/sbitmap.c                         |  56
-rw-r--r--  lib/sg_split.c                        |   2
-rw-r--r--  lib/stackinit_kunit.c                 |  30
-rw-r--r--  lib/string.c                          |  13
-rw-r--r--  lib/test_objagg.c                     |   4
-rw-r--r--  lib/test_ubsan.c                      |  18
-rw-r--r--  lib/usercopy_kunit.c                  |   1
-rw-r--r--  lib/vsprintf.c                        |   2
-rw-r--r--  lib/zstd/common/portability_macros.h  |   2
28 files changed, 382 insertions, 156 deletions
diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c
index c02baa4168e1..055356508d97 100644
--- a/lib/842/842_compress.c
+++ b/lib/842/842_compress.c
@@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen,
}
if (repeat_count) {
ret = add_repeat_template(p, repeat_count);
+ if (ret)
+ return ret;
repeat_count = 0;
if (next == last) /* reached max repeat bits */
goto repeat;
diff --git a/lib/Kconfig b/lib/Kconfig
index b38849af6f13..b893c9288c14 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -767,6 +767,7 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED
config PLDMFW
bool
+ select CRC32
default n
config ASN1_ENCODER
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3f9c238bb58e..b1d7c427bbe3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1511,7 +1511,7 @@ config LOCKDEP_SMALL
config LOCKDEP_BITS
int "Bitsize for MAX_LOCKDEP_ENTRIES"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 24
default 15
help
Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
@@ -1527,7 +1527,7 @@ config LOCKDEP_CHAINS_BITS
config LOCKDEP_STACK_TRACE_BITS
int "Bitsize for MAX_STACK_TRACE_ENTRIES"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 26
default 19
help
Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
@@ -1535,7 +1535,7 @@ config LOCKDEP_STACK_TRACE_BITS
config LOCKDEP_STACK_TRACE_HASH_BITS
int "Bitsize for STACK_TRACE_HASH_SIZE"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 26
default 14
help
Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
@@ -1543,7 +1543,7 @@ config LOCKDEP_STACK_TRACE_HASH_BITS
config LOCKDEP_CIRCULAR_QUEUE_BITS
int "Bitsize for elements in circular_queue struct"
depends on LOCKDEP
- range 10 30
+ range 10 26
default 12
help
Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
@@ -2807,6 +2807,15 @@ config FORTIFY_KUNIT_TEST
by the str*() and mem*() family of functions. For testing runtime
traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests.
+config LONGEST_SYM_KUNIT_TEST
+ tristate "Test the longest symbol possible" if !KUNIT_ALL_TESTS
+ depends on KUNIT && KPROBES
+ default KUNIT_ALL_TESTS
+ help
+ Tests the longest symbol possible
+
+ If unsure, say N.
+
config HW_BREAKPOINT_KUNIT_TEST
bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS
depends on HAVE_HW_BREAKPOINT
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 1d4aa7a83b3a..4e4dc430614a 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -118,7 +118,8 @@ config UBSAN_UNREACHABLE
config UBSAN_SIGNED_WRAP
bool "Perform checking for signed arithmetic wrap-around"
- default UBSAN
+ # This is very experimental so drop the next line if you really want it
+ depends on BROKEN
depends on !COMPILE_TEST
# The no_sanitize attribute was introduced in GCC with version 8.
depends on !CC_IS_GCC || GCC_VERSION >= 80000
diff --git a/lib/Makefile b/lib/Makefile
index 773adf88af41..fc878e716825 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -389,6 +389,8 @@ CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
+obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o
+CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes)
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 81e5f9a70f22..e76c40bf29d0 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -113,6 +113,9 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl
struct codetag_bytes n;
unsigned int i, nr = 0;
+ if (IS_ERR_OR_NULL(alloc_tag_cttype))
+ return 0;
+
if (can_sleep)
codetag_lock_module_list(alloc_tag_cttype, true);
else if (!codetag_trylock_module_list(alloc_tag_cttype))
diff --git a/lib/atomic64.c b/lib/atomic64.c
index caf895789a1e..1a72bba36d24 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -25,15 +25,15 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_read);
@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
v->counter = i;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
} \
EXPORT_SYMBOL(generic_atomic64_##op);
@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = (v->counter c_op a); \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = v->counter; \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val == o)
v->counter = n;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
v->counter = new;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val != u)
v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
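The generic atomic64 code keeps an array of NR_LOCKS per-cacheline locks and hashes each atomic64_t's address to one of them; the change above only swaps the lock type from raw_spinlock_t to a bare arch_spinlock_t, with explicit local_irq_save()/local_irq_restore() around each critical section. A toy userspace analogue of the address-hashed lock array, with pthread spinlocks standing in for arch_spinlock_t and a made-up cacheline shift, is sketched below; it is an illustration of the pattern, not the kernel implementation.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of lib/atomic64.c's hashed lock array: every atomic64_t is
 * protected by one of NR_LOCKS spinlocks, picked from its address.
 * pthread spinlocks stand in for arch_spinlock_t; the cacheline shift
 * below is an assumption made for the demo.
 */
#define NR_LOCKS 16

typedef struct { long long counter; } atomic64_t;

static pthread_spinlock_t atomic64_lock[NR_LOCKS];

static pthread_spinlock_t *lock_addr(const atomic64_t *v)
{
	uintptr_t addr = (uintptr_t)v;

	addr >>= 6;				/* spread cachelines across locks */
	return &atomic64_lock[addr & (NR_LOCKS - 1)];
}

static long long toy_atomic64_add_return(long long a, atomic64_t *v)
{
	pthread_spinlock_t *lock = lock_addr(v);
	long long val;

	/* the kernel version also disables interrupts around this section */
	pthread_spin_lock(lock);
	val = (v->counter += a);
	pthread_spin_unlock(lock);
	return val;
}

int main(void)
{
	atomic64_t v = { 0 };

	for (int i = 0; i < NR_LOCKS; i++)
		pthread_spin_init(&atomic64_lock[i], PTHREAD_PROCESS_PRIVATE);
	printf("%lld\n", toy_atomic64_add_return(5, &v));
	return 0;
}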
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index b01253cac70a..b09e78da959a 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -42,7 +42,7 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
of CRYPTO_LIB_BLAKE2S.
config CRYPTO_ARCH_HAVE_LIB_CHACHA
- tristate
+ bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the ChaCha library interface,
@@ -58,17 +58,21 @@ config CRYPTO_LIB_CHACHA_GENERIC
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_CHACHA.
+config CRYPTO_LIB_CHACHA_INTERNAL
+ tristate
+ select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+
config CRYPTO_LIB_CHACHA
tristate "ChaCha library interface"
- depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
- select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+ select CRYPTO
+ select CRYPTO_LIB_CHACHA_INTERNAL
help
Enable the ChaCha library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
is available and enabled.
config CRYPTO_ARCH_HAVE_LIB_CURVE25519
- tristate
+ bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Curve25519 library interface,
@@ -76,6 +80,7 @@ config CRYPTO_ARCH_HAVE_LIB_CURVE25519
config CRYPTO_LIB_CURVE25519_GENERIC
tristate
+ select CRYPTO_LIB_UTILS
help
This symbol can be depended upon by arch implementations of the
Curve25519 library interface that require the generic code as a
@@ -83,11 +88,14 @@ config CRYPTO_LIB_CURVE25519_GENERIC
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_CURVE25519.
+config CRYPTO_LIB_CURVE25519_INTERNAL
+ tristate
+ select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+
config CRYPTO_LIB_CURVE25519
tristate "Curve25519 scalar multiplication library"
- depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
- select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
- select CRYPTO_LIB_UTILS
+ select CRYPTO
+ select CRYPTO_LIB_CURVE25519_INTERNAL
help
Enable the Curve25519 library interface. This interface may be
fulfilled by either the generic implementation or an arch-specific
@@ -104,7 +112,7 @@ config CRYPTO_LIB_POLY1305_RSIZE
default 1
config CRYPTO_ARCH_HAVE_LIB_POLY1305
- tristate
+ bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Poly1305 library interface,
@@ -119,10 +127,14 @@ config CRYPTO_LIB_POLY1305_GENERIC
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_POLY1305.
+config CRYPTO_LIB_POLY1305_INTERNAL
+ tristate
+ select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+
config CRYPTO_LIB_POLY1305
tristate "Poly1305 library interface"
- depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
- select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+ select CRYPTO
+ select CRYPTO_LIB_POLY1305_INTERNAL
help
Enable the Poly1305 library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
@@ -130,11 +142,10 @@ config CRYPTO_LIB_POLY1305
config CRYPTO_LIB_CHACHA20POLY1305
tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
- depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
- depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
- depends on CRYPTO
+ select CRYPTO
select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_POLY1305
+ select CRYPTO_LIB_UTILS
select CRYPTO_ALGAPI
config CRYPTO_LIB_SHA1
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e49deddd3de9..7d1dfbb99b39 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(dql_completed);
void dql_reset(struct dql *dql)
{
/* Reset all dynamic values */
- dql->limit = 0;
+ dql->limit = dql->min_limit;
dql->num_queued = 0;
dql->num_completed = 0;
dql->last_obj_cnt = 0;
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index ee272c4cefcc..18d43a406114 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -352,6 +352,9 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
int ret = -ENOMEM;
struct cpumask *masks = NULL;
+ if (numgrps == 0)
+ return NULL;
+
if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return NULL;
@@ -426,8 +429,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
#else /* CONFIG_SMP */
struct cpumask *group_cpus_evenly(unsigned int numgrps)
{
- struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ struct cpumask *masks;
+ if (numgrps == 0)
+ return NULL;
+
+ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
if (!masks)
return NULL;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 908e75a28d90..8ede6be556a9 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -820,7 +820,7 @@ static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
size_t size = i->count;
do {
- size_t len = bvec->bv_len;
+ size_t len = bvec->bv_len - skip;
if (len > size)
len = size;
@@ -1428,6 +1428,8 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
struct iovec *iov = *iovp;
ssize_t ret;
+ *iovp = NULL;
+
if (compat)
ret = copy_compat_iovec_from_user(iov, uvec, 1);
else
@@ -1438,7 +1440,6 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
if (unlikely(ret))
return ret;
- *iovp = NULL;
return i->count;
}
diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
index 92b2cccd5e76..484fd85251b4 100644
--- a/lib/kunit/static_stub.c
+++ b/lib/kunit/static_stub.c
@@ -96,7 +96,7 @@ void __kunit_activate_static_stub(struct kunit *test,
/* If the replacement address is NULL, deactivate the stub. */
if (!replacement_addr) {
- kunit_deactivate_static_stub(test, replacement_addr);
+ kunit_deactivate_static_stub(test, real_fn_addr);
return;
}
diff --git a/lib/longest_symbol_kunit.c b/lib/longest_symbol_kunit.c
new file mode 100644
index 000000000000..e3c28ff1807f
--- /dev/null
+++ b/lib/longest_symbol_kunit.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test the longest symbol length. Execute with:
+ * ./tools/testing/kunit/kunit.py run longest-symbol
+ * --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y
+ * --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n
+ * --kconfig_add CONFIG_MITIGATION_RETPOLINE=n
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/stringify.h>
+#include <linux/kprobes.h>
+#include <linux/kallsyms.h>
+
+#define DI(name) s##name##name
+#define DDI(name) DI(n##name##name)
+#define DDDI(name) DDI(n##name##name)
+#define DDDDI(name) DDDI(n##name##name)
+#define DDDDDI(name) DDDDI(n##name##name)
+
+/*Generate a symbol whose name length is 511 */
+#define LONGEST_SYM_NAME DDDDDI(g1h2i3j4k5l6m7n)
+
+#define RETURN_LONGEST_SYM 0xAAAAA
+
+noinline int LONGEST_SYM_NAME(void);
+noinline int LONGEST_SYM_NAME(void)
+{
+ return RETURN_LONGEST_SYM;
+}
+
+_Static_assert(sizeof(__stringify(LONGEST_SYM_NAME)) == KSYM_NAME_LEN,
+"Incorrect symbol length found. Expected KSYM_NAME_LEN: "
+__stringify(KSYM_NAME_LEN) ", but found: "
+__stringify(sizeof(LONGEST_SYM_NAME)));
+
+static void test_longest_symbol(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, LONGEST_SYM_NAME());
+};
+
+static void test_longest_symbol_kallsyms(struct kunit *test)
+{
+ unsigned long (*kallsyms_lookup_name)(const char *name);
+ static int (*longest_sym)(void);
+
+ struct kprobe kp = {
+ .symbol_name = "kallsyms_lookup_name",
+ };
+
+ if (register_kprobe(&kp) < 0) {
+ pr_info("%s: kprobe not registered", __func__);
+ KUNIT_FAIL(test, "test_longest_symbol kallsyms: kprobe not registered\n");
+ return;
+ }
+
+ kunit_warn(test, "test_longest_symbol kallsyms: kprobe registered\n");
+ kallsyms_lookup_name = (unsigned long (*)(const char *name))kp.addr;
+ unregister_kprobe(&kp);
+
+ longest_sym =
+ (void *) kallsyms_lookup_name(__stringify(LONGEST_SYM_NAME));
+ KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, longest_sym());
+};
+
+static struct kunit_case longest_symbol_test_cases[] = {
+ KUNIT_CASE(test_longest_symbol),
+ KUNIT_CASE(test_longest_symbol_kallsyms),
+ {}
+};
+
+static struct kunit_suite longest_symbol_test_suite = {
+ .name = "longest-symbol",
+ .test_cases = longest_symbol_test_cases,
+};
+kunit_test_suite(longest_symbol_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test the longest symbol length");
+MODULE_AUTHOR("Sergio González Collado");
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile
index 2f58fafbbddd..fc7b2b7ef4b2 100644
--- a/lib/lzo/Makefile
+++ b/lib/lzo/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-lzo_compress-objs := lzo1x_compress.o
+lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o
lzo_decompress-objs := lzo1x_decompress_safe.o
obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 47d6d43ea957..7b10ca86a893 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -18,11 +18,22 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-static noinline size_t
-lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len,
- size_t ti, void *wrkmem, signed char *state_offset,
- const unsigned char bitstream_version)
+#undef LZO_UNSAFE
+
+#ifndef LZO_SAFE
+#define LZO_UNSAFE 1
+#define LZO_SAFE(name) name
+#define HAVE_OP(x) 1
+#endif
+
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+
+static noinline int
+LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len,
+ unsigned char **out, unsigned char *op_end,
+ size_t *tp, void *wrkmem,
+ signed char *state_offset,
+ const unsigned char bitstream_version)
{
const unsigned char *ip;
unsigned char *op;
@@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
const unsigned char * const ip_end = in + in_len - 20;
const unsigned char *ii;
lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
+ size_t ti = *tp;
- op = out;
+ op = *out;
ip = in;
ii = ip;
ip += ti < 4 ? 4 - ti : 0;
@@ -116,25 +128,32 @@ next:
if (t != 0) {
if (t <= 3) {
op[*state_offset] |= t;
+ NEED_OP(4);
COPY4(op, ii);
op += t;
} else if (t <= 16) {
+ NEED_OP(17);
*op++ = (t - 3);
COPY8(op, ii);
COPY8(op + 8, ii + 8);
op += t;
} else {
if (t <= 18) {
+ NEED_OP(1);
*op++ = (t - 3);
} else {
size_t tt = t - 18;
+ NEED_OP(1);
*op++ = 0;
while (unlikely(tt > 255)) {
tt -= 255;
+ NEED_OP(1);
*op++ = 0;
}
+ NEED_OP(1);
*op++ = tt;
}
+ NEED_OP(t);
do {
COPY8(op, ii);
COPY8(op + 8, ii + 8);
@@ -151,6 +170,7 @@ next:
if (unlikely(run_length)) {
ip += run_length;
run_length -= MIN_ZERO_RUN_LENGTH;
+ NEED_OP(4);
put_unaligned_le32((run_length << 21) | 0xfffc18
| (run_length & 0x7), op);
op += 4;
@@ -243,10 +263,12 @@ m_len_done:
ip += m_len;
if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
m_off -= 1;
+ NEED_OP(2);
*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
*op++ = (m_off >> 3);
} else if (m_off <= M3_MAX_OFFSET) {
m_off -= 1;
+ NEED_OP(1);
if (m_len <= M3_MAX_LEN)
*op++ = (M3_MARKER | (m_len - 2));
else {
@@ -254,14 +276,18 @@ m_len_done:
*op++ = M3_MARKER | 0;
while (unlikely(m_len > 255)) {
m_len -= 255;
+ NEED_OP(1);
*op++ = 0;
}
+ NEED_OP(1);
*op++ = (m_len);
}
+ NEED_OP(2);
*op++ = (m_off << 2);
*op++ = (m_off >> 6);
} else {
m_off -= 0x4000;
+ NEED_OP(1);
if (m_len <= M4_MAX_LEN)
*op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
@@ -282,11 +308,14 @@ m_len_done:
m_len -= M4_MAX_LEN;
*op++ = (M4_MARKER | ((m_off >> 11) & 8));
while (unlikely(m_len > 255)) {
+ NEED_OP(1);
m_len -= 255;
*op++ = 0;
}
+ NEED_OP(1);
*op++ = (m_len);
}
+ NEED_OP(2);
*op++ = (m_off << 2);
*op++ = (m_off >> 6);
}
@@ -295,14 +324,20 @@ finished_writing_instruction:
ii = ip;
goto next;
}
- *out_len = op - out;
- return in_end - (ii - ti);
+ *out = op;
+ *tp = in_end - (ii - ti);
+ return LZO_E_OK;
+
+output_overrun:
+ return LZO_E_OUTPUT_OVERRUN;
}
-static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len,
- void *wrkmem, const unsigned char bitstream_version)
+static int LZO_SAFE(lzogeneric1x_1_compress)(
+ const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem, const unsigned char bitstream_version)
{
+ unsigned char * const op_end = out + *out_len;
const unsigned char *ip = in;
unsigned char *op = out;
unsigned char *data_start;
@@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
while (l > 20) {
size_t ll = min_t(size_t, l, m4_max_offset + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
+ int err;
+
if ((ll_end + ((t + ll) >> 5)) <= ll_end)
break;
BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
- t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
- &state_offset, bitstream_version);
+ err = LZO_SAFE(lzo1x_1_do_compress)(
+ ip, ll, &op, op_end, &t, wrkmem,
+ &state_offset, bitstream_version);
+ if (err != LZO_E_OK)
+ return err;
ip += ll;
- op += *out_len;
l -= ll;
}
t += l;
@@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
const unsigned char *ii = in + in_len - t;
if (op == data_start && t <= 238) {
+ NEED_OP(1);
*op++ = (17 + t);
} else if (t <= 3) {
op[state_offset] |= t;
} else if (t <= 18) {
+ NEED_OP(1);
*op++ = (t - 3);
} else {
size_t tt = t - 18;
+ NEED_OP(1);
*op++ = 0;
while (tt > 255) {
tt -= 255;
+ NEED_OP(1);
*op++ = 0;
}
+ NEED_OP(1);
*op++ = tt;
}
+ NEED_OP(t);
if (t >= 16) do {
COPY8(op, ii);
COPY8(op + 8, ii + 8);
@@ -368,31 +413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
} while (--t > 0);
}
+ NEED_OP(3);
*op++ = M4_MARKER | 1;
*op++ = 0;
*op++ = 0;
*out_len = op - out;
return LZO_E_OK;
+
+output_overrun:
+ return LZO_E_OUTPUT_OVERRUN;
}
-int lzo1x_1_compress(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len,
- void *wrkmem)
+int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
{
- return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0);
+ return LZO_SAFE(lzogeneric1x_1_compress)(
+ in, in_len, out, out_len, wrkmem, 0);
}
-int lzorle1x_1_compress(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len,
- void *wrkmem)
+int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
{
- return lzogeneric1x_1_compress(in, in_len, out, out_len,
- wrkmem, LZO_VERSION);
+ return LZO_SAFE(lzogeneric1x_1_compress)(
+ in, in_len, out, out_len, wrkmem, LZO_VERSION);
}
-EXPORT_SYMBOL_GPL(lzo1x_1_compress);
-EXPORT_SYMBOL_GPL(lzorle1x_1_compress);
+EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress));
+EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress));
+#ifndef LZO_UNSAFE
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
+#endif
diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c
new file mode 100644
index 000000000000..371c9f849492
--- /dev/null
+++ b/lib/lzo/lzo1x_compress_safe.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LZO1X Compressor from LZO
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#define LZO_SAFE(name) name##_safe
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+
+#include "lzo1x_compress.c"
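The new lzo1x_compress_safe.c produces the bounds-checked variant by re-including lzo1x_compress.c with LZO_SAFE() and HAVE_OP() redefined, so the same compressor body is compiled twice: once unchecked and once with every output write guarded by NEED_OP(). A self-contained sketch of that guard pattern follows; the emit_run() helper and buffer sizes are invented for the demo, only the check-before-write structure mirrors the patch.

#include <stdio.h>
#include <string.h>

/*
 * Minimal model of the NEED_OP()/HAVE_OP() pattern used by the safe
 * compressor: check the remaining room before every output write and
 * bail out with an overrun error instead of writing past op_end.
 */
#define HAVE_OP(x)	((size_t)(op_end - op) >= (size_t)(x))
#define NEED_OP(x)	do { if (!HAVE_OP(x)) goto output_overrun; } while (0)

static int emit_run(unsigned char byte, size_t count,
		    unsigned char **outp, unsigned char *op_end)
{
	unsigned char *op = *outp;

	NEED_OP(1);			/* room for the length byte? */
	*op++ = (unsigned char)count;
	NEED_OP(count);			/* room for the payload? */
	memset(op, byte, count);
	op += count;

	*outp = op;
	return 0;

output_overrun:
	return -1;			/* cf. LZO_E_OUTPUT_OVERRUN */
}

int main(void)
{
	unsigned char buf[8], *op = buf;

	printf("fits:     %d\n", emit_run('a', 4, &op, buf + sizeof(buf)));
	printf("overruns: %d\n", emit_run('b', 16, &op, buf + sizeof(buf)));
	return 0;
}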
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 0cbe913634be..59f83ece2024 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1849,11 +1849,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
* Return: The first split location. The middle split is set in @mid_split.
*/
static inline int mab_calc_split(struct ma_state *mas,
- struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+ struct maple_big_node *bn, unsigned char *mid_split)
{
unsigned char b_end = bn->b_end;
int split = b_end / 2; /* Assume equal split. */
- unsigned char slot_min, slot_count = mt_slots[bn->type];
+ unsigned char slot_count = mt_slots[bn->type];
/*
* To support gap tracking, all NULL entries are kept together and a node cannot
@@ -1886,18 +1886,7 @@ static inline int mab_calc_split(struct ma_state *mas,
split = b_end / 3;
*mid_split = split * 2;
} else {
- slot_min = mt_min_slots[bn->type];
-
*mid_split = 0;
- /*
- * Avoid having a range less than the slot count unless it
- * causes one node to be deficient.
- * NOTE: mt_min_slots is 1 based, b_end and split are zero.
- */
- while ((split < slot_count - 1) &&
- ((bn->pivot[split] - min) < slot_count - 1) &&
- (b_end - split > slot_min))
- split++;
}
/* Avoid ending a node on a NULL entry */
@@ -2366,7 +2355,7 @@ static inline struct maple_enode
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
struct maple_big_node *b_node, struct maple_enode **left,
struct maple_enode **right, struct maple_enode **middle,
- unsigned char *mid_split, unsigned long min)
+ unsigned char *mid_split)
{
unsigned char split = 0;
unsigned char slot_count = mt_slots[b_node->type];
@@ -2379,7 +2368,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
- split = mab_calc_split(mas, b_node, mid_split, min);
+ split = mab_calc_split(mas, b_node, mid_split);
*right = mas_new_ma_node(mas, b_node);
}
@@ -2866,7 +2855,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast->bn->b_end--;
mast->bn->type = mte_node_type(mast->orig_l->node);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
- &mid_split, mast->orig_l->min);
+ &mid_split);
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
@@ -3357,7 +3346,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
if (mas_push_data(mas, height, &mast, false))
break;
- split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
/*
* Usually correct, mab_mas_cp in the above call overwrites
@@ -5346,6 +5335,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
struct maple_enode *start;
if (mte_is_leaf(enode)) {
+ mte_set_node_dead(enode);
node->type = mte_node_type(enode);
goto free_leaf;
}
@@ -5553,8 +5543,9 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
mas_wr_store_type(&wr_mas);
request = mas_prealloc_calc(mas, entry);
if (!request)
- return ret;
+ goto set_flag;
+ mas->mas_flags &= ~MA_STATE_PREALLOC;
mas_node_count_gfp(mas, request, gfp);
if (mas_is_err(mas)) {
mas_set_alloc_req(mas, 0);
@@ -5564,6 +5555,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
return ret;
}
+set_flag:
mas->mas_flags |= MA_STATE_PREALLOC;
return ret;
}
diff --git a/lib/rcuref.c b/lib/rcuref.c
index 97f300eca927..5bd726b71e39 100644
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
+ * @cnt: The resulting value of the fastpath decrement
*
* Invoked when the reference count is outside of the valid zone.
*
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
{
- unsigned int cnt = atomic_read(&ref->refcnt);
-
/* Did this drop the last reference? */
if (likely(cnt == RCUREF_NOREF)) {
/*
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c902639728b..0e9a1d4cf89b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
-
return NULL;
}
@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
+ bool inserted;
+
flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
hash, obj, data);
+ inserted = data && !new_tbl;
+ if (inserted)
+ atomic_inc(&ht->nelems);
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
+
+ if (inserted && rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index d3412984170c..c07e3cd82e29 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -208,8 +208,28 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
return nr;
}
+static unsigned int __map_depth_with_shallow(const struct sbitmap *sb,
+ int index,
+ unsigned int shallow_depth)
+{
+ u64 shallow_word_depth;
+ unsigned int word_depth, reminder;
+
+ word_depth = __map_depth(sb, index);
+ if (shallow_depth >= sb->depth)
+ return word_depth;
+
+ shallow_word_depth = word_depth * shallow_depth;
+ reminder = do_div(shallow_word_depth, sb->depth);
+
+ if (reminder >= (index + 1) * word_depth)
+ shallow_word_depth++;
+
+ return (unsigned int)shallow_word_depth;
+}
+
static int sbitmap_find_bit(struct sbitmap *sb,
- unsigned int depth,
+ unsigned int shallow_depth,
unsigned int index,
unsigned int alloc_hint,
bool wrap)
@@ -218,12 +238,12 @@ static int sbitmap_find_bit(struct sbitmap *sb,
int nr = -1;
for (i = 0; i < sb->map_nr; i++) {
- nr = sbitmap_find_bit_in_word(&sb->map[index],
- min_t(unsigned int,
- __map_depth(sb, index),
- depth),
- alloc_hint, wrap);
+ unsigned int depth = __map_depth_with_shallow(sb, index,
+ shallow_depth);
+ if (depth)
+ nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
+ alloc_hint, wrap);
if (nr != -1) {
nr += index << sb->shift;
break;
@@ -406,27 +426,9 @@ EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
{
- unsigned int wake_batch;
- unsigned int shallow_depth;
-
- /*
- * Each full word of the bitmap has bits_per_word bits, and there might
- * be a partial word. There are depth / bits_per_word full words and
- * depth % bits_per_word bits left over. In bitwise arithmetic:
- *
- * bits_per_word = 1 << shift
- * depth / bits_per_word = depth >> shift
- * depth % bits_per_word = depth & ((1 << shift) - 1)
- *
- * Each word can be limited to sbq->min_shallow_depth bits.
- */
- shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
- depth = ((depth >> sbq->sb.shift) * shallow_depth +
- min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
- wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
- SBQ_WAKE_BATCH);
-
- return wake_batch;
+ return clamp_t(unsigned int,
+ min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES,
+ 1, SBQ_WAKE_BATCH);
}
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
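The new __map_depth_with_shallow() above distributes a shallow allocation limit proportionally across the bitmap words instead of clamping every word to the same value, and the per-word limits add back up to the requested depth. A standalone model of that arithmetic, with made-up sizes (8 words of 64 bits, shallow depth 100), is sketched below.

#include <stdio.h>

int main(void)
{
	const unsigned int words = 8, word_depth = 64;
	const unsigned int depth = words * word_depth, shallow = 100;
	unsigned int total = 0;

	for (unsigned int index = 0; index < words; index++) {
		/* same split as __map_depth_with_shallow() in the diff above */
		unsigned long long sw = (unsigned long long)word_depth * shallow;
		unsigned int limit = sw / depth;
		unsigned int rem = sw % depth;

		if (rem >= (unsigned long long)(index + 1) * word_depth)
			limit++;
		total += limit;
		printf("word %u: limit %u\n", index, limit);
	}
	printf("total %u (requested shallow depth %u)\n", total, shallow);
	return 0;
}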
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 60a0babebf2e..0f89aab5c671 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
if (!j) {
out_sg->offset += split->skip_sg0;
out_sg->length -= split->skip_sg0;
- } else {
- out_sg->offset = 0;
}
sg_dma_address(out_sg) = 0;
sg_dma_len(out_sg) = 0;
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index c40818ec9c18..49d32e43d06e 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -147,6 +147,15 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
; var = *(arg)
/*
+ * The "did we actually fill the stack?" check value needs
+ * to be neither 0 nor any of the "pattern" bytes. The
+ * pattern bytes are compiler, architecture, and type based,
+ * so we have to pick a value that never appears for those
+ * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA.
+ */
+#define FILL_BYTE 0x99
+
+/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
@@ -168,12 +177,12 @@ static noinline void test_ ## name (struct kunit *test) \
ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
- /* Fill stack with 0xFF. */ \
+ /* Fill stack with FILL_BYTE. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 1, \
FETCH_ARG_ ## which(zero)); \
- /* Verify all bytes overwritten with 0xFF. */ \
+ /* Verify all bytes overwritten with FILL_BYTE. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] != 0xFF); \
+ sum += (check_buf[i] != FILL_BYTE); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
@@ -184,7 +193,8 @@ static noinline void test_ ## name (struct kunit *test) \
* possible between the two leaf function calls. \
*/ \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
+ "leaf fill was not 0x%02X!?\n", \
+ FILL_BYTE); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
@@ -196,9 +206,9 @@ static noinline void test_ ## name (struct kunit *test) \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
\
- /* Look for any bytes still 0xFF in check region. */ \
+ /* Validate check region has no FILL_BYTE bytes. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] == 0xFF); \
+ sum += (check_buf[i] == FILL_BYTE); \
\
if (sum != 0 && xfail) \
kunit_skip(test, \
@@ -233,12 +243,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill, \
* stack frame of SOME kind... \
*/ \
memset(buf, (char)(sp & 0xff), sizeof(buf)); \
- /* Fill variable with 0xFF. */ \
+ /* Fill variable with FILL_BYTE. */ \
if (fill) { \
fill_start = &var; \
fill_size = sizeof(var); \
memset(fill_start, \
- (char)((sp & 0xff) | forced_mask), \
+ FILL_BYTE & forced_mask, \
fill_size); \
} \
\
@@ -380,7 +390,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0x55, fill_size);
+ memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
@@ -391,7 +401,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0xaa, fill_size);
+ memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
diff --git a/lib/string.c b/lib/string.c
index 76327b51e36f..e657809fa718 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -113,6 +113,7 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
+#ifndef CONFIG_DCACHE_WORD_ACCESS
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
* If src is unaligned, don't cross a page boundary,
@@ -128,11 +129,13 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
if (((long) dest | (long) src) & (sizeof(long) - 1))
max = 0;
#endif
+#endif
/*
- * read_word_at_a_time() below may read uninitialized bytes after the
- * trailing zero and use them in comparisons. Disable this optimization
- * under KMSAN to prevent false positive reports.
+ * load_unaligned_zeropad() or read_word_at_a_time() below may read
+ * uninitialized bytes after the trailing zero and use them in
+ * comparisons. Disable this optimization under KMSAN to prevent
+ * false positive reports.
*/
if (IS_ENABLED(CONFIG_KMSAN))
max = 0;
@@ -140,7 +143,11 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ c = load_unaligned_zeropad(src+res);
+#else
c = read_word_at_a_time(src+res);
+#endif
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
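Both read_word_at_a_time() and load_unaligned_zeropad() hand a whole word to has_zero(), which is why bytes past the terminating NUL can be read at all (and why the KMSAN opt-out above exists). A userspace sketch of the underlying zero-byte test is below; the constants follow the generic little-endian word-at-a-time helpers, which is an assumption, since architectures may provide their own variants.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Nonzero iff any byte of v is 0x00: the (v - 0x01..) & ~v & 0x80.. test. */
static int has_zero_byte(uint64_t v)
{
	const uint64_t ones  = 0x0101010101010101ULL;
	const uint64_t highs = 0x8080808080808080ULL;

	return ((v - ones) & ~v & highs) != 0;
}

int main(void)
{
	const char buf[16] = "kernel";		/* NUL inside the first word */
	uint64_t word;

	memcpy(&word, buf, sizeof(word));	/* stand-in for read_word_at_a_time() */
	printf("zero byte in first 8 bytes: %s\n",
	       has_zero_byte(word) ? "yes" : "no");
	return 0;
}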
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index d34df4306b87..222b39fc2629 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -899,8 +899,10 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
int err;
stats = objagg_hints_stats_get(objagg_hints);
- if (IS_ERR(stats))
+ if (IS_ERR(stats)) {
+ *errmsg = "objagg_hints_stats_get() failed.";
return PTR_ERR(stats);
+ }
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 5d7b10e98610..63b7566e7863 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -68,18 +68,22 @@ static void test_ubsan_shift_out_of_bounds(void)
static void test_ubsan_out_of_bounds(void)
{
- volatile int i = 4, j = 5, k = -1;
- volatile char above[4] = { }; /* Protect surrounding memory. */
- volatile int arr[4];
- volatile char below[4] = { }; /* Protect surrounding memory. */
+ int i = 4, j = 4, k = -1;
+ volatile struct {
+ char above[4]; /* Protect surrounding memory. */
+ int arr[4];
+ char below[4]; /* Protect surrounding memory. */
+ } data;
- above[0] = below[0];
+ OPTIMIZER_HIDE_VAR(i);
+ OPTIMIZER_HIDE_VAR(j);
+ OPTIMIZER_HIDE_VAR(k);
UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
- arr[j] = i;
+ data.arr[j] = i;
UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
- arr[k] = i;
+ data.arr[k] = i;
}
enum ubsan_test_enum {
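Two things make the rewritten test above reliable: the indices are hidden from the optimizer so the out-of-bounds accesses survive constant folding, and the array sits between padding members of a single struct so the deliberate overruns stay inside memory the test owns. A small sketch of the hiding trick follows; modelling OPTIMIZER_HIDE_VAR() with an empty asm constraint is an assumption about its definition, and the index here stays in bounds, unlike the intentional overflows in the test.

#include <stdio.h>

#define OPTIMIZER_HIDE_VAR(var)	__asm__ volatile("" : "+r" (var))

int main(void)
{
	int arr[4] = { 1, 2, 3, 4 };
	int j = 3;

	/* without this, the compiler can fold arr[j] to a constant */
	OPTIMIZER_HIDE_VAR(j);
	printf("arr[j] = %d\n", arr[j]);
	return 0;
}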
diff --git a/lib/usercopy_kunit.c b/lib/usercopy_kunit.c
index 77fa00a13df7..80f8abe10968 100644
--- a/lib/usercopy_kunit.c
+++ b/lib/usercopy_kunit.c
@@ -27,6 +27,7 @@
!defined(CONFIG_MICROBLAZE) && \
!defined(CONFIG_NIOS2) && \
!defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SPARC32) && \
!defined(CONFIG_SUPERH))
# define TEST_U64
#endif
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c5e2ec9303c5..a69e71a1ca55 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2255,7 +2255,7 @@ int __init no_hash_pointers_enable(char *str)
early_param("no_hash_pointers", no_hash_pointers_enable);
/* Used for Rust formatting ('%pA'). */
-char *rust_fmt_argument(char *buf, char *end, void *ptr);
+char *rust_fmt_argument(char *buf, char *end, const void *ptr);
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
index 0e3b2c0a527d..0dde8bf56595 100644
--- a/lib/zstd/common/portability_macros.h
+++ b/lib/zstd/common/portability_macros.h
@@ -55,7 +55,7 @@
#ifndef DYNAMIC_BMI2
#if ((defined(__clang__) && __has_attribute(__target__)) \
|| (defined(__GNUC__) \
- && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (__GNUC__ >= 11))) \
&& (defined(__x86_64__) || defined(_M_X64)) \
&& !defined(__BMI2__)
# define DYNAMIC_BMI2 1