Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      |  14
-rw-r--r--  lib/Kconfig.kcsan      |   2
-rw-r--r--  lib/Makefile           |   4
-rw-r--r--  lib/bug.c              |  15
-rw-r--r--  lib/dec_and_lock.c     |  31
-rw-r--r--  lib/group_cpus.c       | 428
-rw-r--r--  lib/iov_iter.c         |  15
-rw-r--r--  lib/kunit/assert.c     |  40
-rw-r--r--  lib/kunit/test.c       |   1
-rw-r--r--  lib/lockref.c          |   1
-rw-r--r--  lib/maple_tree.c       |  22
-rw-r--r--  lib/memcpy_kunit.c     |   2
-rw-r--r--  lib/nlattr.c           |   3
-rw-r--r--  lib/parser.c           |  39
-rw-r--r--  lib/sbitmap.c          | 102
-rw-r--r--  lib/scatterlist.c      |  25
-rw-r--r--  lib/string.c           |  10
-rw-r--r--  lib/test_maple_tree.c  |  89
-rw-r--r--  lib/ubsan.c            |  73
-rw-r--r--  lib/ubsan.h            |  32
-rw-r--r--  lib/usercopy.c         |   7
-rw-r--r--  lib/win_minmax.c       |   2
22 files changed, 828 insertions(+), 129 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 881c3f84e88a..02ee440f7be3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -754,6 +754,7 @@ config DEBUG_KMEMLEAK
select KALLSYMS
select CRC32
select STACKDEPOT
+ select STACKDEPOT_ALWAYS_INIT if !DEBUG_KMEMLEAK_DEFAULT_OFF
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
@@ -1207,7 +1208,7 @@ config SCHED_DEBUG
depends on DEBUG_KERNEL && PROC_FS
default y
help
- If you say Y here, the /proc/sched_debug file will be provided
+ If you say Y here, the /sys/kernel/debug/sched file will be provided
that can help debug the scheduler. The runtime overhead of this
option is minimal.
@@ -1917,7 +1918,7 @@ config FUNCTION_ERROR_INJECTION
help
Add fault injections into various functions that are annotated with
ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
- value of theses functions. This is useful to test error paths of code.
+ value of these functions. This is useful to test error paths of code.
If unsure, say N
@@ -2566,6 +2567,15 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
+config MEMCPY_SLOW_KUNIT_TEST
+ bool "Include exhaustive memcpy tests"
+ depends on MEMCPY_KUNIT_TEST
+ default y
+ help
+ Some memcpy tests are quite exhaustive in checking for overlaps
+ and bit ranges. These can be very slow, so they are split out
+ as a separate config, in case they need to be disabled.
+
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 375575a5a0e3..4dedd61e5192 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -194,7 +194,7 @@ config KCSAN_WEAK_MEMORY
Enable support for modeling a subset of weak memory, which allows
detecting a subset of data races due to missing memory barriers.
- Depends on KCSAN_STRICT, because the options strenghtening certain
+ Depends on KCSAN_STRICT, because the options strengthening certain
plain accesses by default (depending on !KCSAN_STRICT) reduce the
ability to detect any data races invoving reordered accesses, in
particular reordered writes.
diff --git a/lib/Makefile b/lib/Makefile
index 4d9461bfea42..36938c564a2a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -340,9 +340,7 @@ quiet_cmd_build_OID_registry = GEN $@
clean-files += oid_registry_data.c
obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
-ifneq ($(CONFIG_UBSAN_TRAP),y)
obj-$(CONFIG_UBSAN) += ubsan.o
-endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
@@ -353,6 +351,8 @@ obj-$(CONFIG_SBITMAP) += sbitmap.o
obj-$(CONFIG_PARMAN) += parman.o
+obj-y += group_cpus.o
+
# GCC library routines
obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
diff --git a/lib/bug.c b/lib/bug.c
index c223a2575b72..e0ff21989990 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,6 +47,7 @@
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
@@ -153,7 +154,7 @@ struct bug_entry *find_bug(unsigned long bugaddr)
return module_find_bug(bugaddr);
}
-enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
struct bug_entry *bug;
const char *file;
@@ -209,6 +210,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_BUG;
}
+enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+{
+ enum bug_trap_type ret;
+ bool rcu = false;
+
+ rcu = warn_rcu_enter();
+ ret = __report_bug(bugaddr, regs);
+ warn_rcu_exit(rcu);
+
+ return ret;
+}
+
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
struct bug_entry *bug;
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 9555b68bb774..1dcca8f2e194 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock(lock);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
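For context, a minimal sketch (not part of this patch) of the reference-drop pattern the new raw-spinlock variants enable; the struct and function names are hypothetical, and callers would normally go through wrapper macros in <linux/spinlock.h> rather than the underscored helpers:

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_obj {
	atomic_t refcnt;
	raw_spinlock_t lock;	/* raw lock: usable where sleeping locks are not (PREEMPT_RT) */
};

static void my_obj_put(struct my_obj *obj)
{
	unsigned long flags;

	/* Returns 1 with obj->lock held (irqs saved) only when the count hits zero. */
	if (!_atomic_dec_and_raw_lock_irqsave(&obj->refcnt, &obj->lock, &flags))
		return;

	/* Last reference: tear down state under the lock, then release and free. */
	raw_spin_unlock_irqrestore(&obj->lock, flags);
	kfree(obj);
}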
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
new file mode 100644
index 000000000000..9c837a35fef7
--- /dev/null
+++ b/lib/group_cpus.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/sort.h>
+#include <linux/group_cpus.h>
+
+#ifdef CONFIG_SMP
+
+static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
+ unsigned int cpus_per_grp)
+{
+ const struct cpumask *siblmsk;
+ int cpu, sibl;
+
+ for ( ; cpus_per_grp > 0; ) {
+ cpu = cpumask_first(nmsk);
+
+ /* Should not happen, but I'm too lazy to think about it */
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ cpumask_clear_cpu(cpu, nmsk);
+ cpumask_set_cpu(cpu, irqmsk);
+ cpus_per_grp--;
+
+ /* If the cpu has siblings, use them first */
+ siblmsk = topology_sibling_cpumask(cpu);
+ for (sibl = -1; cpus_per_grp > 0; ) {
+ sibl = cpumask_next(sibl, siblmsk);
+ if (sibl >= nr_cpu_ids)
+ break;
+ if (!cpumask_test_and_clear_cpu(sibl, nmsk))
+ continue;
+ cpumask_set_cpu(sibl, irqmsk);
+ cpus_per_grp--;
+ }
+ }
+}
+
+static cpumask_var_t *alloc_node_to_cpumask(void)
+{
+ cpumask_var_t *masks;
+ int node;
+
+ masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!masks)
+ return NULL;
+
+ for (node = 0; node < nr_node_ids; node++) {
+ if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+ goto out_unwind;
+ }
+
+ return masks;
+
+out_unwind:
+ while (--node >= 0)
+ free_cpumask_var(masks[node]);
+ kfree(masks);
+ return NULL;
+}
+
+static void free_node_to_cpumask(cpumask_var_t *masks)
+{
+ int node;
+
+ for (node = 0; node < nr_node_ids; node++)
+ free_cpumask_var(masks[node]);
+ kfree(masks);
+}
+
+static void build_node_to_cpumask(cpumask_var_t *masks)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
+}
+
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
+ const struct cpumask *mask, nodemask_t *nodemsk)
+{
+ int n, nodes = 0;
+
+ /* Calculate the number of nodes in the supplied affinity mask */
+ for_each_node(n) {
+ if (cpumask_intersects(mask, node_to_cpumask[n])) {
+ node_set(n, *nodemsk);
+ nodes++;
+ }
+ }
+ return nodes;
+}
+
+struct node_groups {
+ unsigned id;
+
+ union {
+ unsigned ngroups;
+ unsigned ncpus;
+ };
+};
+
+static int ncpus_cmp_func(const void *l, const void *r)
+{
+ const struct node_groups *ln = l;
+ const struct node_groups *rn = r;
+
+ return ln->ncpus - rn->ncpus;
+}
+
+/*
+ * Allocate group number for each node, so that for each node:
+ *
+ * 1) the allocated number is >= 1
+ *
+ * 2) the allocated number is <= active CPU number of this node
+ *
+ * The actual allocated total groups may be less than @numgrps when
+ * active total CPU number is less than @numgrps.
+ *
+ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+ * for each node.
+ */
+static void alloc_nodes_groups(unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ const nodemask_t nodemsk,
+ struct cpumask *nmsk,
+ struct node_groups *node_groups)
+{
+ unsigned n, remaining_ncpus = 0;
+
+ for (n = 0; n < nr_node_ids; n++) {
+ node_groups[n].id = n;
+ node_groups[n].ncpus = UINT_MAX;
+ }
+
+ for_each_node_mask(n, nodemsk) {
+ unsigned ncpus;
+
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ ncpus = cpumask_weight(nmsk);
+
+ if (!ncpus)
+ continue;
+ remaining_ncpus += ncpus;
+ node_groups[n].ncpus = ncpus;
+ }
+
+ numgrps = min_t(unsigned, remaining_ncpus, numgrps);
+
+ sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
+ ncpus_cmp_func, NULL);
+
+ /*
+ * Allocate groups for each node according to the ratio of this
+ * node's nr_cpus to remaining un-assigned ncpus. 'numgrps' is
+ * bigger than number of active numa nodes. Always start the
+ * allocation from the node with minimized nr_cpus.
+ *
+ * This way guarantees that each active node gets allocated at
+ * least one group, and the theory is simple: over-allocation
+ * is only done when this node is assigned by one group, so
+ * other nodes will be allocated >= 1 groups, since 'numgrps' is
+ * bigger than number of numa nodes.
+ *
+ * One perfect invariant is that number of allocated groups for
+ * each node is <= CPU count of this node:
+ *
+ * 1) suppose there are two nodes: A and B
+ * ncpu(X) is CPU count of node X
+ * grps(X) is the group count allocated to node X via this
+ * algorithm
+ *
+ * ncpu(A) <= ncpu(B)
+ * ncpu(A) + ncpu(B) = N
+ * grps(A) + grps(B) = G
+ *
+ * grps(A) = max(1, round_down(G * ncpu(A) / N))
+ * grps(B) = G - grps(A)
+ *
+ * both N and G are integer, and 2 <= G <= N, suppose
+ * G = N - delta, and 0 <= delta <= N - 2
+ *
+ * 2) obviously grps(A) <= ncpu(A) because:
+ *
+ * if grps(A) is 1, then grps(A) <= ncpu(A) given
+ * ncpu(A) >= 1
+ *
+ * otherwise,
+ * grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
+ *
+ * 3) prove how grps(B) <= ncpu(B):
+ *
+ * if round_down(G * ncpu(A) / N) == 0, vecs(B) won't be
+ * over-allocated, so grps(B) <= ncpu(B),
+ *
+ * otherwise:
+ *
+ * grps(A) =
+ * round_down(G * ncpu(A) / N) =
+ * round_down((N - delta) * ncpu(A) / N) =
+ * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
+ * round_down((N * ncpu(A) - delta * N) / N) =
+ * cpu(A) - delta
+ *
+ * then:
+ *
+ * grps(A) - G >= ncpu(A) - delta - G
+ * =>
+ * G - grps(A) <= G + delta - ncpu(A)
+ * =>
+ * grps(B) <= N - ncpu(A)
+ * =>
+ * grps(B) <= cpu(B)
+ *
+ * For nodes >= 3, it can be thought as one node and another big
+ * node given that is exactly what this algorithm is implemented,
+ * and we always re-calculate 'remaining_ncpus' & 'numgrps', and
+ * finally for each node X: grps(X) <= ncpu(X).
+ *
+ */
+ for (n = 0; n < nr_node_ids; n++) {
+ unsigned ngroups, ncpus;
+
+ if (node_groups[n].ncpus == UINT_MAX)
+ continue;
+
+ WARN_ON_ONCE(numgrps == 0);
+
+ ncpus = node_groups[n].ncpus;
+ ngroups = max_t(unsigned, 1,
+ numgrps * ncpus / remaining_ncpus);
+ WARN_ON_ONCE(ngroups > ncpus);
+
+ node_groups[n].ngroups = ngroups;
+
+ remaining_ncpus -= ncpus;
+ numgrps -= ngroups;
+ }
+}
+
+static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ struct cpumask *nmsk, struct cpumask *masks)
+{
+ unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
+ unsigned int last_grp = numgrps;
+ unsigned int curgrp = startgrp;
+ nodemask_t nodemsk = NODE_MASK_NONE;
+ struct node_groups *node_groups;
+
+ if (cpumask_empty(cpu_mask))
+ return 0;
+
+ nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
+
+ /*
+ * If the number of nodes in the mask is greater than or equal the
+ * number of groups we just spread the groups across the nodes.
+ */
+ if (numgrps <= nodes) {
+ for_each_node_mask(n, nodemsk) {
+ /* Ensure that only CPUs which are in both masks are set */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
+ if (++curgrp == last_grp)
+ curgrp = 0;
+ }
+ return numgrps;
+ }
+
+ node_groups = kcalloc(nr_node_ids,
+ sizeof(struct node_groups),
+ GFP_KERNEL);
+ if (!node_groups)
+ return -ENOMEM;
+
+ /* allocate group number for each node */
+ alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
+ nodemsk, nmsk, node_groups);
+ for (i = 0; i < nr_node_ids; i++) {
+ unsigned int ncpus, v;
+ struct node_groups *nv = &node_groups[i];
+
+ if (nv->ngroups == UINT_MAX)
+ continue;
+
+ /* Get the cpus on this node which are in the mask */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
+ ncpus = cpumask_weight(nmsk);
+ if (!ncpus)
+ continue;
+
+ WARN_ON_ONCE(nv->ngroups > ncpus);
+
+ /* Account for rounding errors */
+ extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
+
+ /* Spread allocated groups on CPUs of the current node */
+ for (v = 0; v < nv->ngroups; v++, curgrp++) {
+ cpus_per_grp = ncpus / nv->ngroups;
+
+ /* Account for extra groups to compensate rounding errors */
+ if (extra_grps) {
+ cpus_per_grp++;
+ --extra_grps;
+ }
+
+ /*
+ * wrapping has to be considered given 'startgrp'
+ * may start anywhere
+ */
+ if (curgrp >= last_grp)
+ curgrp = 0;
+ grp_spread_init_one(&masks[curgrp], nmsk,
+ cpus_per_grp);
+ }
+ done += nv->ngroups;
+ }
+ kfree(node_groups);
+ return done;
+}
+
+/**
+ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ *
+ * Return: cpumask array if successful, NULL otherwise. And each element
+ * includes CPUs assigned to this group
+ *
+ * Try to put close CPUs from viewpoint of CPU and NUMA locality into
+ * same group, and run two-stage grouping:
+ * 1) allocate present CPUs on these groups evenly first
+ * 2) allocate other possible CPUs on these groups evenly
+ *
+ * We guarantee in the resulted grouping that all CPUs are covered, and
+ * no same CPU is assigned to multiple groups
+ */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+ unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
+ cpumask_var_t *node_to_cpumask;
+ cpumask_var_t nmsk, npresmsk;
+ int ret = -ENOMEM;
+ struct cpumask *masks = NULL;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return NULL;
+
+ if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+ goto fail_nmsk;
+
+ node_to_cpumask = alloc_node_to_cpumask();
+ if (!node_to_cpumask)
+ goto fail_npresmsk;
+
+ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ goto fail_node_to_cpumask;
+
+ /* Stabilize the cpumasks */
+ cpus_read_lock();
+ build_node_to_cpumask(node_to_cpumask);
+
+ /* grouping present CPUs first */
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ cpu_present_mask, nmsk, masks);
+ if (ret < 0)
+ goto fail_build_affinity;
+ nr_present = ret;
+
+ /*
+ * Allocate non present CPUs starting from the next group to be
+ * handled. If the grouping of present CPUs already exhausted the
+ * group space, assign the non present CPUs to the already
+ * allocated out groups.
+ */
+ if (nr_present >= numgrps)
+ curgrp = 0;
+ else
+ curgrp = nr_present;
+ cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (ret >= 0)
+ nr_others = ret;
+
+ fail_build_affinity:
+ cpus_read_unlock();
+
+ if (ret >= 0)
+ WARN_ON(nr_present + nr_others < numgrps);
+
+ fail_node_to_cpumask:
+ free_node_to_cpumask(node_to_cpumask);
+
+ fail_npresmsk:
+ free_cpumask_var(npresmsk);
+
+ fail_nmsk:
+ free_cpumask_var(nmsk);
+ if (ret < 0) {
+ kfree(masks);
+ return NULL;
+ }
+ return masks;
+}
+#else /* CONFIG_SMP */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+ struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+
+ if (!masks)
+ return NULL;
+
+ /* assign all CPUs(cpu 0) to the 1st group only */
+ cpumask_copy(&masks[0], cpu_possible_mask);
+ return masks;
+}
+#endif /* CONFIG_SMP */
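As a rough illustration (not part of the patch), a driver-style caller could use group_cpus_evenly() like this to build per-queue CPU masks; the queue count, function name, and printout are purely illustrative. Note the result is one flat array of struct cpumask, released with a single kfree():

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/group_cpus.h>

static int assign_queue_cpus(unsigned int nr_queues)
{
	struct cpumask *masks;
	unsigned int q;

	masks = group_cpus_evenly(nr_queues);
	if (!masks)
		return -ENOMEM;

	for (q = 0; q < nr_queues; q++)
		pr_info("queue %u -> CPUs %*pbl\n", q, cpumask_pr_args(&masks[q]));

	kfree(masks);
	return 0;
}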
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f9a3ff37ecd1..d9b3332c8405 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1877,6 +1877,17 @@ int import_single_range(int rw, void __user *buf, size_t len,
}
EXPORT_SYMBOL(import_single_range);
+int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
+{
+ if (len > MAX_RW_COUNT)
+ len = MAX_RW_COUNT;
+ if (unlikely(!access_ok(buf, len)))
+ return -EFAULT;
+
+ iov_iter_ubuf(i, rw, buf, len);
+ return 0;
+}
+
/**
* iov_iter_restore() - Restore a &struct iov_iter to the same state as when
* iov_iter_save_state() was called.
@@ -1891,8 +1902,8 @@ EXPORT_SYMBOL(import_single_range);
*/
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
- if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
- !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
+ if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+ !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
return;
i->iov_offset = state->iov_offset;
i->count = state->count;
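A minimal sketch of how an in-kernel caller might use the new import_ubuf() helper to wrap a single user buffer in an ITER_UBUF iterator before copying into it; the function name and surrounding context are hypothetical, and ITER_DEST is assumed to be the direction constant for data flowing to the user buffer:

#include <linux/uio.h>
#include <linux/uaccess.h>

static int prep_user_read(void __user *ubuf, size_t len, struct iov_iter *iter)
{
	int ret;

	ret = import_ubuf(ITER_DEST, ubuf, len, iter);
	if (ret)
		return ret;	/* -EFAULT when the range fails access_ok() */

	/* iter now describes min(len, MAX_RW_COUNT) bytes; ready for copy_to_iter(). */
	return 0;
}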
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index f5b50babe38d..05a09652f5a1 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -241,24 +241,34 @@ void kunit_mem_assert_format(const struct kunit_assert *assert,
mem_assert = container_of(assert, struct kunit_mem_assert,
assert);
- string_stream_add(stream,
- KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
- mem_assert->text->left_text,
- mem_assert->text->operation,
- mem_assert->text->right_text);
+ if (!mem_assert->left_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->left_text);
+ } else if (!mem_assert->right_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->right_text);
+ } else {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ mem_assert->text->left_text,
+ mem_assert->text->operation,
+ mem_assert->text->right_text);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
- mem_assert->text->left_text);
- kunit_assert_hexdump(stream, mem_assert->left_value,
- mem_assert->right_value, mem_assert->size);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->left_text);
+ kunit_assert_hexdump(stream, mem_assert->left_value,
+ mem_assert->right_value, mem_assert->size);
- string_stream_add(stream, "\n");
+ string_stream_add(stream, "\n");
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
- mem_assert->text->right_text);
- kunit_assert_hexdump(stream, mem_assert->right_value,
- mem_assert->left_value, mem_assert->size);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->right_text);
+ kunit_assert_hexdump(stream, mem_assert->right_value,
+ mem_assert->left_value, mem_assert->size);
- kunit_assert_print_msg(message, stream);
+ kunit_assert_print_msg(message, stream);
+ }
}
EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c9ebf975e56b..890ba5b3a981 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -21,6 +21,7 @@
#include "try-catch-impl.h"
DEFINE_STATIC_KEY_FALSE(kunit_running);
+EXPORT_SYMBOL_GPL(kunit_running);
#if IS_BUILTIN(CONFIG_KUNIT)
/*
diff --git a/lib/lockref.c b/lib/lockref.c
index 45e93ece8ba0..2afe4c5d8919 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -23,7 +23,6 @@
} \
if (!--retry) \
break; \
- cpu_relax(); \
} \
} while (0)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 26e2045d3cda..5a976393c9ae 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -670,12 +670,13 @@ static inline unsigned long mte_pivot(const struct maple_enode *mn,
unsigned char piv)
{
struct maple_node *node = mte_to_node(mn);
+ enum maple_type type = mte_node_type(mn);
- if (piv >= mt_pivots[piv]) {
+ if (piv >= mt_pivots[type]) {
WARN_ON(1);
return 0;
}
- switch (mte_node_type(mn)) {
+ switch (type) {
case maple_arange_64:
return node->ma64.pivot[piv];
case maple_range_64:
@@ -4887,7 +4888,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
unsigned long *pivots, *gaps;
void __rcu **slots;
unsigned long gap = 0;
- unsigned long max, min, index;
+ unsigned long max, min;
unsigned char offset;
if (unlikely(mas_is_err(mas)))
@@ -4909,8 +4910,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, --offset);
max = mas_safe_pivot(mas, pivots, offset, type);
- index = mas->index;
- while (index <= max) {
+ while (mas->index <= max) {
gap = 0;
if (gaps)
gap = gaps[offset];
@@ -4941,10 +4941,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, offset);
}
- if (unlikely(index > max)) {
- mas_set_err(mas, -EBUSY);
- return false;
- }
+ if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
+ goto no_space;
if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
@@ -4961,9 +4959,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
return false;
ascend:
- if (mte_is_root(mas->node))
- mas_set_err(mas, -EBUSY);
+ if (!mte_is_root(mas->node))
+ return false;
+no_space:
+ mas_set_err(mas, -EBUSY);
return false;
}
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 89128551448d..887926f04731 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -309,6 +309,8 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
static void init_large(struct kunit *test)
{
+ if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
+ kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
/* Get many bit patterns. */
get_random_bytes(large_src, ARRAY_SIZE(large_src));
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 9055e8b4d144..489e15bde5c1 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (type <= 0 || type > maxtype)
return 0;
+ type = array_index_nospec(type, maxtype + 1);
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
}
continue;
}
+ type = array_index_nospec(type, maxtype + 1);
if (policy) {
int err = validate_nla(nla, maxtype, policy,
validate, extack, depth);
diff --git a/lib/parser.c b/lib/parser.c
index bcb23484100e..2b5e2b480253 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -11,6 +11,15 @@
#include <linux/slab.h>
#include <linux/string.h>
+/*
+ * max size needed by different bases to express U64
+ * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
+ * DEC: "18446744073709551615" --> 20
+ * OCT: "01777777777777777777777" --> 23
+ * pick the max one to define NUMBER_BUF_LEN
+ */
+#define NUMBER_BUF_LEN 24
+
/**
* match_one - Determines if a string matches a simple pattern
* @s: the string to examine for presence of the pattern
@@ -129,14 +138,12 @@ EXPORT_SYMBOL(match_token);
static int match_number(substring_t *s, int *result, int base)
{
char *endp;
- char *buf;
+ char buf[NUMBER_BUF_LEN];
int ret;
long val;
- buf = match_strdup(s);
- if (!buf)
- return -ENOMEM;
-
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
ret = 0;
val = simple_strtol(buf, &endp, base);
if (endp == buf)
@@ -145,7 +152,6 @@ static int match_number(substring_t *s, int *result, int base)
ret = -ERANGE;
else
*result = (int) val;
- kfree(buf);
return ret;
}
@@ -163,18 +169,15 @@ static int match_number(substring_t *s, int *result, int base)
*/
static int match_u64int(substring_t *s, u64 *result, int base)
{
- char *buf;
+ char buf[NUMBER_BUF_LEN];
int ret;
u64 val;
- buf = match_strdup(s);
- if (!buf)
- return -ENOMEM;
-
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
ret = kstrtoull(buf, base, &val);
if (!ret)
*result = val;
- kfree(buf);
return ret;
}
@@ -206,14 +209,12 @@ EXPORT_SYMBOL(match_int);
*/
int match_uint(substring_t *s, unsigned int *result)
{
- int err = -ENOMEM;
- char *buf = match_strdup(s);
+ char buf[NUMBER_BUF_LEN];
- if (buf) {
- err = kstrtouint(buf, 10, result);
- kfree(buf);
- }
- return err;
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
+
+ return kstrtouint(buf, 10, result);
}
EXPORT_SYMBOL(match_uint);
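For reference, a hedged sketch of the match_token()/match_int() pattern these helpers sit behind; with the on-stack buffer above, a numeric token longer than NUMBER_BUF_LEN - 1 characters now fails with -ERANGE instead of being parsed from a kmalloc'd copy. The option table and function below are illustrative only:

#include <linux/parser.h>
#include <linux/errno.h>

enum { Opt_size, Opt_err };

static const match_table_t tokens = {
	{ Opt_size, "size=%d" },
	{ Opt_err,  NULL },
};

static int parse_size_option(char *opt, int *size)
{
	substring_t args[MAX_OPT_ARGS];

	if (match_token(opt, tokens, args) != Opt_size)
		return -EINVAL;

	/* Ends up in match_number(); overly long digit strings return -ERANGE. */
	return match_int(&args[0], size);
}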
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 1fcede228fa2..eff4e42c425a 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -167,15 +167,16 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
return nr;
}
-static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
- unsigned int alloc_hint)
+static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
+ unsigned int depth,
+ unsigned int alloc_hint,
+ bool wrap)
{
- struct sbitmap_word *map = &sb->map[index];
int nr;
do {
- nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
- alloc_hint, !sb->round_robin);
+ nr = __sbitmap_get_word(&map->word, depth,
+ alloc_hint, wrap);
if (nr != -1)
break;
if (!sbitmap_deferred_clear(map))
@@ -185,25 +186,22 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
return nr;
}
-static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
+static int sbitmap_find_bit(struct sbitmap *sb,
+ unsigned int depth,
+ unsigned int index,
+ unsigned int alloc_hint,
+ bool wrap)
{
- unsigned int i, index;
+ unsigned int i;
int nr = -1;
- index = SB_NR_TO_INDEX(sb, alloc_hint);
-
- /*
- * Unless we're doing round robin tag allocation, just use the
- * alloc_hint to find the right word index. No point in looping
- * twice in find_next_zero_bit() for that case.
- */
- if (sb->round_robin)
- alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
- else
- alloc_hint = 0;
-
for (i = 0; i < sb->map_nr; i++) {
- nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
+ nr = sbitmap_find_bit_in_word(&sb->map[index],
+ min_t(unsigned int,
+ __map_depth(sb, index),
+ depth),
+ alloc_hint, wrap);
+
if (nr != -1) {
nr += index << sb->shift;
break;
@@ -218,6 +216,26 @@ static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
return nr;
}
+static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
+{
+ unsigned int index;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+ /*
+ * Unless we're doing round robin tag allocation, just use the
+ * alloc_hint to find the right word index. No point in looping
+ * twice in find_next_zero_bit() for that case.
+ */
+ if (sb->round_robin)
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+ else
+ alloc_hint = 0;
+
+ return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
+ !sb->round_robin);
+}
+
int sbitmap_get(struct sbitmap *sb)
{
int nr;
@@ -239,37 +257,12 @@ static int __sbitmap_get_shallow(struct sbitmap *sb,
unsigned int alloc_hint,
unsigned long shallow_depth)
{
- unsigned int i, index;
- int nr = -1;
+ unsigned int index;
index = SB_NR_TO_INDEX(sb, alloc_hint);
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
- for (i = 0; i < sb->map_nr; i++) {
-again:
- nr = __sbitmap_get_word(&sb->map[index].word,
- min_t(unsigned int,
- __map_depth(sb, index),
- shallow_depth),
- SB_NR_TO_BIT(sb, alloc_hint), true);
- if (nr != -1) {
- nr += index << sb->shift;
- break;
- }
-
- if (sbitmap_deferred_clear(&sb->map[index]))
- goto again;
-
- /* Jump to next index. */
- index++;
- alloc_hint = index << sb->shift;
-
- if (index >= sb->map_nr) {
- index = 0;
- alloc_hint = 0;
- }
- }
-
- return nr;
+ return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
@@ -464,13 +457,10 @@ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
unsigned int users)
{
unsigned int wake_batch;
- unsigned int min_batch;
unsigned int depth = (sbq->sb.depth + users - 1) / users;
- min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;
-
wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
- min_batch, SBQ_WAKE_BATCH);
+ 1, SBQ_WAKE_BATCH);
WRITE_ONCE(sbq->wake_batch, wake_batch);
}
@@ -521,11 +511,9 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
get_mask = ((1UL << nr_tags) - 1) << nr;
val = READ_ONCE(map->word);
- do {
- if ((val & ~get_mask) != val)
- goto next;
- } while (!atomic_long_try_cmpxchg(ptr, &val,
- get_mask | val));
+ while (!atomic_long_try_cmpxchg(ptr, &val,
+ get_mask | val))
+ ;
get_mask = (get_mask & ~val) >> nr;
if (get_mask) {
*offset = nr + (index << sb->shift);
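A rough usage sketch of the sbitmap API whose internals are being unified here (both sbitmap_get() and sbitmap_get_shallow() now funnel into the common sbitmap_find_bit() helper). This assumes the current sbitmap_init_node() signature; the depth and shift values are illustrative:

#include <linux/sbitmap.h>

static int demo_sbitmap(void)
{
	struct sbitmap sb;
	int tag;

	/* 128 bits, default shift, no round-robin, with per-CPU alloc hint. */
	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE, false, true))
		return -ENOMEM;

	tag = sbitmap_get(&sb);		/* returns -1 when no free bit is found */
	if (tag >= 0)
		sbitmap_clear_bit(&sb, tag);

	sbitmap_free(&sb);
	return 0;
}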
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a0ad2a7959b5..8d7519a8f308 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -470,22 +470,27 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
return -EOPNOTSUPP;
if (sgt_append->prv) {
+ unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+ sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
if (WARN_ON(offset))
return -EINVAL;
/* Merge contiguous pages into the last SG */
prv_len = sgt_append->prv->length;
- last_pg = sg_page(sgt_append->prv);
- while (n_pages && pages_are_mergeable(last_pg, pages[0])) {
- if (sgt_append->prv->length + PAGE_SIZE > max_segment)
- break;
- sgt_append->prv->length += PAGE_SIZE;
- last_pg = pages[0];
- pages++;
- n_pages--;
+ if (page_to_pfn(pages[0]) == next_pfn) {
+ last_pg = pfn_to_page(next_pfn - 1);
+ while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
+ if (sgt_append->prv->length + PAGE_SIZE > max_segment)
+ break;
+ sgt_append->prv->length += PAGE_SIZE;
+ last_pg = pages[0];
+ pages++;
+ n_pages--;
+ }
+ if (!n_pages)
+ goto out;
}
- if (!n_pages)
- goto out;
}
/* compute number of contiguous chunks */
diff --git a/lib/string.c b/lib/string.c
index 4fb566ea610f..3d55ef890106 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -480,13 +480,11 @@ EXPORT_SYMBOL(strcspn);
*/
char *strpbrk(const char *cs, const char *ct)
{
- const char *sc1, *sc2;
+ const char *sc;
- for (sc1 = cs; *sc1 != '\0'; ++sc1) {
- for (sc2 = ct; *sc2 != '\0'; ++sc2) {
- if (*sc1 == *sc2)
- return (char *)sc1;
- }
+ for (sc = cs; *sc != '\0'; ++sc) {
+ if (strchr(ct, *sc))
+ return (char *)sc;
}
return NULL;
}
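The rewrite above simply delegates the inner scan to strchr(); behaviour is unchanged, as in this small (illustrative) helper:

#include <linux/string.h>
#include <linux/types.h>

static bool has_option_separator(const char *opt)
{
	/* e.g. strpbrk("mode=ro,uid=0", ",=") returns a pointer to "=ro,uid=0" */
	return strpbrk(opt, ",=") != NULL;
}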
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 497fc93ccf9e..ec847bf4dcb4 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2517,6 +2517,91 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
mt_set_non_kernel(0);
}
+static noinline void check_empty_area_window(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 20;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 1; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 9,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* Create another hole besides the one at 0 */
+ mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
+
+ /* Check lower bounds that don't fit */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
+
+ /* Check lower bound that does fit */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
+ MT_BUG_ON(mt, mas.index != 5);
+ MT_BUG_ON(mt, mas.last != 9);
+ rcu_read_unlock();
+
+ /* Check one gap that doesn't fit and one that does */
+ rcu_read_lock();
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 161);
+ MT_BUG_ON(mt, mas.last != 169);
+
+ /* Check one gap that does fit above the min */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
+ MT_BUG_ON(mt, mas.index != 216);
+ MT_BUG_ON(mt, mas.last != 218);
+
+ /* Check size that doesn't fit any gap */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the lower end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the upper end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
+
+ /* Check mas_empty_area forward */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 8);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
+
+ mas_reset(&mas);
+ mas_empty_area(&mas, 100, 165, 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
+ rcu_read_unlock();
+}
+
static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
{
@@ -2765,6 +2850,10 @@ static int maple_tree_seed(void)
check_bnode_min_spanning(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_window(&tree);
+ mtree_destroy(&tree);
+
#if defined(BENCH)
skip:
#endif
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 60c7099857a0..e2cc4a799312 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -14,10 +14,76 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/ubsan.h>
#include <kunit/test-bug.h>
#include "ubsan.h"
+#ifdef CONFIG_UBSAN_TRAP
+/*
+ * Only include matches for UBSAN checks that are actually compiled in.
+ * The mappings of struct SanitizerKind (the -fsanitize=xxx args) to
+ * enum SanitizerHandler (the traps) in Clang is in clang/lib/CodeGen/.
+ */
+const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+{
+ switch (check_type) {
+#ifdef CONFIG_UBSAN_BOUNDS
+ /*
+ * SanitizerKind::ArrayBounds and SanitizerKind::LocalBounds
+ * emit SanitizerHandler::OutOfBounds.
+ */
+ case ubsan_out_of_bounds:
+ return "UBSAN: array index out of bounds";
+#endif
+#ifdef CONFIG_UBSAN_SHIFT
+ /*
+ * SanitizerKind::ShiftBase and SanitizerKind::ShiftExponent
+ * emit SanitizerHandler::ShiftOutOfBounds.
+ */
+ case ubsan_shift_out_of_bounds:
+ return "UBSAN: shift out of bounds";
+#endif
+#ifdef CONFIG_UBSAN_DIV_ZERO
+ /*
+ * SanitizerKind::IntegerDivideByZero emits
+ * SanitizerHandler::DivremOverflow.
+ */
+ case ubsan_divrem_overflow:
+ return "UBSAN: divide/remainder overflow";
+#endif
+#ifdef CONFIG_UBSAN_UNREACHABLE
+ /*
+ * SanitizerKind::Unreachable emits
+ * SanitizerHandler::BuiltinUnreachable.
+ */
+ case ubsan_builtin_unreachable:
+ return "UBSAN: unreachable code";
+#endif
+#if defined(CONFIG_UBSAN_BOOL) || defined(CONFIG_UBSAN_ENUM)
+ /*
+ * SanitizerKind::Bool and SanitizerKind::Enum emit
+ * SanitizerHandler::LoadInvalidValue.
+ */
+ case ubsan_load_invalid_value:
+ return "UBSAN: loading invalid value";
+#endif
+#ifdef CONFIG_UBSAN_ALIGNMENT
+ /*
+ * SanitizerKind::Alignment emits SanitizerHandler::TypeMismatch
+ * or SanitizerHandler::AlignmentAssumption.
+ */
+ case ubsan_alignment_assumption:
+ return "UBSAN: alignment assumption";
+ case ubsan_type_mismatch:
+ return "UBSAN: type mismatch";
+#endif
+ default:
+ return "UBSAN: unrecognized failure code";
+ }
+}
+
+#else
static const char * const type_check_kinds[] = {
"load of",
"store to",
@@ -339,9 +405,10 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
{
struct invalid_value_data *data = _data;
char val_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
if (suppress_report(&data->location))
- return;
+ goto out;
ubsan_prologue(&data->location, "invalid-load");
@@ -351,6 +418,8 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
val_str, data->type->type_name);
ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
@@ -384,3 +453,5 @@ void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
+
+#endif /* !CONFIG_UBSAN_TRAP */
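With CONFIG_UBSAN_TRAP, the compiler lowers each enabled check to a trap whose immediate encodes the check type, and an architecture's trap handler can translate that code into a readable message via report_ubsan_failure(). A hypothetical handler fragment (how check_type is decoded from the trap is architecture-specific and not shown here):

#include <linux/ubsan.h>
#include <linux/printk.h>
#include <linux/ptrace.h>

static void handle_ubsan_trap(struct pt_regs *regs, u32 check_type)
{
	pr_crit("%s at %pS\n", report_ubsan_failure(regs, check_type),
		(void *)instruction_pointer(regs));
}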
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 9a0b71c5ff9f..cc5cb94895a6 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -2,6 +2,38 @@
#ifndef _LIB_UBSAN_H
#define _LIB_UBSAN_H
+/*
+ * ABI defined by Clang's UBSAN enum SanitizerHandler:
+ * https://github.com/llvm/llvm-project/blob/release/16.x/clang/lib/CodeGen/CodeGenFunction.h#L113
+ */
+enum ubsan_checks {
+ ubsan_add_overflow,
+ ubsan_builtin_unreachable,
+ ubsan_cfi_check_fail,
+ ubsan_divrem_overflow,
+ ubsan_dynamic_type_cache_miss,
+ ubsan_float_cast_overflow,
+ ubsan_function_type_mismatch,
+ ubsan_implicit_conversion,
+ ubsan_invalid_builtin,
+ ubsan_invalid_objc_cast,
+ ubsan_load_invalid_value,
+ ubsan_missing_return,
+ ubsan_mul_overflow,
+ ubsan_negate_overflow,
+ ubsan_nullability_arg,
+ ubsan_nullability_return,
+ ubsan_nonnull_arg,
+ ubsan_nonnull_return,
+ ubsan_out_of_bounds,
+ ubsan_pointer_overflow,
+ ubsan_shift_out_of_bounds,
+ ubsan_sub_overflow,
+ ubsan_type_mismatch,
+ ubsan_alignment_assumption,
+ ubsan_vla_bound_not_positive,
+};
+
enum {
type_kind_int = 0,
type_kind_float = 1,
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1505a52f23a0..d29fe29c6849 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -3,6 +3,7 @@
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
/* out-of-line parts */
@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
diff --git a/lib/win_minmax.c b/lib/win_minmax.c
index 6bdc1cd15f76..ec10506834b6 100644
--- a/lib/win_minmax.c
+++ b/lib/win_minmax.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* lib/minmax.c: windowed min/max tracker
*
* Kathleen Nichols' algorithm for tracking the minimum (or maximum)