Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_debugfs.h        |   5
-rw-r--r--  lib/Kconfig                  |   8
-rw-r--r--  lib/Kconfig.debug            |  49
-rw-r--r--  lib/Kconfig.kasan            |  11
-rw-r--r--  lib/Makefile                 |   6
-rw-r--r--  lib/dim/Makefile             |   6
-rw-r--r--  lib/dim/dim.c                |   4
-rw-r--r--  lib/dim/net_dim.c            |  56
-rw-r--r--  lib/dim/rdma_dim.c           | 108
-rw-r--r--  lib/dynamic_debug.c          |  12
-rw-r--r--  lib/fault-inject.c           |  73
-rw-r--r--  lib/fonts/fonts.c            | 103
-rw-r--r--  lib/genalloc.c               | 125
-rw-r--r--  lib/ioremap.c                |  11
-rw-r--r--  lib/iov_iter.c               |  15
-rw-r--r--  lib/jedec_ddr_data.c         | 132
-rw-r--r--  lib/kobject.c                |   4
-rw-r--r--  lib/mpi/longlong.h           |  16
-rw-r--r--  lib/notifier-error-inject.c  |  13
-rw-r--r--  lib/percpu-refcount.c        |  13
-rw-r--r--  lib/raid6/Makefile           |  98
-rw-r--r--  lib/rbtree.c                 |  40
-rw-r--r--  lib/string.c                 |  11
-rw-r--r--  lib/string_helpers.c         |  77
-rw-r--r--  lib/test_firmware.c          |   5
-rw-r--r--  lib/test_kasan.c             |  98
-rw-r--r--  lib/test_meminit.c           | 364
-rw-r--r--  lib/test_overflow.c          |  11
-rw-r--r--  lib/test_string.c            |  83
-rw-r--r--  lib/vdso/gettimeofday.c      |  79
30 files changed, 1103 insertions, 533 deletions
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
index 277e403e8701..4469407c3e0d 100644
--- a/lib/842/842_debugfs.h
+++ b/lib/842/842_debugfs.h
@@ -22,8 +22,6 @@ static int __init sw842_debugfs_create(void)
return -ENODEV;
sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
- if (IS_ERR(sw842_debugfs_root))
- return PTR_ERR(sw842_debugfs_root);
for (i = 0; i < ARRAY_SIZE(template_count); i++) {
char name[32];
@@ -46,8 +44,7 @@ static int __init sw842_debugfs_create(void)
static void __exit sw842_debugfs_remove(void)
{
- if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root))
- debugfs_remove_recursive(sw842_debugfs_root);
+ debugfs_remove_recursive(sw842_debugfs_root);
}
#endif
diff --git a/lib/Kconfig b/lib/Kconfig
index 52a7b2e6fb74..f33d66fc0e86 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -531,14 +531,6 @@ config LRU_CACHE
config CLZ_TAB
bool
-config DDR
- bool "JEDEC DDR data"
- help
- Data from JEDEC specs for DDR SDRAM memories,
- particularly the AC timing parameters and addressing
- information. This data is useful for drivers handling
- DDR SDRAM controllers.
-
config IRQ_POLL
bool "IRQ polling library"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d4c8c9323aa4..5960e2980a8a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -305,19 +305,26 @@ config DEBUG_FS
If unsure, say N.
-config HEADERS_CHECK
- bool "Run 'make headers_check' when building vmlinux"
+config HEADERS_INSTALL
+ bool "Install uapi headers to usr/include"
depends on !UML
help
- This option will extract the user-visible kernel headers whenever
- building the kernel, and will run basic sanity checks on them to
- ensure that exported files do not attempt to include files which
- were not exported, etc.
+ This option will install uapi headers (headers exported to user-space)
+ into the usr/include directory for use during the kernel build.
+ This is unneeded for building the kernel itself, but needed for some
+ user-space program samples. It is also needed by some features such
+ as uapi header sanity checks.
+
+config HEADERS_CHECK
+ bool "Run sanity checks on uapi headers when building 'all'"
+ depends on HEADERS_INSTALL
+ help
+ This option will run basic sanity checks on uapi headers when
+ building the 'all' target, for example, ensure that they do not
+ attempt to include files which were not exported, etc.
If you're making modifications to header files which are
- relevant for userspace, say 'Y', and check the headers
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
+ relevant for userspace, say 'Y'.
config OPTIMIZE_INLINING
bool "Allow compiler to uninline functions marked 'inline'"
@@ -346,23 +353,13 @@ config DEBUG_SECTION_MISMATCH
which results in the code/data being placed in specific sections.
The section mismatch analysis is always performed after a full
kernel build, and enabling this option causes the following
- additional steps to occur:
+ additional step to occur:
- Add the option -fno-inline-functions-called-once to gcc commands.
When inlining a function annotated with __init in a non-init
function, we would lose the section information and thus
the analysis would not catch the illegal reference.
This option tells gcc to inline less (but it does result in
a larger kernel).
- - Run the section mismatch analysis for each module/built-in.a file.
- When we run the section mismatch analysis on vmlinux.o, we
- lose valuable information about where the mismatch was
- introduced.
- Running the analysis for each module/built-in.a file
- tells where the mismatch happens much closer to the
- source. The drawback is that the same mismatch is
- reported at least twice.
- - Enable verbose reporting from modpost in order to help resolve
- the section mismatches that are reported.
config SECTION_MISMATCH_WARN_ONLY
bool "Make section mismatch errors non-fatal"
@@ -1132,7 +1129,7 @@ config PROVE_LOCKING
the proof of observed correctness is also maintained for an
arbitrary combination of these separate locking variants.
- For more details, see Documentation/locking/lockdep-design.txt.
+ For more details, see Documentation/locking/lockdep-design.rst.
config LOCK_STAT
bool "Lock usage statistics"
@@ -1146,7 +1143,7 @@ config LOCK_STAT
help
This feature enables tracking lock contention points
- For more details, see Documentation/locking/lockstat.txt
+ For more details, see Documentation/locking/lockstat.rst
This also enables lock events required by "perf lock",
subcommand of perf.
@@ -2069,6 +2066,14 @@ config TEST_STACKINIT
If unsure, say N.
+config TEST_MEMINIT
+ tristate "Test heap/page initialization"
+ help
+ Test if the kernel is zero-initializing heap and page allocations.
+ This can be useful to test init_on_alloc and init_on_free features.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fafba1a923b..7fa97a8b5717 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -106,7 +106,6 @@ endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- default !(CLANG_VERSION < 90000)
depends on KASAN
help
The LLVM stack address sanitizer has a know problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
Disabling asan-stack makes it safe to run kernels build
with clang-8 with KASAN enabled, though it loses some of
the functionality.
- This feature is always disabled when compile-testing with clang-8
- or earlier to avoid cluttering the output in stack overflow
- warnings, but clang-8 users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc and later clang versions it is
- assumed to always be safe to use and enabled by default.
+ This feature is always disabled when compile-testing with clang
+ to avoid cluttering the output in stack overflow warnings,
+ but clang users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
+ to use and enabled by default.
config KASAN_STACK
int
diff --git a/lib/Makefile b/lib/Makefile
index fdd56bc219b8..29c02a924973 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
+obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
@@ -208,8 +209,6 @@ obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
-obj-$(CONFIG_DDR) += jedec_ddr_data.o
-
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
@@ -280,7 +279,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index 160afe288df0..1d6858a108cb 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -2,8 +2,6 @@
# DIM Dynamic Interrupt Moderation library
#
-obj-$(CONFIG_DIMLIB) = net_dim.o
+obj-$(CONFIG_DIMLIB) += dim.o
-net_dim-y = \
- dim.o \
- net_dim.o
+dim-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 439d641ec796..38045d6d0538 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -74,8 +74,8 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
delta_us);
curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
if (curr_stats->epms != 0)
- curr_stats->cpe_ratio =
- (curr_stats->cpms * 100) / curr_stats->epms;
+ curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
+ curr_stats->cpms * 100, curr_stats->epms);
else
curr_stats->cpe_ratio = 0;
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 5bcc902c5388..a4db51c21266 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -5,6 +5,62 @@
#include <linux/dim.h>
+/*
+ * Net DIM profiles:
+ * There are different set of profiles for each CQ period mode.
+ * There are different set of profiles for RX/TX CQs.
+ * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
+ */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+#define NET_DIM_RX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_RX_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+#define NET_DIM_TX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+}
+
+#define NET_DIM_TX_CQE_PROFILES { \
+ {5, 128}, \
+ {8, 64}, \
+ {16, 32}, \
+ {32, 32}, \
+ {64, 32} \
+}
+
+static const struct dim_cq_moder
+rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_RX_EQE_PROFILES,
+ NET_DIM_RX_CQE_PROFILES,
+};
+
+static const struct dim_cq_moder
+tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_TX_EQE_PROFILES,
+ NET_DIM_TX_CQE_PROFILES,
+};
+
struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
new file mode 100644
index 000000000000..f7e26c7b4749
--- /dev/null
+++ b/lib/dim/rdma_dim.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+static int rdma_dim_step(struct dim *dim)
+{
+ if (dim->tune_state == DIM_GOING_RIGHT) {
+ if (dim->profile_ix == (RDMA_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ }
+ if (dim->tune_state == DIM_GOING_LEFT) {
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ }
+
+ return DIM_STEPPED;
+}
+
+static int rdma_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ /* first stat */
+ if (!prev->cpms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
+ return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
+ return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_ix = dim->profile_ix;
+ u8 state = dim->tune_state;
+ int stats_res;
+ int step_res;
+
+ if (state != DIM_PARKING_ON_TOP && state != DIM_PARKING_TIRED) {
+ stats_res = rdma_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+
+ switch (stats_res) {
+ case DIM_STATS_SAME:
+ if (curr_stats->cpe_ratio <= 50 * prev_ix)
+ dim->profile_ix = 0;
+ break;
+ case DIM_STATS_WORSE:
+ dim_turn(dim);
+ /* fall through */
+ case DIM_STATS_BETTER:
+ step_res = rdma_dim_step(dim);
+ if (step_res == DIM_ON_EDGE)
+ dim_turn(dim);
+ break;
+ }
+ }
+
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+void rdma_dim(struct dim *dim, u64 completions)
+{
+ struct dim_sample *curr_sample = &dim->measuring_sample;
+ struct dim_stats curr_stats;
+ u32 nevents;
+
+ dim_update_sample_with_comps(curr_sample->event_ctr + 1, 0, 0,
+ curr_sample->comp_ctr + completions,
+ &dim->measuring_sample);
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
+ if (nevents < DIM_NEVENTS)
+ break;
+ dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats);
+ if (rdma_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ /* fall through */
+ case DIM_START_MEASURE:
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
+ curr_sample->comp_ctr,
+ &dim->start_sample);
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(rdma_dim);
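
[Not part of the patch] A minimal sketch of how a driver could wire up the new rdma_dim() entry point. Only struct dim, rdma_dim() and the DIM_* states come from the library added above; struct my_cq, my_dim_work() and my_apply_moderation() are hypothetical driver names, and the work handler pattern (apply dim->profile_ix, then re-arm with DIM_START_MEASURE) is an assumption based on the state machine shown in the diff.

    struct my_cq {                          /* hypothetical driver object */
            struct dim dim;                 /* INIT_WORK(&cq->dim.work, my_dim_work) at setup */
            /* ... */
    };

    static void my_dim_work(struct work_struct *work)
    {
            struct dim *dim = container_of(work, struct dim, work);
            struct my_cq *cq = container_of(dim, struct my_cq, dim);

            /* Apply the moderation selected by dim->profile_ix, then
             * re-arm the algorithm for the next measurement window. */
            my_apply_moderation(cq, dim->profile_ix);
            dim->state = DIM_START_MEASURE;
    }

    /* In the CQ completion handler, feed the number of completions polled: */
    rdma_dim(&cq->dim, ncompletions);
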
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..c60409138e13 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -993,20 +993,14 @@ static __initdata int ddebug_init_success;
static int __init dynamic_debug_init_debugfs(void)
{
- struct dentry *dir, *file;
+ struct dentry *dir;
if (!ddebug_init_success)
return -ENODEV;
dir = debugfs_create_dir("dynamic_debug", NULL);
- if (!dir)
- return -ENOMEM;
- file = debugfs_create_file("control", 0644, dir, NULL,
- &ddebug_proc_fops);
- if (!file) {
- debugfs_remove(dir);
- return -ENOMEM;
- }
+ debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
+
return 0;
}
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 3cb21b2bf088..8186ca84910b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -166,10 +166,10 @@ static int debugfs_ul_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
-static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_ul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value, &fops_ul);
+ debugfs_create_file(name, mode, parent, value, &fops_ul);
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
@@ -185,12 +185,11 @@ static int debugfs_stacktrace_depth_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
-static struct dentry *debugfs_create_stacktrace_depth(
- const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value,
- &fops_stacktrace_depth);
+ debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
@@ -202,51 +201,31 @@ struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
-
- if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
- goto fail;
- if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
- goto fail;
- if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
- goto fail;
- if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
- goto fail;
- if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
- &attr->ratelimit_state.interval))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
- &attr->ratelimit_state.burst))
- goto fail;
- if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
- goto fail;
+ if (IS_ERR(dir))
+ return dir;
+
+ debugfs_create_ul("probability", mode, dir, &attr->probability);
+ debugfs_create_ul("interval", mode, dir, &attr->interval);
+ debugfs_create_atomic_t("times", mode, dir, &attr->times);
+ debugfs_create_atomic_t("space", mode, dir, &attr->space);
+ debugfs_create_ul("verbose", mode, dir, &attr->verbose);
+ debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+ &attr->ratelimit_state.interval);
+ debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+ &attr->ratelimit_state.burst);
+ debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
- if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
- &attr->stacktrace_depth))
- goto fail;
- if (!debugfs_create_ul("require-start", mode, dir,
- &attr->require_start))
- goto fail;
- if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
- goto fail;
- if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
- goto fail;
- if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
- goto fail;
-
+ debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+ &attr->stacktrace_depth);
+ debugfs_create_ul("require-start", mode, dir, &attr->require_start);
+ debugfs_create_ul("require-end", mode, dir, &attr->require_end);
+ debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
+ debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
attr->dname = dget(dir);
return dir;
-fail:
- debugfs_remove_recursive(dir);
-
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9969358a7af5..e7258d8c252b 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -20,56 +20,42 @@
#endif
#include <linux/font.h>
-#define NO_FONTS
-
static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_8x8
-#undef NO_FONTS
- &font_vga_8x8,
+ &font_vga_8x8,
#endif
#ifdef CONFIG_FONT_8x16
-#undef NO_FONTS
- &font_vga_8x16,
+ &font_vga_8x16,
#endif
#ifdef CONFIG_FONT_6x11
-#undef NO_FONTS
- &font_vga_6x11,
+ &font_vga_6x11,
#endif
#ifdef CONFIG_FONT_7x14
-#undef NO_FONTS
- &font_7x14,
+ &font_7x14,
#endif
#ifdef CONFIG_FONT_SUN8x16
-#undef NO_FONTS
- &font_sun_8x16,
+ &font_sun_8x16,
#endif
#ifdef CONFIG_FONT_SUN12x22
-#undef NO_FONTS
- &font_sun_12x22,
+ &font_sun_12x22,
#endif
#ifdef CONFIG_FONT_10x18
-#undef NO_FONTS
- &font_10x18,
+ &font_10x18,
#endif
#ifdef CONFIG_FONT_ACORN_8x8
-#undef NO_FONTS
- &font_acorn_8x8,
+ &font_acorn_8x8,
#endif
#ifdef CONFIG_FONT_PEARL_8x8
-#undef NO_FONTS
- &font_pearl_8x8,
+ &font_pearl_8x8,
#endif
#ifdef CONFIG_FONT_MINI_4x6
-#undef NO_FONTS
- &font_mini_4x6,
+ &font_mini_4x6,
#endif
#ifdef CONFIG_FONT_6x10
-#undef NO_FONTS
- &font_6x10,
+ &font_6x10,
#endif
#ifdef CONFIG_FONT_TER16x32
-#undef NO_FONTS
- &font_ter_16x32,
+ &font_ter_16x32,
#endif
};
@@ -90,16 +76,17 @@ static const struct font_desc *fonts[] = {
* specified font.
*
*/
-
const struct font_desc *find_font(const char *name)
{
- unsigned int i;
+ unsigned int i;
- for (i = 0; i < num_fonts; i++)
- if (!strcmp(fonts[i]->name, name))
- return fonts[i];
- return NULL;
+ BUILD_BUG_ON(!num_fonts);
+ for (i = 0; i < num_fonts; i++)
+ if (!strcmp(fonts[i]->name, name))
+ return fonts[i];
+ return NULL;
}
+EXPORT_SYMBOL(find_font);
/**
@@ -116,44 +103,46 @@ const struct font_desc *find_font(const char *name)
* chosen font.
*
*/
-
const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
u32 font_h)
{
- int i, c, cc;
- const struct font_desc *f, *g;
-
- g = NULL;
- cc = -10000;
- for(i=0; i<num_fonts; i++) {
- f = fonts[i];
- c = f->pref;
+ int i, c, cc, res;
+ const struct font_desc *f, *g;
+
+ g = NULL;
+ cc = -10000;
+ for (i = 0; i < num_fonts; i++) {
+ f = fonts[i];
+ c = f->pref;
#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
- if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
- c = 100;
+ if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
+ c = 100;
#endif
#ifdef CONFIG_FONT_6x11
- if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
- c = 100;
+ if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
+ c = 100;
#endif
#endif
- if ((yres < 400) == (f->height <= 8))
- c += 1000;
+ if ((yres < 400) == (f->height <= 8))
+ c += 1000;
+
+ /* prefer a bigger font for high resolution */
+ res = (xres / f->width) * (yres / f->height) / 1000;
+ if (res > 20)
+ c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
- c += 1000;
+ if ((font_w & (1 << (f->width - 1))) &&
+ (font_h & (1 << (f->height - 1))))
+ c += 1000;
- if (c > cc) {
- cc = c;
- g = f;
+ if (c > cc) {
+ cc = c;
+ g = f;
+ }
}
- }
- return g;
+ return g;
}
-
-EXPORT_SYMBOL(find_font);
EXPORT_SYMBOL(get_default_font);
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 5257f74fccf3..9fc31292cfa1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -327,21 +327,45 @@ EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address return value. Use NULL if unneeded.
+ * @dma: dma-view physical address return value. Use %NULL if unneeded.
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
*/
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
+ return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
+ * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
+ * usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool. Uses the
+ * given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
unsigned long vaddr;
if (!pool)
return NULL;
- vaddr = gen_pool_alloc(pool, size);
+ vaddr = gen_pool_alloc_algo(pool, size, algo, data);
if (!vaddr)
return NULL;
@@ -350,7 +374,102 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
return (void *)vaddr;
}
-EXPORT_SYMBOL(gen_pool_dma_alloc);
+EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
+
+/**
+ * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
+ * usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number bytes from the specified pool, with the given
+ * alignment restriction. Can not be used in NMI handler on architectures
+ * without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_alloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc_align);
+
+/**
+ * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
+ * DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: dma-view physical address return value. Use %NULL if unneeded.
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc);
+
+/**
+ * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
+ * DMA usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool. Uses
+ * the given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
+ void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
+
+ if (vaddr)
+ memset(vaddr, 0, size);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
+
+/**
+ * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
+ * DMA usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool,
+ * with the given alignment restriction. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_zalloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
* gen_pool_free - free allocated special memory back to the pool
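
[Not part of the patch] An illustrative sketch of the new aligned/zeroed DMA helpers added above. The pool variable, buffer size and alignment are assumptions; the pool itself is assumed to have been set up elsewhere (e.g. with gen_pool_create()/gen_pool_add_virt()).

    #include <linux/genalloc.h>

    /* Allocate a zeroed, 64-byte-aligned buffer from an existing pool and
     * retrieve its DMA-view address in one call. */
    dma_addr_t dma;
    void *buf = gen_pool_dma_zalloc_align(pool, 256, &dma, 64);

    if (!buf)
            return -ENOMEM;
    /* ... hand (buf, dma) to the device ... */
    gen_pool_free(pool, (unsigned long)buf, 256);
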
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 063213685563..0a2ffadc6d71 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -30,6 +30,8 @@ early_param("nohugeiomap", set_nohugeiomap);
void __init ioremap_huge_init(void)
{
if (!ioremap_huge_disabled) {
+ if (arch_ioremap_p4d_supported())
+ ioremap_p4d_capable = 1;
if (arch_ioremap_pud_supported())
ioremap_pud_capable = 1;
if (arch_ioremap_pmd_supported())
@@ -86,6 +88,9 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
if ((end - addr) != PMD_SIZE)
return 0;
+ if (!IS_ALIGNED(addr, PMD_SIZE))
+ return 0;
+
if (!IS_ALIGNED(phys_addr, PMD_SIZE))
return 0;
@@ -126,6 +131,9 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
if ((end - addr) != PUD_SIZE)
return 0;
+ if (!IS_ALIGNED(addr, PUD_SIZE))
+ return 0;
+
if (!IS_ALIGNED(phys_addr, PUD_SIZE))
return 0;
@@ -166,6 +174,9 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
if ((end - addr) != P4D_SIZE)
return 0;
+ if (!IS_ALIGNED(addr, P4D_SIZE))
+ return 0;
+
if (!IS_ALIGNED(phys_addr, P4D_SIZE))
return 0;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f99c41d4eb54..f1e0569b4539 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1634,9 +1634,9 @@ EXPORT_SYMBOL(dup_iter);
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
- * Return: 0 on success or negative error code on error.
+ * Return: Negative error code on error, bytes imported on success
*/
-int import_iovec(int type, const struct iovec __user * uvector,
+ssize_t import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iov, struct iov_iter *i)
{
@@ -1652,16 +1652,17 @@ int import_iovec(int type, const struct iovec __user * uvector,
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
- return 0;
+ return n;
}
EXPORT_SYMBOL(import_iovec);
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
+ssize_t compat_import_iovec(int type,
+ const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
{
ssize_t n;
struct iovec *p;
@@ -1675,7 +1676,7 @@ int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
- return 0;
+ return n;
}
#endif
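
[Not part of the patch] With this change, import_iovec()/compat_import_iovec() return the total byte count on success instead of 0, so the caller pattern becomes (illustrative only):

    struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
    struct iov_iter iter;
    ssize_t ret;

    ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
    if (ret < 0)
            return ret;
    /* ret now equals iov_iter_count(&iter); 0 simply means an empty iovec. */

    /* ... do the I/O ... */

    kfree(iov);     /* NULL when the on-stack array was used; kfree(NULL) is a no-op */
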
diff --git a/lib/jedec_ddr_data.c b/lib/jedec_ddr_data.c
deleted file mode 100644
index d0b312e28d36..000000000000
--- a/lib/jedec_ddr_data.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * DDR addressing details and AC timing parameters from JEDEC specs
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Aneesh V <aneesh@ti.com>
- */
-
-#include <memory/jedec_ddr.h>
-#include <linux/module.h>
-
-/* LPDDR2 addressing details from JESD209-2 section 2.4 */
-const struct lpddr2_addressing
- lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES] = {
- {B4, T_REFI_15_6, T_RFC_90}, /* 64M */
- {B4, T_REFI_15_6, T_RFC_90}, /* 128M */
- {B4, T_REFI_7_8, T_RFC_90}, /* 256M */
- {B4, T_REFI_7_8, T_RFC_90}, /* 512M */
- {B8, T_REFI_7_8, T_RFC_130}, /* 1GS4 */
- {B8, T_REFI_3_9, T_RFC_130}, /* 2GS4 */
- {B8, T_REFI_3_9, T_RFC_130}, /* 4G */
- {B8, T_REFI_3_9, T_RFC_210}, /* 8G */
- {B4, T_REFI_7_8, T_RFC_130}, /* 1GS2 */
- {B4, T_REFI_3_9, T_RFC_130}, /* 2GS2 */
-};
-EXPORT_SYMBOL_GPL(lpddr2_jedec_addressing_table);
-
-/* LPDDR2 AC timing parameters from JESD209-2 section 12 */
-const struct lpddr2_timings
- lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES] = {
- /* Speed bin 400(200 MHz) */
- [0] = {
- .max_freq = 200000000,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 10000,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 6000,
- },
- /* Speed bin 533(266 MHz) */
- [1] = {
- .max_freq = 266666666,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 7500,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 6000,
- },
- /* Speed bin 800(400 MHz) */
- [2] = {
- .max_freq = 400000000,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 7500,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 6000,
- },
- /* Speed bin 1066(533 MHz) */
- [3] = {
- .max_freq = 533333333,
- .min_freq = 10000000,
- .tRPab = 21000,
- .tRCD = 18000,
- .tWR = 15000,
- .tRAS_min = 42000,
- .tRRD = 10000,
- .tWTR = 7500,
- .tXP = 7500,
- .tRTP = 7500,
- .tCKESR = 15000,
- .tDQSCK_max = 5500,
- .tFAW = 50000,
- .tZQCS = 90000,
- .tZQCL = 360000,
- .tZQinit = 1000000,
- .tRAS_max_ns = 70000,
- .tDQSCK_max_derated = 5620,
- },
-};
-EXPORT_SYMBOL_GPL(lpddr2_jedec_timings);
-
-const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
- .tRPab = 3,
- .tRCD = 3,
- .tWR = 3,
- .tRASmin = 3,
- .tRRD = 2,
- .tWTR = 2,
- .tXP = 2,
- .tRTP = 2,
- .tCKE = 3,
- .tCKESR = 3,
- .tFAW = 8
-};
-EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
diff --git a/lib/kobject.c b/lib/kobject.c
index f2ccdbac8ed9..83198cb37d8d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -498,8 +498,10 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
- if (!kobj->parent)
+ if (!kobj->parent) {
+ kobject_put(kobj);
return -EINVAL;
+ }
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 08c60d10747f..3bb6260d8f42 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -397,8 +397,8 @@ do { \
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addl %5,%1\n" \
"adcl %3,%0" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%0" ((USItype)(ah)), \
"g" ((USItype)(bh)), \
"%1" ((USItype)(al)), \
@@ -406,22 +406,22 @@ do { \
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("subl %5,%1\n" \
"sbbl %3,%0" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "0" ((USItype)(ah)), \
"g" ((USItype)(bh)), \
"1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mull %3" \
- : "=a" ((USItype)(w0)), \
- "=d" ((USItype)(w1)) \
+ : "=a" (w0), \
+ "=d" (w1) \
: "%0" ((USItype)(u)), \
"rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
__asm__ ("divl %4" \
- : "=a" ((USItype)(q)), \
- "=d" ((USItype)(r)) \
+ : "=a" (q), \
+ "=d" (r) \
: "0" ((USItype)(n0)), \
"1" ((USItype)(n1)), \
"rm" ((USItype)(d)))
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 3d2ba7cf83f4..21016b32d313 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -59,33 +59,22 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
err_inject->nb.priority = priority;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
actions_dir = debugfs_create_dir("actions", dir);
- if (!actions_dir)
- goto fail;
for (action = err_inject->actions; action->name; action++) {
struct dentry *action_dir;
action_dir = debugfs_create_dir(action->name, actions_dir);
- if (!action_dir)
- goto fail;
/*
* Create debugfs r/w file containing action->error. If
* notifier call chain is called with action->val, it will
* fail with the error code
*/
- if (!debugfs_create_errno("error", mode, action_dir,
- &action->error))
- goto fail;
+ debugfs_create_errno("error", mode, action_dir, &action->error);
}
return dir;
-fail:
- debugfs_remove_recursive(dir);
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(notifier_err_inject_init);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 071a76c7bac0..4f6c6ebbbbde 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -70,11 +70,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
return -ENOMEM;
ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+ ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
- if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
- else
+ ref->allow_reinit = true;
+ } else {
start_count += PERCPU_COUNT_BIAS;
+ }
if (flags & PERCPU_REF_INIT_DEAD)
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -120,6 +123,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
ref->confirm_switch = NULL;
wake_up_all(&percpu_ref_switch_waitq);
+ if (!ref->allow_reinit)
+ percpu_ref_exit(ref);
+
/* drop ref from percpu_ref_switch_to_atomic() */
percpu_ref_put(ref);
}
@@ -195,6 +201,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
+ if (WARN_ON_ONCE(!ref->allow_reinit))
+ return;
+
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
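
[Not part of the patch] Illustrative only: after this change, a ref that will be re-initialized later (percpu_ref_reinit() after percpu_ref_kill()) must opt in with the new flag at init time, otherwise the percpu counters may be released once the ref switches to atomic mode. foo and foo_ref_release are hypothetical names.

    ret = percpu_ref_init(&foo->ref, foo_ref_release,
                          PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
    if (ret)
            return ret;
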
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index e723eacf7868..0083b5cc646c 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -12,9 +12,6 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
-
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
@@ -26,7 +23,6 @@ CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
CFLAGS_REMOVE_altivec8.o += -msoft-float
-CFLAGS_REMOVE_altivec8.o += -msoft-float
CFLAGS_REMOVE_vpermxor1.o += -msoft-float
CFLAGS_REMOVE_vpermxor2.o += -msoft-float
CFLAGS_REMOVE_vpermxor4.o += -msoft-float
@@ -51,111 +47,39 @@ CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
endif
endif
-targets += int1.c
-$(obj)/int1.c: UNROLL := 1
-$(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int2.c
-$(obj)/int2.c: UNROLL := 2
-$(obj)/int2.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int4.c
-$(obj)/int4.c: UNROLL := 4
-$(obj)/int4.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int8.c
-$(obj)/int8.c: UNROLL := 8
-$(obj)/int8.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int16.c
-$(obj)/int16.c: UNROLL := 16
-$(obj)/int16.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
+quiet_cmd_unroll = UNROLL $@
+ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
-targets += int32.c
-$(obj)/int32.c: UNROLL := 32
-$(obj)/int32.c: $(src)/int.uc $(src)/unroll.awk FORCE
+targets += int1.c int2.c int4.c int8.c int16.c int32.c
+$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_altivec1.o += $(altivec_flags)
-targets += altivec1.c
-$(obj)/altivec1.c: UNROLL := 1
-$(obj)/altivec1.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec2.o += $(altivec_flags)
-targets += altivec2.c
-$(obj)/altivec2.c: UNROLL := 2
-$(obj)/altivec2.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec4.o += $(altivec_flags)
-targets += altivec4.c
-$(obj)/altivec4.c: UNROLL := 4
-$(obj)/altivec4.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec8.o += $(altivec_flags)
-targets += altivec8.c
-$(obj)/altivec8.c: UNROLL := 8
-$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
+targets += altivec1.c altivec2.c altivec4.c altivec8.c
+$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_vpermxor1.o += $(altivec_flags)
-targets += vpermxor1.c
-$(obj)/vpermxor1.c: UNROLL := 1
-$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor2.o += $(altivec_flags)
-targets += vpermxor2.c
-$(obj)/vpermxor2.c: UNROLL := 2
-$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor4.o += $(altivec_flags)
-targets += vpermxor4.c
-$(obj)/vpermxor4.c: UNROLL := 4
-$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor8.c
-$(obj)/vpermxor8.c: UNROLL := 8
-$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
+$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_neon1.o += $(NEON_FLAGS)
-targets += neon1.c
-$(obj)/neon1.c: UNROLL := 1
-$(obj)/neon1.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon2.o += $(NEON_FLAGS)
-targets += neon2.c
-$(obj)/neon2.c: UNROLL := 2
-$(obj)/neon2.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon4.o += $(NEON_FLAGS)
-targets += neon4.c
-$(obj)/neon4.c: UNROLL := 4
-$(obj)/neon4.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon8.o += $(NEON_FLAGS)
-targets += neon8.c
-$(obj)/neon8.c: UNROLL := 8
-$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+targets += neon1.c neon2.c neon4.c neon8.c
+$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += s390vx8.c
-$(obj)/s390vx8.c: UNROLL := 8
-$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
+$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1ef6e25d031c..abc86c6a3177 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
- if (newleft)
- *leftmost = node;
-
while (true) {
/*
* Loop invariant: node is red.
@@ -437,38 +433,19 @@ static const struct rb_augment_callbacks dummy_callbacks = {
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, false, NULL, dummy_rotate);
+ __rb_insert(node, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color);
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root,
- NULL, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);
-void rb_insert_color_cached(struct rb_node *node,
- struct rb_root_cached *root, bool leftmost)
-{
- __rb_insert(node, &root->rb_root, leftmost,
- &root->rb_leftmost, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_insert_color_cached);
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
- struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost, &dummy_callbacks);
- if (rebalance)
- ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_erase_cached);
-
/*
* Augmented rbtree manipulation functions.
*
@@ -477,10 +454,9 @@ EXPORT_SYMBOL(rb_erase_cached);
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, newleft, leftmost, augment_rotate);
+ __rb_insert(node, root, augment_rotate);
}
EXPORT_SYMBOL(__rb_insert_augmented);
@@ -591,16 +567,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
}
EXPORT_SYMBOL(rb_replace_node);
-void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root)
-{
- rb_replace_node(victim, new, &root->rb_root);
-
- if (root->rb_leftmost == victim)
- root->rb_leftmost = new;
-}
-EXPORT_SYMBOL(rb_replace_node_cached);
-
void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root)
{
diff --git a/lib/string.c b/lib/string.c
index 6016eb3ac73d..461fb620f85f 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -400,6 +400,9 @@ EXPORT_SYMBOL(strncmp);
* strchr - Find the first occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
*/
char *strchr(const char *s, int c)
{
@@ -453,12 +456,18 @@ EXPORT_SYMBOL(strrchr);
* @s: The string to be searched
* @count: The number of characters to be searched
* @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
*/
char *strnchr(const char *s, size_t count, int c)
{
- for (; count-- && *s != '\0'; ++s)
+ while (count--) {
if (*s == (char)c)
return (char *)s;
+ if (*s++ == '\0')
+ break;
+ }
return NULL;
}
EXPORT_SYMBOL(strnchr);
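
[Not part of the patch] As with strchr(), strnchr() can now find the terminating NUL when it lies within the searched range (illustrative):

    const char buf[] = "abc";

    strnchr(buf, sizeof(buf), '\0');   /* now returns &buf[3] instead of NULL */
    strnchr(buf, 3, '\0');             /* still NULL: the NUL is outside the range */
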
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 3a90a9e2b94a..963050c0283e 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -231,35 +231,36 @@ static bool unescape_special(char **src, char **dst)
* @src: source buffer (escaped)
* @dst: destination buffer (unescaped)
* @size: size of the destination buffer (0 to unlimit)
- * @flags: combination of the flags (bitwise OR):
- * %UNESCAPE_SPACE:
+ * @flags: combination of the flags.
+ *
+ * Description:
+ * The function unquotes characters in the given string.
+ *
+ * Because the size of the output will be the same as or less than the size of
+ * the input, the transformation may be performed in place.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * destination buffer will always be NULL-terminated. Source string must be
+ * NULL-terminated as well. The supported flags are::
+ *
+ * UNESCAPE_SPACE:
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
- * %UNESCAPE_OCTAL:
+ * UNESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (1 to 3 digits)
- * %UNESCAPE_HEX:
+ * UNESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
- * %UNESCAPE_SPECIAL:
+ * UNESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
- * %UNESCAPE_ANY:
+ * UNESCAPE_ANY:
* all previous together
*
- * Description:
- * The function unquotes characters in the given string.
- *
- * Because the size of the output will be the same as or less than the size of
- * the input, the transformation may be performed in place.
- *
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will always be NULL-terminated. Source string must be
- * NULL-terminated as well.
- *
* Return:
* The amount of the characters processed to the destination buffer excluding
* trailing '\0' is returned.
@@ -441,7 +442,29 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* @isz: source buffer size
* @dst: destination buffer (escaped)
* @osz: destination buffer size
- * @flags: combination of the flags (bitwise OR):
+ * @flags: combination of the flags
+ * @only: NULL-terminated string containing characters used to limit
+ * the selected escape class. If characters are included in @only
+ * that would not normally be escaped by the classes selected
+ * in @flags, they will be copied to @dst unescaped.
+ *
+ * Description:
+ * The process of escaping byte buffer includes several parts. They are applied
+ * in the following sequence.
+ *
+ * 1. The character is matched to the printable class, if asked, and in
+ * case of match it passes through to the output.
+ * 2. The character is not matched to the one from @only string and thus
+ * must go as-is to the output.
+ * 3. The character is checked if it falls into the class given by @flags.
+ * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
+ * character. Note that they actually can't go together, otherwise
+ * %ESCAPE_HEX will be ignored.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * destination buffer will not be NULL-terminated, thus caller have to append
+ * it if needs. The supported flags are::
+ *
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
@@ -464,26 +487,6 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
- * @only: NULL-terminated string containing characters used to limit
- * the selected escape class. If characters are included in @only
- * that would not normally be escaped by the classes selected
- * in @flags, they will be copied to @dst unescaped.
- *
- * Description:
- * The process of escaping byte buffer includes several parts. They are applied
- * in the following sequence.
- * 1. The character is matched to the printable class, if asked, and in
- * case of match it passes through to the output.
- * 2. The character is not matched to the one from @only string and thus
- * must go as-is to the output.
- * 3. The character is checked if it falls into the class given by @flags.
- * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
- * character. Note that they actually can't go together, otherwise
- * %ESCAPE_HEX will be ignored.
- *
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will not be NULL-terminated, thus caller have to append
- * it if needs.
*
* Return:
* The total size of the escaped output that would be generated for
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 83ea6c4e623c..6ca97a63b3d6 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -886,8 +886,11 @@ static int __init test_firmware_init(void)
return -ENOMEM;
rc = __test_firmware_config_init();
- if (rc)
+ if (rc) {
+ kfree(test_fw_config);
+ pr_err("could not init firmware test config: %d\n", rc);
return rc;
+ }
rc = misc_register(&test_fw_misc_device);
if (rc) {
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index e3c593c38eff..b63b367a94e8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -7,16 +7,17 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -619,6 +620,95 @@ static noinline void __init kasan_strings(void)
strnlen(ptr, 1);
}
+static noinline void __init kasan_bitops(void)
+{
+ /*
+ * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
+ * this way we do not actually corrupt other memory.
+ */
+ long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ /*
+ * Below calls try to access bit within allocated memory; however, the
+ * below accesses are still out-of-bounds, since bitops are defined to
+ * operate on the whole long the bit is in.
+ */
+ pr_info("out-of-bounds in set_bit\n");
+ set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __set_bit\n");
+ __set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit\n");
+ clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit\n");
+ __clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit_unlock\n");
+ clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit_unlock\n");
+ __clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in change_bit\n");
+ change_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __change_bit\n");
+ __change_bit(BITS_PER_LONG, bits);
+
+ /*
+ * Below calls try to access bit beyond allocated memory.
+ */
+ pr_info("out-of-bounds in test_and_set_bit\n");
+ test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_set_bit\n");
+ __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_set_bit_lock\n");
+ test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_clear_bit\n");
+ test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_clear_bit\n");
+ __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_change_bit\n");
+ test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_change_bit\n");
+ __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_bit\n");
+ (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
+ clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
+#endif
+ kfree(bits);
+}
+
+static noinline void __init kmalloc_double_kzfree(void)
+{
+ char *ptr;
+ size_t size = 16;
+
+ pr_info("double-free (kzfree)\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kzfree(ptr);
+ kzfree(ptr);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -660,6 +750,8 @@ static int __init kmalloc_tests_init(void)
kasan_memchr();
kasan_memcmp();
kasan_strings();
+ kasan_bitops();
+ kmalloc_double_kzfree();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
new file mode 100644
index 000000000000..9729f271d150
--- /dev/null
+++ b/lib/test_meminit.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for SL[AOU]B/page initialization at alloc/free time.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#define GARBAGE_INT (0x09A7BA9E)
+#define GARBAGE_BYTE (0x9E)
+
+#define REPORT_FAILURES_IN_FN() \
+ do { \
+ if (failures) \
+ pr_info("%s failed %d out of %d times\n", \
+ __func__, failures, num_tests); \
+ else \
+ pr_info("all %d tests in %s passed\n", \
+ num_tests, __func__); \
+ } while (0)
+
+/* Calculate the number of uninitialized bytes in the buffer. */
+static int __init count_nonzero_bytes(void *ptr, size_t size)
+{
+ int i, ret = 0;
+ unsigned char *p = (unsigned char *)ptr;
+
+ for (i = 0; i < size; i++)
+ if (p[i])
+ ret++;
+ return ret;
+}
+
+/* Fill a buffer with garbage, skipping |skip| first bytes. */
+static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
+{
+ unsigned int *p = (unsigned int *)((char *)ptr + skip);
+ int i = 0;
+
+ WARN_ON(skip > size);
+ size -= skip;
+
+ while (size >= sizeof(*p)) {
+ p[i] = GARBAGE_INT;
+ i++;
+ size -= sizeof(*p);
+ }
+ if (size)
+ memset(&p[i], GARBAGE_BYTE, size);
+}
+
+static void __init fill_with_garbage(void *ptr, size_t size)
+{
+ fill_with_garbage_skip(ptr, size, 0);
+}
+
+static int __init do_alloc_pages_order(int order, int *total_failures)
+{
+ struct page *page;
+ void *buf;
+ size_t size = PAGE_SIZE << order;
+
+ page = alloc_pages(GFP_KERNEL, order);
+ buf = page_address(page);
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+
+ page = alloc_pages(GFP_KERNEL, order);
+ buf = page_address(page);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+ return 1;
+}
+
+/* Test the page allocator by calling alloc_pages with different orders. */
+static int __init test_pages(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i;
+
+ for (i = 0; i < 10; i++)
+ num_tests += do_alloc_pages_order(i, &failures);
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test kmalloc() with given parameters. */
+static int __init do_kmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ fill_with_garbage(buf, size);
+ kfree(buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ kfree(buf);
+ return 1;
+}
+
+/* Test vmalloc() with given parameters. */
+static int __init do_vmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = vmalloc(size);
+ fill_with_garbage(buf, size);
+ vfree(buf);
+
+ buf = vmalloc(size);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ vfree(buf);
+ return 1;
+}
+
+/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
+static int __init test_kvmalloc(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 20; i++) {
+ size = 1 << i;
+ num_tests += do_kmalloc_size(size, &failures);
+ num_tests += do_vmalloc_size(size, &failures);
+ }
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+#define CTOR_BYTES (sizeof(unsigned int))
+#define CTOR_PATTERN (0x41414141)
+/* Initialize the first 4 bytes of the object. */
+static void test_ctor(void *obj)
+{
+ *(unsigned int *)obj = CTOR_PATTERN;
+}
+
+/*
+ * Check the invariants for the buffer allocated from a slab cache.
+ * If the cache has a test constructor, the first 4 bytes of the object must
+ * always remain equal to CTOR_PATTERN.
+ * If the cache isn't an RCU-typesafe one, or if the allocation is done with
+ * __GFP_ZERO, then the object contents must be zeroed after allocation.
+ * If the cache is an RCU-typesafe one, the object contents must never be
+ * zeroed after the first use. This is checked by memcmp() in
+ * do_kmem_cache_size().
+ */
+static bool __init check_buf(void *buf, int size, bool want_ctor,
+ bool want_rcu, bool want_zero)
+{
+ int bytes;
+ bool fail = false;
+
+ bytes = count_nonzero_bytes(buf, size);
+ WARN_ON(want_ctor && want_zero);
+ if (want_zero)
+ return bytes;
+ if (want_ctor) {
+ if (*(unsigned int *)buf != CTOR_PATTERN)
+ fail = 1;
+ } else {
+ if (bytes)
+ fail = !want_rcu;
+ }
+ return fail;
+}
+
+/*
+ * Test kmem_cache with given parameters:
+ * want_ctor - use a constructor;
+ * want_rcu - use SLAB_TYPESAFE_BY_RCU;
+ * want_zero - use __GFP_ZERO.
+ */
+static int __init do_kmem_cache_size(size_t size, bool want_ctor,
+ bool want_rcu, bool want_zero,
+ int *total_failures)
+{
+ struct kmem_cache *c;
+ int iter;
+ bool fail = false;
+ gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
+ void *buf, *buf_copy;
+
+ c = kmem_cache_create("test_cache", size, 1,
+ want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
+ want_ctor ? test_ctor : NULL);
+ for (iter = 0; iter < 10; iter++) {
+ buf = kmem_cache_alloc(c, alloc_mask);
+ /* Check that buf is zeroed, if it must be. */
+ fail = check_buf(buf, size, want_ctor, want_rcu, want_zero);
+ fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);
+
+ if (!want_rcu) {
+ kmem_cache_free(c, buf);
+ continue;
+ }
+
+ /*
+ * If this is an RCU cache, use a critical section to ensure we
+ * can touch objects after they're freed.
+ */
+ rcu_read_lock();
+ /*
+ * Copy the buffer to check that it's not wiped on
+ * free().
+ */
+ buf_copy = kmalloc(size, GFP_ATOMIC);
+ if (buf_copy)
+ memcpy(buf_copy, buf, size);
+
+ kmem_cache_free(c, buf);
+ /*
+ * Check that |buf| is intact after kmem_cache_free().
+ * |want_zero| is false, because we wrote garbage to
+ * the buffer already.
+ */
+ fail |= check_buf(buf, size, want_ctor, want_rcu,
+ false);
+ if (buf_copy) {
+ fail |= (bool)memcmp(buf, buf_copy, size);
+ kfree(buf_copy);
+ }
+ rcu_read_unlock();
+ }
+ kmem_cache_destroy(c);
+
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Check that the data written to an RCU-allocated object survives
+ * reallocation.
+ */
+static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ void *buf, *buf_contents, *saved_ptr;
+ void **used_objects;
+ int i, iter, maxiter = 1024;
+ bool fail = false;
+
+ c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ saved_ptr = buf;
+ fill_with_garbage(buf, size);
+ buf_contents = kmalloc(size, GFP_KERNEL);
+ if (!buf_contents)
+ goto out;
+ used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
+ if (!used_objects) {
+ kfree(buf_contents);
+ goto out;
+ }
+ memcpy(buf_contents, buf, size);
+ kmem_cache_free(c, buf);
+ /*
+ * Run for a fixed number of iterations. If we never hit saved_ptr,
+ * assume the test passes.
+ */
+ for (iter = 0; iter < maxiter; iter++) {
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ used_objects[iter] = buf;
+ if (buf == saved_ptr) {
+ fail = memcmp(buf_contents, buf, size);
+ for (i = 0; i <= iter; i++)
+ kmem_cache_free(c, used_objects[i]);
+ goto free_out;
+ }
+ }
+
+free_out:
+ kmem_cache_destroy(c);
+ kfree(buf_contents);
+ kfree(used_objects);
+out:
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Test kmem_cache allocation by creating caches of different sizes, with and
+ * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
+ */
+static int __init test_kmemcache(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, flags, size;
+ bool ctor, rcu, zero;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ for (flags = 0; flags < 8; flags++) {
+ ctor = flags & 1;
+ rcu = flags & 2;
+ zero = flags & 4;
+			if (ctor && zero)
+ continue;
+ num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
+ &failures);
+ }
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
+static int __init test_rcu_persistent(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ num_tests += do_kmem_cache_rcu_persistent(size, &failures);
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/*
+ * Run the tests. Each test function returns the number of executed tests and
+ * updates |failures| with the number of failed tests.
+ */
+static int __init test_meminit_init(void)
+{
+ int failures = 0, num_tests = 0;
+
+ num_tests += test_pages(&failures);
+ num_tests += test_kvmalloc(&failures);
+ num_tests += test_kmemcache(&failures);
+ num_tests += test_rcu_persistent(&failures);
+
+ if (failures == 0)
+ pr_info("all %d tests passed!\n", num_tests);
+ else
+ pr_info("failures: %d out of %d\n", failures, num_tests);
+
+ return failures ? -EINVAL : 0;
+}
+module_init(test_meminit_init);
+
+MODULE_LICENSE("GPL");
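
test_meminit.c above relies on a simple heuristic: poison an allocation with a recognizable pattern, free it, allocate again, and count non-zero bytes to detect data that should have been initialized. Below is a standalone userspace sketch of that heuristic; malloc() merely stands in for the kernel allocators, and every name is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GARBAGE_BYTE 0x9E

static size_t count_nonzero(const unsigned char *p, size_t size)
{
	size_t i, ret = 0;

	for (i = 0; i < size; i++)
		if (p[i])
			ret++;
	return ret;
}

int main(void)
{
	size_t size = 4096;
	unsigned char *buf = malloc(size);

	if (!buf)
		return 1;
	memset(buf, GARBAGE_BYTE, size);	/* poison the old allocation */
	free(buf);

	buf = malloc(size);			/* may or may not reuse the poisoned block */
	if (!buf)
		return 1;
	printf("%zu of %zu bytes non-zero after realloc\n",
	       count_nonzero(buf, size), size);
	free(buf);
	return 0;
}

Whether the second allocation reuses the poisoned block depends on the allocator, which is why the kernel test repeats each case across many sizes and reports per-function failure counts.
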
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
index fc680562d8b6..7a4b6f6c5473 100644
--- a/lib/test_overflow.c
+++ b/lib/test_overflow.c
@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void)
* Deal with the various forms of allocator arguments. See comments above
* the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
*/
-#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
-#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
#define alloc000(alloc, arg, sz) alloc(sz)
#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
-#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
#define free0(free, arg, ptr) free(ptr)
#define free1(free, arg, ptr) free(arg, ptr)
-/* Wrap around to 8K */
-#define TEST_SIZE (9 << PAGE_SHIFT)
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)
#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
static int __init test_ ## func (void *arg) \
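
The TEST_SIZE change above keeps the wrapped-allocation case page-aligned independent of PAGE_SHIFT. The exact overflowing expression lives in DEFINE_TEST_ALLOC(), which this hunk does not show; assuming it multiplies TEST_SIZE by one more than SIZE_MAX / TEST_SIZE, the standalone sketch below shows why the new comment says the product wraps around to 16K.

#include <stdint.h>
#include <stdio.h>

#define TEST_SIZE ((size_t)(5 * 4096))

int main(void)
{
	size_t a = TEST_SIZE;
	size_t b = SIZE_MAX / TEST_SIZE + 1;
	size_t wrapped = a * b;		/* intentionally overflows size_t */

	printf("a * b wraps to %zu bytes (16K == %d)\n", wrapped, 16 * 1024);
	return 0;
}

The same arithmetic holds for 32-bit and 64-bit size_t, since 2^32 and 2^64 are both congruent to 4096 modulo 20480.
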
diff --git a/lib/test_string.c b/lib/test_string.c
index bf8def01ed20..7b31f4a505bf 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -36,7 +36,7 @@ static __init int memset16_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
@@ -72,7 +72,7 @@ static __init int memset32_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
@@ -108,7 +108,74 @@ static __init int memset64_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+}
+
+static __init int strchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ if (result - test_string != i)
+ return i + 'a';
+ }
+
+ result = strchr(empty_string, '\0');
+ if (result != empty_string)
+ return 0x101;
+
+ result = strchr(empty_string, 'a');
+ if (result)
+ return 0x102;
+
+ result = strchr(test_string, 'z');
+ if (result)
+ return 0x103;
+
+ return 0;
+}
+
+static __init int strnchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ if (!result)
+ continue;
+ return ((i + 'a') << 8) | j;
+ }
+ if (result - test_string != i)
+ return ((i + 'a') << 8) | j;
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ if (result)
+ return 0x10001;
+
+ result = strnchr(empty_string, 1, '\0');
+ if (result != empty_string)
+ return 0x10002;
+
+ result = strnchr(empty_string, 1, 'a');
+ if (result)
+ return 0x10003;
+
+ result = strnchr(NULL, 0, '\0');
+ if (result)
+ return 0x10004;
+
return 0;
}
@@ -131,6 +198,16 @@ static __init int string_selftest_init(void)
if (subtest)
goto fail;
+ test = 4;
+ subtest = strchr_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 5;
+ subtest = strnchr_selftest();
+ if (subtest)
+ goto fail;
+
pr_info("String selftests succeeded\n");
return 0;
fail:
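
strnchr_selftest() above assumes that the NUL terminator counts as part of the searched string and that the length limit is honored before any dereference, so strnchr(NULL, 0, c) is safe. A userspace sketch of an implementation consistent with those expectations follows; it is an illustration, not a copy of the kernel's lib/string.c code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static char *my_strnchr(const char *s, size_t count, int c)
{
	while (count--) {
		if (*s == (char)c)
			return (char *)s;
		if (*s++ == '\0')
			break;
	}
	return NULL;
}

int main(void)
{
	const char *test_string = "abcdefghijkl";
	const char *empty_string = "";

	assert(my_strnchr(test_string, 3, 'c') == test_string + 2);
	assert(my_strnchr(test_string, 2, 'c') == NULL);	/* limit too short */
	assert(my_strnchr(empty_string, 1, '\0') == empty_string);
	assert(my_strnchr(empty_string, 0, '\0') == NULL);
	printf("strnchr sketch: all checks passed\n");
	return 0;
}
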
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2d1c1f241fd9..e630e7ff57f1 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
ns = vdso_ts->nsec;
last = vd->cycle_last;
if (unlikely((s64)cycles < 0))
- return clock_gettime_fallback(clk, ts);
+ return -1;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
/*
* Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
} else if (msk & VDSO_RAW) {
return do_hres(&vd[CS_RAW], clock, ts);
}
+ return -1;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ int ret = __cvdso_clock_gettime_common(clock, ts);
-fallback:
- return clock_gettime_fallback(clock, ts);
+ if (unlikely(ret))
+ return clock_gettime_fallback(clock, ts);
+ return 0;
}
static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_gettime_common(clock, &ts);
- ret = __cvdso_clock_gettime(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_gettime32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_gettime_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
}
static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
const struct vdso_data *vd = __arch_get_vdso_data();
- u64 ns;
+ u64 hrtimer_res;
u32 msk;
- u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ u64 ns;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
+ hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
/*
* Convert the clockid to a bitmask and use it to check which
* clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
*/
ns = hrtimer_res;
} else {
- goto fallback;
+ return -1;
}
- if (res) {
- res->tv_sec = 0;
- res->tv_nsec = ns;
- }
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
return 0;
+}
+
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ int ret = __cvdso_clock_getres_common(clock, res);
-fallback:
- return clock_getres_fallback(clock, res);
+ if (unlikely(ret))
+ return clock_getres_fallback(clock, res);
+ return 0;
}
static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_getres_common(clock, &ts);
- ret = __cvdso_clock_getres(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_getres32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_getres_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
}
#endif /* VDSO_HAS_CLOCK_GETRES */
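
The gettimeofday.c changes above follow one pattern throughout: the former fallback: labels become plain -1 returns from a *_common() helper that never issues a syscall, and thin wrappers decide which fallback to take. That split is what lets the 32-bit entry points pick clock_gettime32_fallback()/clock_getres32_fallback() when VDSO_HAS_32BIT_FALLBACK is defined. A standalone sketch of the pattern, with purely illustrative names rather than the vDSO API, is below.

#include <stdio.h>

struct timespec64 { long long tv_sec; long long tv_nsec; };

/* Stands in for "the clocksource can be read from userspace". */
static int fast_path_supported;

static int do_gettime_common(int clock, struct timespec64 *ts)
{
	if (clock != 0 || !fast_path_supported)
		return -1;	/* cannot handle it here; caller picks the fallback */
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
	return 0;
}

static int gettime_fallback(int clock, struct timespec64 *ts)
{
	/* In the kernel this would be the real system call. */
	(void)clock;
	ts->tv_sec = 1;
	ts->tv_nsec = 0;
	return 0;
}

static int my_clock_gettime(int clock, struct timespec64 *ts)
{
	int ret = do_gettime_common(clock, ts);

	if (ret)
		return gettime_fallback(clock, ts);
	return 0;
}

int main(void)
{
	struct timespec64 ts;

	my_clock_gettime(0, &ts);
	printf("fallback taken: %s\n", ts.tv_sec ? "yes" : "no");
	return 0;
}
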