path: root/lib

Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore | 4
-rw-r--r--  lib/Kconfig | 11
-rw-r--r--  lib/Kconfig.debug | 193
-rw-r--r--  lib/Kconfig.kasan | 4
-rw-r--r--  lib/Kconfig.kcsan | 199
-rw-r--r--  lib/Kconfig.kgdb | 23
-rw-r--r--  lib/Kconfig.ubsan | 75
-rw-r--r--  lib/Makefile | 20
-rw-r--r--  lib/bch.c | 154
-rw-r--r--  lib/bitmap.c | 40
-rw-r--r--  lib/bootconfig.c | 35
-rw-r--r--  lib/bsearch.c | 22
-rw-r--r--  lib/bug.c | 3
-rw-r--r--  lib/checksum.c | 20
-rw-r--r--  lib/cpumask.c | 29
-rw-r--r--  lib/crypto/chacha.c | 1
-rw-r--r--  lib/crypto/sha256.c | 20
-rw-r--r--  lib/debug_locks.c | 2
-rw-r--r--  lib/dump_stack.c | 2
-rw-r--r--  lib/dynamic_debug.c | 39
-rw-r--r--  lib/fault-inject.c | 4
-rw-r--r--  lib/flex_proportions.c | 7
-rw-r--r--  lib/ioremap.c | 47
-rw-r--r--  lib/iov_iter.c | 7
-rw-r--r--  lib/kobject.c | 18
-rw-r--r--  lib/kunit/Kconfig | 29
-rw-r--r--  lib/kunit/Makefile | 4
-rw-r--r--  lib/kunit/assert.c | 79
-rw-r--r--  lib/kunit/debugfs.c | 116
-rw-r--r--  lib/kunit/debugfs.h | 30
-rw-r--r--  lib/kunit/kunit-test.c | 44
-rw-r--r--  lib/kunit/test.c | 148
-rw-r--r--  lib/linear_ranges.c | 245
-rw-r--r--  lib/list-test.c | 4
-rw-r--r--  lib/logic_pio.c | 22
-rw-r--r--  lib/lz4/lz4_decompress.c | 3
-rw-r--r--  lib/lzo/lzo1x_compress.c | 13
-rw-r--r--  lib/math/Kconfig | 7
-rw-r--r--  lib/math/prime_numbers.c | 10
-rw-r--r--  lib/mpi/longlong.h | 36
-rw-r--r--  lib/nlattr.c | 248
-rw-r--r--  lib/objagg.c | 4
-rw-r--r--  lib/packing.c | 1
-rw-r--r--  lib/percpu-refcount.c | 13
-rw-r--r--  lib/radix-tree.c | 28
-rw-r--r--  lib/raid6/.gitignore | 1
-rw-r--r--  lib/raid6/algos.c | 8
-rw-r--r--  lib/raid6/avx2.c | 4
-rw-r--r--  lib/raid6/recov_avx2.c | 6
-rw-r--r--  lib/raid6/recov_ssse3.c | 6
-rw-r--r--  lib/raid6/test/Makefile | 9
-rw-r--r--  lib/rbtree.c | 4
-rw-r--r--  lib/rhashtable.c | 17
-rw-r--r--  lib/scatterlist.c | 2
-rw-r--r--  lib/seq_buf.c | 1
-rw-r--r--  lib/sha1.c | 24
-rw-r--r--  lib/smp_processor_id.c | 10
-rw-r--r--  lib/stackdepot.c | 39
-rw-r--r--  lib/strncpy_from_user.c | 5
-rw-r--r--  lib/strnlen_user.c | 4
-rw-r--r--  lib/test_bitmap.c | 2
-rw-r--r--  lib/test_bitops.c | 109
-rw-r--r--  lib/test_bpf.c | 4
-rw-r--r--  lib/test_firmware.c | 81
-rw-r--r--  lib/test_hmm.c | 1163
-rw-r--r--  lib/test_hmm_uapi.h | 59
-rw-r--r--  lib/test_kasan.c | 48
-rw-r--r--  lib/test_kmod.c | 2
-rw-r--r--  lib/test_linear_ranges.c | 228
-rw-r--r--  lib/test_lockup.c | 599
-rw-r--r--  lib/test_min_heap.c | 194
-rw-r--r--  lib/test_objagg.c | 4
-rw-r--r--  lib/test_printf.c | 36
-rw-r--r--  lib/test_stackinit.c | 28
-rw-r--r--  lib/test_sysctl.c | 15
-rw-r--r--  lib/test_vmalloc.c | 26
-rw-r--r--  lib/test_xarray.c | 55
-rw-r--r--  lib/ts_bm.c | 2
-rw-r--r--  lib/ts_fsm.c | 2
-rw-r--r--  lib/ts_kmp.c | 2
-rw-r--r--  lib/ubsan.c | 80
-rw-r--r--  lib/usercopy.c | 13
-rw-r--r--  lib/uuid.c | 10
-rw-r--r--  lib/vdso/gettimeofday.c | 166
-rw-r--r--  lib/vsprintf.c | 58
-rw-r--r--  lib/xarray.c | 9
-rw-r--r--  lib/zlib_inflate/inffast.c | 91
87 files changed, 4591 insertions, 698 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index f2a39c9e5485..327cb2c7f2c9 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
gen_crc32table
gen_crc64table
crc32table.h
diff --git a/lib/Kconfig b/lib/Kconfig
index bc7e56370129..df3f3da95990 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,6 +19,9 @@ config RAID6_PQ_BENCHMARK
Benchmark all available RAID6 PQ functions on init and choose the
fastest one.
+config LINEAR_RANGES
+ tristate
+
config PACKING
bool "Generic bitfield packing and unpacking"
default n
@@ -80,6 +83,9 @@ config ARCH_USE_CMPXCHG_LOCKREF
config ARCH_HAS_FAST_MULTIPLIER
bool
+config ARCH_USE_SYM_ANNOTATIONS
+ bool
+
config INDIRECT_PIO
bool "Access I/O in non-MMIO mode"
depends on ARM64
@@ -427,7 +433,7 @@ config INTERVAL_TREE
See:
- Documentation/rbtree.txt
+ Documentation/core-api/rbtree.rst
for more information.
@@ -615,6 +621,9 @@ config ARCH_HAS_PMEM_API
config MEMREGION
bool
+config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
+ bool
+
# use memcpy to implement user copies for nommu architectures
config UACCESS_MEMCPY
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 69def4a9df00..9ad9210d70a1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -98,7 +98,8 @@ config DYNAMIC_DEBUG
bool "Enable dynamic printk() support"
default n
depends on PRINTK
- depends on DEBUG_FS
+ depends on (DEBUG_FS || PROC_FS)
+ select DYNAMIC_DEBUG_CORE
help
Compiles debug level messages into the kernel, which would not
@@ -116,8 +117,9 @@ config DYNAMIC_DEBUG
Usage:
Dynamic debugging is controlled via the 'dynamic_debug/control' file,
- which is contained in the 'debugfs' filesystem. Thus, the debugfs
- filesystem must first be mounted before making use of this feature.
+ which is contained in the 'debugfs' filesystem or procfs.
+ Thus, the debugfs or procfs filesystem must first be mounted before
+ making use of this feature.
We refer the control file as: <debugfs>/dynamic_debug/control. This
file contains a list of the debug statements that can be enabled. The
format for each line of the file is:
@@ -164,6 +166,17 @@ config DYNAMIC_DEBUG
See Documentation/admin-guide/dynamic-debug-howto.rst for additional
information.
+config DYNAMIC_DEBUG_CORE
+ bool "Enable core function of dynamic debug support"
+ depends on PRINTK
+ depends on (DEBUG_FS || PROC_FS)
+ help
+ Enable the core functionality of dynamic debug. It is useful
+ when you want to tie dynamic debug to your kernel modules with
+ DYNAMIC_DEBUG_MODULE defined for each of them, especially on
+ embedded systems where kernel image size is a concern.
+
config SYMBOLIC_ERRNAME
bool "Support symbolic error names in printf"
default y if PRINTK
@@ -212,6 +225,22 @@ config DEBUG_INFO_REDUCED
DEBUG_INFO build and compile times are reduced too.
Only works with newer gcc versions.
+config DEBUG_INFO_COMPRESSED
+ bool "Compressed debugging information"
+ depends on DEBUG_INFO
+ depends on $(cc-option,-gz=zlib)
+ depends on $(ld-option,--compress-debug-sections=zlib)
+ help
+ Compress the debug information using zlib. Requires GCC 5.0+ or Clang
+ 5.0+, binutils 2.26+, and zlib.
+
+ Users of dpkg-deb via scripts/package/builddeb may find an increase in
+ size of their debug .deb packages with this config set, due to the
+ debug info being compressed with zlib, then the object files being
+ recompressed with a different compression scheme. But this is still
+ preferable to setting $KDEB_COMPRESS to "none" which would be even
+ larger.
+
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
@@ -241,6 +270,8 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
depends on DEBUG_INFO
+ depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+ depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
@@ -266,7 +297,7 @@ config ENABLE_MUST_CHECK
attribute warn_unused_result" messages.
config FRAME_WARN
- int "Warn for stack frames larger than (needs gcc 4.4)"
+ int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1280 if (!64BIT && PARISC)
@@ -276,7 +307,6 @@ config FRAME_WARN
Tell gcc to warn at build time for stack frames larger than this.
Setting this too low will cause a lot of warnings.
Setting it to 0 disables the warning.
- Requires gcc 4.4
config STRIP_ASM_SYMS
bool "Strip assembler-generated symbols during link"
@@ -305,18 +335,6 @@ config HEADERS_INSTALL
user-space program samples. It is also needed by some features such
as uapi header sanity checks.
-config OPTIMIZE_INLINING
- def_bool y
- help
- This option determines if the kernel forces gcc to inline the functions
- developers have marked 'inline'. Doing so takes away freedom from gcc to
- do what it thinks is best, which is desirable for the gcc 3.x series of
- compilers. The gcc 4.x series have a rewritten inlining algorithm and
- enabling this option will generate a smaller kernel there. Hopefully
- this algorithm is so good that allowing gcc 4.x and above to make the
- decision will become the default in the future. Until then this option
- is there to test gcc for this.
-
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
help
@@ -379,6 +397,11 @@ config STACK_VALIDATION
For more information, see
tools/objtool/Documentation/stack-validation.txt.
+config VMLINUX_VALIDATION
+ bool
+ depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT
+ default y
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
@@ -431,6 +454,16 @@ config MAGIC_SYSRQ_SERIAL
This option allows you to decide whether you want to enable the
magic SysRq key.
+config MAGIC_SYSRQ_SERIAL_SEQUENCE
+ string "Char sequence that enables magic SysRq over serial"
+ depends on MAGIC_SYSRQ_SERIAL
+ default ""
+ help
+ Specifies a sequence of characters that can follow BREAK to enable
+ SysRq on a serial console.
+
+ If unsure, leave an empty string and the option will not be enabled.
+
config DEBUG_FS
bool "Debug Filesystem"
help
@@ -653,6 +686,12 @@ config SCHED_STACK_END_CHECK
data corruption or a sporadic crash at a later stage once the region
is examined. The runtime overhead introduced is minimal.
+config ARCH_HAS_DEBUG_VM_PGTABLE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run DEBUG_VM_PGTABLE.
+
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
@@ -688,6 +727,22 @@ config DEBUG_VM_PGFLAGS
If unsure, say N.
+config DEBUG_VM_PGTABLE
+ bool "Debug arch page table for semantics compliance"
+ depends on MMU
+ depends on ARCH_HAS_DEBUG_VM_PGTABLE
+ default y if DEBUG_VM
+ help
+ This option provides a debug method which can be used to test
+ architecture page table helper functions on various platforms in
+ verifying if they comply with expected generic MM semantics. This
+ will help architecture code in making sure that any changes or
+ new additions of these helpers still conform to expected
+ semantics of the generic MM. Platforms will have to opt in for
+ this through ARCH_HAS_DEBUG_VM_PGTABLE.
+
+ If unsure, say N.
+
config ARCH_HAS_DEBUG_VIRTUAL
bool
@@ -766,7 +821,7 @@ config HAVE_DEBUG_STACKOVERFLOW
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW
- ---help---
+ help
Say Y here if you want to check for overflows of kernel, IRQ
and exception stacks (if your architecture uses them). This
option will show detailed messages if free stack space drops
@@ -978,6 +1033,18 @@ config WQ_WATCHDOG
state. This can be configured through kernel parameter
"workqueue.watchdog_thresh" and its sysfs counterpart.
+config TEST_LOCKUP
+ tristate "Test module to generate lockups"
+ help
+ This builds the "test_lockup" module that helps to make sure
+ that watchdogs and lockup detectors are working properly.
+
+ Depending on module parameters it can emulate a soft or hard
+ lockup, a "hung task", or hold an arbitrary lock for a long time.
+ It can also generate a series of lockups with cooling-down periods.
+
+ If unsure, say N.
+
endmenu # "Debug lockups and hangs"
menu "Scheduler Debugging"
@@ -1086,6 +1153,23 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.rst.
+config PROVE_RAW_LOCK_NESTING
+ bool "Enable raw_spinlock - spinlock nesting checks"
+ depends on PROVE_LOCKING
+ default n
+ help
+ Enable the raw_spinlock vs. spinlock nesting checks which ensure
+ that the lock nesting rules for PREEMPT_RT enabled kernels are
+ not violated.
+
+ NOTE: There are known nesting problems. So if you enable this
+ option expect lockdep splats until these problems have been fully
+ addressed, which is work in progress. This config switch allows one
+ to identify and analyze these problems. It will be removed and the
+ check permanently enabled once the main issues have been fixed.
+
+ If unsure, select N.
+
config LOCK_STAT
bool "Lock usage statistics"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
@@ -1481,10 +1565,12 @@ config PROVIDE_OHCI1394_DMA_INIT
This code (~1k) is freed after boot. By then, the firewire stack
in charge of the OHCI-1394 controllers should be used instead.
- See Documentation/debugging-via-ohci1394.txt for more information.
+ See Documentation/core-api/debugging-via-ohci1394.rst for more information.
source "samples/Kconfig"
+source "lib/Kconfig.kcsan"
+
config ARCH_HAS_DEVMEM_IS_ALLOWED
bool
@@ -1628,7 +1714,7 @@ config FAILSLAB
Provide fault-injection capability for kmalloc.
config FAIL_PAGE_ALLOC
- bool "Fault-injection capabilitiy for alloc_pages()"
+ bool "Fault-injection capability for alloc_pages()"
depends on FAULT_INJECTION
help
Provide fault-injection capability for alloc_pages().
@@ -1740,6 +1826,15 @@ config KCOV_INSTRUMENT_ALL
filesystem fuzzing with AFL) then you will want to enable coverage
for more specific subsets of files, and should say n here.
+config KCOV_IRQ_AREA_SIZE
+ hex "Size of interrupt coverage collection area in words"
+ depends on KCOV
+ default 0x40000
+ help
+ KCOV uses preallocated per-cpu areas to collect coverage from
+ soft interrupts. This specifies the size of those areas in the
+ number of unsigned long words.
+
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1769,6 +1864,16 @@ config TEST_LIST_SORT
If unsure, say N.
+config TEST_MIN_HEAP
+ tristate "Min heap test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on min heap function tests. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
config TEST_SORT
tristate "Array-based sort test"
depends on DEBUG_KERNEL || m
@@ -1850,7 +1955,7 @@ config ASYNC_RAID6_TEST
tristate "Self test for hardware accelerated raid6 recovery"
depends on ASYNC_RAID6_RECOV
select ASYNC_MEMCPY
- ---help---
+ help
This is a one-shot self test that permutes through the
recovery of all the possible two disk failure scenarios for a
N-disk array. Recovery is performed with the asynchronous
@@ -1947,6 +2052,19 @@ config TEST_LKM
If unsure, say N.
+config TEST_BITOPS
+ tristate "Test module for compilation of bitops operations"
+ depends on m
+ help
+ This builds the "test_bitops" module that is much like the
+ TEST_LKM module except that it does a basic exercise of the
+ set/clear_bit macros and get_count_order/long to make sure there are
+ no compiler warnings from C=1 sparse checker or -Wextra
+ compilations. It has no dependencies and doesn't run or load unless
+ explicitly requested by name. For example: modprobe test_bitops.
+
+ If unsure, say N.
+
config TEST_VMALLOC
tristate "Test module for stress/performance analysis of vmalloc allocator"
default n
@@ -2025,8 +2143,9 @@ config TEST_SYSCTL
If unsure, say N.
config SYSCTL_KUNIT_TEST
- tristate "KUnit test for sysctl"
+ tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the proc sysctl unit test, which runs on boot.
Tests the API contract and implementation correctness of sysctl.
@@ -2036,8 +2155,9 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
config LIST_KUNIT_TEST
- tristate "KUnit Test for Kernel Linked-list structures"
+ tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the linked list KUnit test suite.
It tests that the API and basic functionality of the list_head type
@@ -2053,6 +2173,18 @@ config LIST_KUNIT_TEST
If unsure, say N.
+config LINEAR_RANGES_TEST
+ tristate "KUnit test for linear_ranges"
+ depends on KUNIT
+ select LINEAR_RANGES
+ help
+ This builds the linear_ranges unit test, which runs on boot.
+ Tests the linear_ranges logic correctness.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2162,11 +2294,24 @@ config TEST_MEMINIT
If unsure, say N.
+config TEST_HMM
+ tristate "Test HMM (Heterogeneous Memory Management)"
+ depends on TRANSPARENT_HUGEPAGE
+ depends on DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ This is a pseudo device driver solely for testing HMM.
+ Say M here if you want to build the HMM test module.
+ Doing so will allow you to run tools/testing/selftest/vm/hmm-tests.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
bool "Memtest"
- ---help---
+ help
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
memtest=0, mean disabled; -- default
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 81f5464ea9e1..34b84bcbd3d9 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC
config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
config KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
new file mode 100644
index 000000000000..5ee88e5119c2
--- /dev/null
+++ b/lib/Kconfig.kcsan
@@ -0,0 +1,199 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KCSAN
+ bool
+
+config HAVE_KCSAN_COMPILER
+ def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)
+ help
+ For the list of compilers that support KCSAN, please see
+ <file:Documentation/dev-tools/kcsan.rst>.
+
+config KCSAN_KCOV_BROKEN
+ def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
+ depends on CC_IS_CLANG
+ depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=thread -fsanitize-coverage=trace-pc)
+ help
+ Some versions of clang support either KCSAN or KCOV but not the
+ combination of the two.
+ See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
+ in newer releases.
+
+menuconfig KCSAN
+ bool "KCSAN: dynamic data race detector"
+ depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
+ depends on DEBUG_KERNEL && !KASAN
+ depends on !KCSAN_KCOV_BROKEN
+ select STACKTRACE
+ help
+ The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
+ data-race detector that relies on compile-time instrumentation.
+ KCSAN uses a watchpoint-based sampling approach to detect races.
+
+ While KCSAN's primary purpose is to detect data races, it
+ also provides assertions to check data access constraints.
+ These assertions can expose bugs that do not manifest as
+ data races.
+
+ See <file:Documentation/dev-tools/kcsan.rst> for more details.
+
+if KCSAN
+
+config KCSAN_VERBOSE
+ bool "Show verbose reports with more information about system state"
+ depends on PROVE_LOCKING
+ help
+ If enabled, reports show more information about the system state that
+ may help better analyze and debug races. This includes held locks and
+ IRQ trace events.
+
+ While this option should generally be benign, we call into more
+ external functions on report generation; if a race report is
+ generated from any one of them, system stability may suffer due to
+ deadlocks or recursion. If in doubt, say N.
+
+config KCSAN_DEBUG
+ bool "Debugging of KCSAN internals"
+
+config KCSAN_SELFTEST
+ bool "Perform short selftests on boot"
+ default y
+ help
+ Run KCSAN selftests on boot. On test failure, causes the kernel to panic.
+
+config KCSAN_EARLY_ENABLE
+ bool "Early enable during boot"
+ default y
+ help
+ If KCSAN should be enabled globally as soon as possible. KCSAN can
+ later be enabled/disabled via debugfs.
+
+config KCSAN_NUM_WATCHPOINTS
+ int "Number of available watchpoints"
+ default 64
+ help
+ Total number of available watchpoints. An address range maps into a
+ specific watchpoint slot as specified in kernel/kcsan/encoding.h.
+ Although larger number of watchpoints may not be usable due to
+ limited number of CPUs, a larger value helps to improve performance
+ due to reducing cache-line contention. The chosen default is a
+ conservative value; we should almost never observe "no_capacity"
+ events (see /sys/kernel/debug/kcsan).
+
+config KCSAN_UDELAY_TASK
+ int "Delay in microseconds (for tasks)"
+ default 80
+ help
+ For tasks, the microsecond delay after setting up a watchpoint.
+
+config KCSAN_UDELAY_INTERRUPT
+ int "Delay in microseconds (for interrupts)"
+ default 20
+ help
+ For interrupts, the microsecond delay after setting up a watchpoint.
+ Interrupts have tighter latency requirements, and their delay should
+ be lower than for tasks.
+
+config KCSAN_DELAY_RANDOMIZE
+ bool "Randomize above delays"
+ default y
+ help
+ If delays should be randomized, where the maximum is KCSAN_UDELAY_*.
+ If false, the chosen delays are always the KCSAN_UDELAY_* values
+ as defined above.
+
+config KCSAN_SKIP_WATCH
+ int "Skip instructions before setting up watchpoint"
+ default 4000
+ help
+ The number of per-CPU memory operations to skip, before another
+ watchpoint is set up, i.e. one in KCSAN_WATCH_SKIP per-CPU
+ memory operations are used to set up a watchpoint. A smaller value
+ results in more aggressive race detection, whereas a larger value
+ improves system performance at the cost of missing some races.
+
+config KCSAN_SKIP_WATCH_RANDOMIZE
+ bool "Randomize watchpoint instruction skip count"
+ default y
+ help
+ If instruction skip count should be randomized, where the maximum is
+ KCSAN_WATCH_SKIP. If false, the chosen value is always
+ KCSAN_WATCH_SKIP.
+
+config KCSAN_INTERRUPT_WATCHER
+ bool "Interruptible watchers"
+ help
+ If enabled, a task that set up a watchpoint may be interrupted while
+ delayed. This option will allow KCSAN to detect races between
+ interrupted tasks and other threads of execution on the same CPU.
+
+ Currently disabled by default, because not all safe per-CPU access
+ primitives and patterns may be accounted for, and therefore could
+ result in false positives.
+
+config KCSAN_REPORT_ONCE_IN_MS
+ int "Duration in milliseconds, in which any given race is only reported once"
+ default 3000
+ help
+ Any given race is only reported once in the defined time window.
+ Different races may still generate reports within a duration that is
+ smaller than the duration defined here. This allows rate limiting
+ reporting to avoid flooding the console with reports. Setting this
+ to 0 disables rate limiting.
+
+# The main purpose of the below options is to control reported data races (e.g.
+# in fuzzer configs), and are not expected to be switched frequently by other
+# users. We could turn some of them into boot parameters, but given they should
+# not be switched normally, let's keep them here to simplify configuration.
+#
+# The defaults below are chosen to be very conservative, and may miss certain
+# bugs.
+
+config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
+ bool "Report races of unknown origin"
+ default y
+ help
+ If KCSAN should report races where only one access is known, and the
+ conflicting access is of unknown origin. This type of race is
+ reported if it was only possible to infer a race due to a data value
+ change while an access is being delayed on a watchpoint.
+
+config KCSAN_REPORT_VALUE_CHANGE_ONLY
+ bool "Only report races where watcher observed a data value change"
+ default y
+ help
+ If enabled and a conflicting write is observed via a watchpoint, but
+ the data value of the memory location was observed to remain
+ unchanged, do not report the data race.
+
+config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
+ bool "Assume that plain aligned writes up to word size are atomic"
+ default y
+ help
+ Assume that plain aligned writes up to word size are atomic by
+ default, and also not subject to other unsafe compiler optimizations
+ resulting in data races. This will cause KCSAN to not report data
+ races due to conflicts where the only plain accesses are aligned
+ writes up to word size: conflicts between marked reads and plain
+ aligned writes up to word size will not be reported as data races;
+ notice that data races between two conflicting plain aligned writes
+ will also not be reported.
+
+config KCSAN_IGNORE_ATOMICS
+ bool "Do not instrument marked atomic accesses"
+ help
+ Never instrument marked atomic accesses. This option can be used for
+ additional filtering. Conflicting marked atomic reads and plain
+ writes will never be reported as a data race; however, this will cause
+ plain reads and marked writes to result in "unknown origin" reports.
+ If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data
+ races where at least one access is marked atomic will never be
+ reported.
+
+ Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but including unaligned
+ accesses, conflicting marked atomic reads and plain writes will not
+ be reported as data races; however, unlike that option, data races
+ due to two conflicting plain writes will be reported (aligned and
+ unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+
+endif # KCSAN
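
Beyond these tuning knobs, the KCSAN help text above notes that the tool also checks explicit data-access assertions. As a rough sketch of the annotations instrumented code can carry, the fragment below marks one intentionally racy read with data_race() and asserts exclusive write access to another field; the struct and the update_stats() helper are invented for this illustration, not taken from the patch.

#include <linux/compiler.h>	/* data_race() */
#include <linux/kcsan-checks.h>	/* ASSERT_EXCLUSIVE_WRITER() */

struct stats {
	unsigned long hits;	/* updated concurrently; racy reads tolerated */
	unsigned long window;	/* expected to have a single writer */
};

static void update_stats(struct stats *s)
{
	/* Tell KCSAN this racy read is intentional; no report is generated. */
	unsigned long snapshot = data_race(s->hits);

	/*
	 * Assert that no other context writes ->window concurrently; KCSAN
	 * reports a race if that assumption is ever violated at runtime.
	 */
	ASSERT_EXCLUSIVE_WRITER(s->window);
	s->window = snapshot;
}
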
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 933680b59e2d..256f2486f9bd 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,6 +3,11 @@
config HAVE_ARCH_KGDB
bool
+# set if the architecture has its kgdb_arch_handle_qxfer_pkt
+# function to enable the gdb stub to handle XML packets sent from GDB.
+config HAVE_ARCH_KGDB_QXFER_PKT
+ bool
+
menuconfig KGDB
bool "KGDB: kernel debugger"
depends on HAVE_ARCH_KGDB
@@ -124,4 +129,22 @@ config KDB_CONTINUE_CATASTROPHIC
CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
If you are not sure, say 0.
+config ARCH_HAS_EARLY_DEBUG
+ bool
+ default n
+ help
+ If an architecture can definitely handle entering the debugger
+ when early_param's are parsed then it should select this config.
+ Otherwise, if "kgdbwait" is passed on the kernel command line it
+ won't actually be processed until dbg_late_init() just after the
+ call to kgdb_arch_late() is made.
+
+ NOTE: Even if this isn't selected by an architecture we will
+ still try to register kgdb to handle breakpoints and crashes
+ when early_param's are parsed, we just won't act on the
+ "kgdbwait" parameter until dbg_late_init(). If you get a
+ crash and try to drop into kgdb somewhere between these two
+ places you might or might not end up being able to use kgdb
+ depending on exactly how far along the architecture has initted.
+
endif # KGDB
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 0e04fcb3ab3d..774315de555a 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -2,18 +2,61 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config UBSAN
+menuconfig UBSAN
bool "Undefined behaviour sanity checker"
help
- This option enables undefined behaviour sanity checker
+ This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
- behaviours in runtime. Various types of checks may be enabled
- via boot parameter ubsan_handle
- (see: Documentation/dev-tools/ubsan.rst).
+ behaviours at runtime. For more details, see:
+ Documentation/dev-tools/ubsan.rst
+
+if UBSAN
+
+config UBSAN_TRAP
+ bool "On Sanitizer warnings, abort the running kernel code"
+ depends on $(cc-option, -fsanitize-undefined-trap-on-error)
+ help
+ Building kernels with Sanitizer features enabled tends to grow
+ the kernel size by around 5%, due to adding all the debugging
+ text on failure paths. To avoid this, Sanitizer instrumentation
+ can just issue a trap. This reduces the kernel size overhead but
+ turns all warnings (including potentially harmless conditions)
+ into full exceptions that abort the running kernel code
+ (regardless of context, locks held, etc), which may destabilize
+ the system. For some system builders this is an acceptable
+ trade-off.
+
+config UBSAN_KCOV_BROKEN
+ def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
+ depends on CC_IS_CLANG
+ depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=bounds -fsanitize-coverage=trace-pc)
+ help
+ Some versions of clang support either UBSAN or KCOV but not the
+ combination of the two.
+ See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
+ in newer releases.
+
+config UBSAN_BOUNDS
+ bool "Perform array index bounds checking"
+ default UBSAN
+ depends on !UBSAN_KCOV_BROKEN
+ help
+ This option enables detection of directly indexed out of bounds
+ array accesses, where the array size is known at compile time.
+ Note that this does not protect array overflows via bad calls
+ to the {str,mem}*cpy() family of functions (that is addressed
+ by CONFIG_FORTIFY_SOURCE).
+
+config UBSAN_MISC
+ bool "Enable all other Undefined Behavior sanity checks"
+ default UBSAN
+ help
+ This option enables all sanity checks that don't have their
+ own Kconfig options. Disable this if you only want to have
+ individually selected checks.
config UBSAN_SANITIZE_ALL
bool "Enable instrumentation for the entire kernel"
- depends on UBSAN
depends on ARCH_HAS_UBSAN_SANITIZE_ALL
# We build with -Wno-maybe-uninitialized, but we still want to
@@ -28,22 +71,20 @@ config UBSAN_SANITIZE_ALL
Enabling this option will get kernel image size increased
significantly.
-config UBSAN_NO_ALIGNMENT
- bool "Disable checking of pointers alignment"
- depends on UBSAN
- default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
+config UBSAN_ALIGNMENT
+ bool "Enable checks for pointers alignment"
+ default !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ depends on !UBSAN_TRAP
help
- This option disables the check of unaligned memory accesses.
- This option should be used when building allmodconfig.
- Disabling this option on architectures that support unaligned
+ This option enables the check of unaligned memory accesses.
+ Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_ALIGNMENT
- def_bool !UBSAN_NO_ALIGNMENT
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
- depends on m && UBSAN
+ depends on m
help
This is a test module for UBSAN.
It triggers various undefined behavior, and detects it.
+
+endif # if UBSAN
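
To make the UBSAN_BOUNDS help text concrete, here is a minimal, intentionally buggy sketch of the access pattern it instruments: a direct out-of-bounds index into an array whose size is known at compile time. The array and function are invented for illustration.

static int pin_state[8];

int read_pin(int idx)
{
	/*
	 * For idx == 8 the instrumented kernel emits a UBSAN
	 * "index out of range" report (or traps if UBSAN_TRAP=y),
	 * because the array bound is known at compile time.
	 */
	return pin_state[idx];
}
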
diff --git a/lib/Makefile b/lib/Makefile
index 611872c06926..b1c42c10073b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,9 @@ KASAN_SANITIZE_string.o := n
CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
endif
+# Used by KCSAN while enabled, avoid recursion.
+KCSAN_SANITIZE_random32.o := n
+
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o extable.o sha1.o irq_regs.o argv_split.o \
@@ -56,6 +59,8 @@ obj-y += kstrtox.o
obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
+CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
@@ -67,6 +72,7 @@ CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
@@ -86,9 +92,12 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
+CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
+obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
+obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
@@ -122,6 +131,7 @@ obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
obj-$(CONFIG_BITREVERSE) += bitrev.o
+obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o
obj-$(CONFIG_PACKING) += packing.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
@@ -183,7 +193,7 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
-obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
+obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o
obj-$(CONFIG_NLATTR) += nlattr.o
@@ -220,6 +230,10 @@ obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+# stackdepot.c should not be instrumented or call instrumented functions.
+# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
+# file.
+CFLAGS_stackdepot.o += -fno-builtin
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
@@ -279,10 +293,13 @@ quiet_cmd_build_OID_registry = GEN $@
clean-files += oid_registry_data.c
obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+ifneq ($(CONFIG_UBSAN_TRAP),y)
obj-$(CONFIG_UBSAN) += ubsan.o
+endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
+KCSAN_SANITIZE_ubsan.o := n
CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
@@ -300,3 +317,4 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# KUnit tests
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
diff --git a/lib/bch.c b/lib/bch.c
index 5db6d3a4c8a6..7c031ee8b93b 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -23,15 +23,15 @@
* This library provides runtime configurable encoding/decoding of binary
* Bose-Chaudhuri-Hocquenghem (BCH) codes.
*
- * Call init_bch to get a pointer to a newly allocated bch_control structure for
+ * Call bch_init to get a pointer to a newly allocated bch_control structure for
* the given m (Galois field order), t (error correction capability) and
* (optional) primitive polynomial parameters.
*
- * Call encode_bch to compute and store ecc parity bytes to a given buffer.
- * Call decode_bch to detect and locate errors in received data.
+ * Call bch_encode to compute and store ecc parity bytes to a given buffer.
+ * Call bch_decode to detect and locate errors in received data.
*
* On systems supporting hw BCH features, intermediate results may be provided
- * to decode_bch in order to skip certain steps. See decode_bch() documentation
+ * to bch_decode in order to skip certain steps. See bch_decode() documentation
* for details.
*
* Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of
@@ -102,7 +102,7 @@
*/
struct gf_poly {
unsigned int deg; /* polynomial degree */
- unsigned int c[0]; /* polynomial terms */
+ unsigned int c[]; /* polynomial terms */
};
/* given its degree, compute a polynomial size in bytes */
@@ -114,10 +114,53 @@ struct gf_poly_deg1 {
unsigned int c[2];
};
+static u8 swap_bits_table[] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
+
+static u8 swap_bits(struct bch_control *bch, u8 in)
+{
+ if (!bch->swap_bits)
+ return in;
+
+ return swap_bits_table[in];
+}
+
/*
- * same as encode_bch(), but process input data one byte at a time
+ * same as bch_encode(), but process input data one byte at a time
*/
-static void encode_bch_unaligned(struct bch_control *bch,
+static void bch_encode_unaligned(struct bch_control *bch,
const unsigned char *data, unsigned int len,
uint32_t *ecc)
{
@@ -126,7 +169,9 @@ static void encode_bch_unaligned(struct bch_control *bch,
const int l = BCH_ECC_WORDS(bch)-1;
while (len--) {
- p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff);
+ u8 tmp = swap_bits(bch, *data++);
+
+ p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff);
for (i = 0; i < l; i++)
ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++);
@@ -145,10 +190,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst,
unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
for (i = 0; i < nwords; i++, src += 4)
- dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3];
+ dst[i] = ((u32)swap_bits(bch, src[0]) << 24) |
+ ((u32)swap_bits(bch, src[1]) << 16) |
+ ((u32)swap_bits(bch, src[2]) << 8) |
+ swap_bits(bch, src[3]);
memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords);
- dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3];
+ dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) |
+ ((u32)swap_bits(bch, pad[1]) << 16) |
+ ((u32)swap_bits(bch, pad[2]) << 8) |
+ swap_bits(bch, pad[3]);
}
/*
@@ -161,20 +212,20 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
for (i = 0; i < nwords; i++) {
- *dst++ = (src[i] >> 24);
- *dst++ = (src[i] >> 16) & 0xff;
- *dst++ = (src[i] >> 8) & 0xff;
- *dst++ = (src[i] >> 0) & 0xff;
+ *dst++ = swap_bits(bch, src[i] >> 24);
+ *dst++ = swap_bits(bch, src[i] >> 16);
+ *dst++ = swap_bits(bch, src[i] >> 8);
+ *dst++ = swap_bits(bch, src[i]);
}
- pad[0] = (src[nwords] >> 24);
- pad[1] = (src[nwords] >> 16) & 0xff;
- pad[2] = (src[nwords] >> 8) & 0xff;
- pad[3] = (src[nwords] >> 0) & 0xff;
+ pad[0] = swap_bits(bch, src[nwords] >> 24);
+ pad[1] = swap_bits(bch, src[nwords] >> 16);
+ pad[2] = swap_bits(bch, src[nwords] >> 8);
+ pad[3] = swap_bits(bch, src[nwords]);
memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords);
}
/**
- * encode_bch - calculate BCH ecc parity of data
+ * bch_encode - calculate BCH ecc parity of data
* @bch: BCH control structure
* @data: data to encode
* @len: data length in bytes
@@ -187,7 +238,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
* The exact number of computed ecc parity bits is given by member @ecc_bits of
* @bch; it may be less than m*t for large values of t.
*/
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc)
{
const unsigned int l = BCH_ECC_WORDS(bch)-1;
@@ -215,7 +266,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
m = ((unsigned long)data) & 3;
if (m) {
mlen = (len < (4-m)) ? len : 4-m;
- encode_bch_unaligned(bch, data, mlen, bch->ecc_buf);
+ bch_encode_unaligned(bch, data, mlen, bch->ecc_buf);
data += mlen;
len -= mlen;
}
@@ -240,7 +291,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
*/
while (mlen--) {
/* input data is read in big-endian format */
- w = r[0]^cpu_to_be32(*pdata++);
+ w = cpu_to_be32(*pdata++);
+ if (bch->swap_bits)
+ w = (u32)swap_bits(bch, w) |
+ ((u32)swap_bits(bch, w >> 8) << 8) |
+ ((u32)swap_bits(bch, w >> 16) << 16) |
+ ((u32)swap_bits(bch, w >> 24) << 24);
+ w ^= r[0];
p0 = tab0 + (l+1)*((w >> 0) & 0xff);
p1 = tab1 + (l+1)*((w >> 8) & 0xff);
p2 = tab2 + (l+1)*((w >> 16) & 0xff);
@@ -255,13 +312,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
/* process last unaligned bytes */
if (len)
- encode_bch_unaligned(bch, data, len, bch->ecc_buf);
+ bch_encode_unaligned(bch, data, len, bch->ecc_buf);
/* store ecc parity bytes into original parity buffer */
if (ecc)
store_ecc8(bch, ecc, bch->ecc_buf);
}
-EXPORT_SYMBOL_GPL(encode_bch);
+EXPORT_SYMBOL_GPL(bch_encode);
static inline int modulo(struct bch_control *bch, unsigned int v)
{
@@ -952,7 +1009,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
#endif /* USE_CHIEN_SEARCH */
/**
- * decode_bch - decode received codeword and find bit error locations
+ * bch_decode - decode received codeword and find bit error locations
* @bch: BCH control structure
* @data: received data, ignored if @calc_ecc is provided
* @len: data length in bytes, must always be provided
@@ -966,22 +1023,22 @@ static int chien_search(struct bch_control *bch, unsigned int len,
* invalid parameters were provided
*
* Depending on the available hw BCH support and the need to compute @calc_ecc
- * separately (using encode_bch()), this function should be called with one of
+ * separately (using bch_encode()), this function should be called with one of
* the following parameter configurations -
*
* by providing @data and @recv_ecc only:
- * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
+ * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
*
* by providing @recv_ecc and @calc_ecc:
- * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
+ * bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
*
* by providing ecc = recv_ecc XOR calc_ecc:
- * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
+ * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
*
* by providing syndrome results @syn:
- * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
+ * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
*
- * Once decode_bch() has successfully returned with a positive value, error
+ * Once bch_decode() has successfully returned with a positive value, error
* locations returned in array @errloc should be interpreted as follows -
*
* if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for
@@ -993,7 +1050,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
* Note that this function does not perform any data correction by itself, it
* merely indicates error locations.
*/
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc)
{
@@ -1012,7 +1069,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
/* compute received data ecc into an internal buffer */
if (!data || !recv_ecc)
return -EINVAL;
- encode_bch(bch, data, len, NULL);
+ bch_encode(bch, data, len, NULL);
} else {
/* load provided calculated ecc */
load_ecc8(bch, bch->ecc_buf, calc_ecc);
@@ -1048,12 +1105,14 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
break;
}
errloc[i] = nbits-1-errloc[i];
- errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7));
+ if (!bch->swap_bits)
+ errloc[i] = (errloc[i] & ~7) |
+ (7-(errloc[i] & 7));
}
}
return (err >= 0) ? err : -EBADMSG;
}
-EXPORT_SYMBOL_GPL(decode_bch);
+EXPORT_SYMBOL_GPL(bch_decode);
/*
* generate Galois field lookup tables
@@ -1236,27 +1295,29 @@ finish:
}
/**
- * init_bch - initialize a BCH encoder/decoder
+ * bch_init - initialize a BCH encoder/decoder
* @m: Galois field order, should be in the range 5-15
* @t: maximum error correction capability, in bits
* @prim_poly: user-provided primitive polynomial (or 0 to use default)
+ * @swap_bits: swap bits within data and syndrome bytes
*
* Returns:
* a newly allocated BCH control structure if successful, NULL otherwise
*
* This initialization can take some time, as lookup tables are built for fast
* encoding/decoding; make sure not to call this function from a time critical
- * path. Usually, init_bch() should be called on module/driver init and
- * free_bch() should be called to release memory on exit.
+ * path. Usually, bch_init() should be called on module/driver init and
+ * bch_free() should be called to release memory on exit.
*
* You may provide your own primitive polynomial of degree @m in argument
- * @prim_poly, or let init_bch() use its default polynomial.
+ * @prim_poly, or let bch_init() use its default polynomial.
*
- * Once init_bch() has successfully returned a pointer to a newly allocated
+ * Once bch_init() has successfully returned a pointer to a newly allocated
* BCH control structure, ecc length in bytes is given by member @ecc_bytes of
* the structure.
*/
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits)
{
int err = 0;
unsigned int i, words;
@@ -1321,6 +1382,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err);
bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err);
bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);
+ bch->swap_bits = swap_bits;
for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);
@@ -1347,16 +1409,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
return bch;
fail:
- free_bch(bch);
+ bch_free(bch);
return NULL;
}
-EXPORT_SYMBOL_GPL(init_bch);
+EXPORT_SYMBOL_GPL(bch_init);
/**
- * free_bch - free the BCH control structure
+ * bch_free - free the BCH control structure
* @bch: BCH control structure to release
*/
-void free_bch(struct bch_control *bch)
+void bch_free(struct bch_control *bch)
{
unsigned int i;
@@ -1377,7 +1439,7 @@ void free_bch(struct bch_control *bch)
kfree(bch);
}
}
-EXPORT_SYMBOL_GPL(free_bch);
+EXPORT_SYMBOL_GPL(bch_free);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
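
Pulling the renamed bch.c interface together, a driver-side usage sketch could look like the following. The m/t parameters, buffer sizes and the bch_roundtrip() wrapper are illustrative assumptions; only the function names and signatures follow the renames in the diff above.

#include <linux/bch.h>
#include <linux/slab.h>

static int bch_roundtrip(const u8 *data, unsigned int len)
{
	struct bch_control *bch;
	unsigned int *errloc;
	u8 ecc[64] = { 0 };	/* must hold bch->ecc_bytes, zeroed before a fresh encode */
	int nerr;

	/* m = 13, t = 4, default primitive polynomial, no bit swapping */
	bch = bch_init(13, 4, 0, false);
	if (!bch)
		return -EINVAL;

	errloc = kcalloc(bch->t, sizeof(*errloc), GFP_KERNEL);
	if (!errloc) {
		bch_free(bch);
		return -ENOMEM;
	}

	/* compute ecc parity for the buffer */
	bch_encode(bch, data, len, ecc);

	/* later: locate bit errors in a received copy of data + ecc */
	nerr = bch_decode(bch, data, len, ecc, NULL, NULL, errloc);

	kfree(errloc);
	bch_free(bch);
	return nerr;	/* >= 0: number of errors, -EBADMSG: uncorrectable */
}
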
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 89260aa342d6..0364452b1617 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -182,21 +182,22 @@ EXPORT_SYMBOL(__bitmap_shift_left);
*
* In pictures, example for a big-endian 32-bit architecture:
*
- * @src:
- * 31 63
- * | |
- * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101
- * | | | |
- * 16 14 0 32
- *
- * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is:
- *
- * 31 63
- * | |
- * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010
- * | | |
- * 14 (bit 17 0 32
- * from @src)
+ * The @src bitmap is::
+ *
+ * 31 63
+ * | |
+ * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101
+ * | | | |
+ * 16 14 0 32
+ *
+ * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
+ *
+ * 31 63
+ * | |
+ * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010
+ * | | |
+ * 14 (bit 17 0 32
+ * from @src)
*
* Note that @dst and @src might overlap partially or entirely.
*
@@ -740,8 +741,9 @@ int bitmap_parse(const char *start, unsigned int buflen,
int chunks = BITS_TO_U32(nmaskbits);
u32 *bitmap = (u32 *)maskp;
int unset_bit;
+ int chunk;
- while (1) {
+ for (chunk = 0; ; chunk++) {
end = bitmap_find_region_reverse(start, end);
if (start > end)
break;
@@ -749,7 +751,11 @@ int bitmap_parse(const char *start, unsigned int buflen,
if (!chunks--)
return -EOVERFLOW;
- end = bitmap_get_x32_reverse(start, end, bitmap++);
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
+#else
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
+#endif
if (IS_ERR(end))
return PTR_ERR(end);
}
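
As a companion to the reflowed kernel-doc above (which appears to belong to bitmap_cut()), here is a hedged usage sketch of the cut operation it describes; the bit positions and the cut_example() wrapper are invented for illustration.

#include <linux/bitmap.h>

static void cut_example(void)
{
	DECLARE_BITMAP(src, 64);
	DECLARE_BITMAP(dst, 64);

	bitmap_zero(src, 64);
	bitmap_set(src, 14, 3);		/* set bits 14-16 */
	bitmap_set(src, 40, 1);		/* a higher bit to be shifted down */

	/* remove 3 bits starting at bit 14; bit 40 of src lands at bit 37 of dst */
	bitmap_cut(dst, src, 14, 3, 64);
}
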
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index ec3ce7fd299f..912ef4921398 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -29,12 +29,14 @@ static int xbc_node_num __initdata;
static char *xbc_data __initdata;
static size_t xbc_data_size __initdata;
static struct xbc_node *last_parent __initdata;
+static const char *xbc_err_msg __initdata;
+static int xbc_err_pos __initdata;
static int __init xbc_parse_error(const char *msg, const char *p)
{
- int pos = p - xbc_data;
+ xbc_err_msg = msg;
+ xbc_err_pos = (int)(p - xbc_data);
- pr_err("Parse error at pos %d: %s\n", pos, msg);
return -EINVAL;
}
@@ -738,33 +740,44 @@ void __init xbc_destroy_all(void)
/**
* xbc_init() - Parse given XBC file and build XBC internal tree
* @buf: boot config text
+ * @emsg: A pointer of const char * to store the error message
+ * @epos: A pointer of int to store the error position
*
* This parses the boot config text in @buf. @buf must be a
* null terminated string and smaller than XBC_DATA_MAX.
* Return the number of stored nodes (>0) if succeeded, or -errno
* if there is any error.
+ * In error cases, @emsg will be updated with an error message and
+ * @epos will be updated with the error position which is the byte offset
+ * of @buf. If the error is not a parser error, @epos will be -1.
*/
-int __init xbc_init(char *buf)
+int __init xbc_init(char *buf, const char **emsg, int *epos)
{
char *p, *q;
int ret, c;
+ if (epos)
+ *epos = -1;
+
if (xbc_data) {
- pr_err("Error: bootconfig is already initialized.\n");
+ if (emsg)
+ *emsg = "Bootconfig is already initialized";
return -EBUSY;
}
ret = strlen(buf);
if (ret > XBC_DATA_MAX - 1 || ret == 0) {
- pr_err("Error: Config data is %s.\n",
- ret ? "too big" : "empty");
+ if (emsg)
+ *emsg = ret ? "Config data is too big" :
+ "Config data is empty";
return -ERANGE;
}
xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
SMP_CACHE_BYTES);
if (!xbc_nodes) {
- pr_err("Failed to allocate memory for bootconfig nodes.\n");
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig nodes";
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
@@ -814,9 +827,13 @@ int __init xbc_init(char *buf)
if (!ret)
ret = xbc_verify_tree();
- if (ret < 0)
+ if (ret < 0) {
+ if (epos)
+ *epos = xbc_err_pos;
+ if (emsg)
+ *emsg = xbc_err_msg;
xbc_destroy_all();
- else
+ } else
ret = xbc_node_num;
return ret;
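
With this change a caller of xbc_init() is responsible for printing the returned error message and position itself. A minimal sketch, assuming an invented load_bootconfig() wrapper and log messages:

#include <linux/bootconfig.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init load_bootconfig(char *data)
{
	const char *msg;
	int pos, ret;

	ret = xbc_init(data, &msg, &pos);
	if (ret < 0) {
		if (pos < 0)
			pr_err("Failed to init bootconfig: %s\n", msg);
		else
			pr_err("Parse error at pos %d: %s\n", pos, msg);
		return ret;
	}

	pr_info("Loaded bootconfig: %d nodes\n", ret);
	return 0;
}
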
diff --git a/lib/bsearch.c b/lib/bsearch.c
index 8b3aae5ae77a..bf86aa66f2b2 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -28,27 +28,9 @@
* the key and elements in the array are of the same type, you can use
* the same comparison function for both sort() and bsearch().
*/
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- cmp_func_t cmp)
+void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
{
- const char *pivot;
- int result;
-
- while (num > 0) {
- pivot = base + (num >> 1) * size;
- result = cmp(key, pivot);
-
- if (result == 0)
- return (void *)pivot;
-
- if (result > 0) {
- base = pivot + size;
- num--;
- }
- num >>= 1;
- }
-
- return NULL;
+ return __inline_bsearch(key, base, num, size, cmp);
}
EXPORT_SYMBOL(bsearch);
NOKPROBE_SYMBOL(bsearch);
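
The comment above notes that one comparison function can serve both sort() and bsearch() when the key and array elements share a type. A small hedged example of that pattern (the table and the id_known() helper are invented):

#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return (x > y) - (x < y);
}

static bool id_known(u32 *table, size_t n, u32 id)
{
	/* one comparator shared by sort() and bsearch() */
	sort(table, n, sizeof(*table), cmp_u32, NULL);
	return bsearch(&id, table, n, sizeof(*table), cmp_u32) != NULL;
}
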
diff --git a/lib/bug.c b/lib/bug.c
index 8c98af0bf585..7103440c0ee1 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,6 +47,7 @@
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
+#include <linux/ftrace.h>
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
@@ -153,6 +154,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (!bug)
return BUG_TRAP_TYPE_NONE;
+ disable_trace_on_warning();
+
file = NULL;
line = 0;
warning = 0;
diff --git a/lib/checksum.c b/lib/checksum.c
index de032ad96f4a..7ac65a0000ff 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -146,26 +146,6 @@ __sum16 ip_compute_csum(const void *buff, int len)
EXPORT_SYMBOL(ip_compute_csum);
/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *csum_err)
-{
- int missing;
-
- missing = __copy_from_user(dst, src, len);
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *csum_err = -EFAULT;
- } else
- *csum_err = 0;
-
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 0cb672eb107c..fb22fb266f93 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -232,3 +232,32 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
+
+static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
+
+/**
+ * Returns an arbitrary cpu within src1p & src2p.
+ *
+ * Iterated calls using the same src1p and src2p will be distributed within
+ * their intersection.
+ *
+ * Returns >= nr_cpu_ids if the intersection is empty.
+ */
+int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+ next = cpumask_next_and(prev, src1p, src2p);
+ if (next >= nr_cpu_ids)
+ next = cpumask_first_and(src1p, src2p);
+
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_and_distribute);
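
A hedged sketch of how the new cpumask_any_and_distribute() helper might be used to spread work across the CPUs allowed by two masks; the pick_target_cpu() wrapper and the choice of cpu_online_mask are illustrative assumptions:

#include <linux/cpumask.h>
#include <linux/errno.h>

static int pick_target_cpu(const struct cpumask *allowed)
{
	int cpu;

	/* successive calls rotate through allowed & cpu_online_mask */
	cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		return -ENODEV;	/* the intersection is empty */

	return cpu;
}
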
diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c
index 65ead6b0c7e0..4ccbec442469 100644
--- a/lib/crypto/chacha.c
+++ b/lib/crypto/chacha.c
@@ -10,7 +10,6 @@
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/string.h>
-#include <linux/cryptohash.h>
#include <asm/unaligned.h>
#include <crypto/chacha.h>
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 66cb04b0cf4e..2e621697c5c3 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -206,7 +206,7 @@ static void sha256_transform(u32 *state, const u8 *input)
memzero_explicit(W, 64 * sizeof(u32));
}
-int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
unsigned int partial, done;
const u8 *src;
@@ -232,18 +232,16 @@ int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
partial = 0;
}
memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
}
EXPORT_SYMBOL(sha256_update);
-int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
- return sha256_update(sctx, data, len);
+ sha256_update(sctx, data, len);
}
EXPORT_SYMBOL(sha224_update);
-static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
+static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
{
__be32 *dst = (__be32 *)out;
__be64 bits;
@@ -268,19 +266,17 @@ static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx));
-
- return 0;
}
-int sha256_final(struct sha256_state *sctx, u8 *out)
+void sha256_final(struct sha256_state *sctx, u8 *out)
{
- return __sha256_final(sctx, out, 8);
+ __sha256_final(sctx, out, 8);
}
EXPORT_SYMBOL(sha256_final);
-int sha224_final(struct sha256_state *sctx, u8 *out)
+void sha224_final(struct sha256_state *sctx, u8 *out)
{
- return __sha256_final(sctx, out, 7);
+ __sha256_final(sctx, out, 7);
}
EXPORT_SYMBOL(sha224_final);
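
With the library SHA-256 helpers converted to return void, callers no longer have an error code to propagate. A minimal usage sketch, assuming the sha256_init() helper and SHA256_DIGEST_SIZE definition from <crypto/sha.h> and an invented digest_buffer() wrapper:

#include <crypto/sha.h>

static void digest_buffer(const u8 *data, unsigned int len,
			  u8 out[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);	/* no return value to check */
	sha256_final(&sctx, out);
}
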
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index a75ee30b77cb..06d3135bd184 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
* Generic 'turn off all lock debugging' function:
*/
-int debug_locks_off(void)
+noinstr int debug_locks_off(void)
{
if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 33ffbf308853..a00ee6eedc7c 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -74,7 +74,7 @@ void show_regs_print_info(const char *log_lvl)
static void __dump_stack(void)
{
dump_stack_print_info(KERN_DEFAULT);
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_DEFAULT);
}
/**
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c60409138e13..321437bbf87d 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -876,6 +876,14 @@ static const struct file_operations ddebug_proc_fops = {
.write = ddebug_proc_write
};
+static const struct proc_ops proc_fops = {
+ .proc_open = ddebug_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = ddebug_proc_write
+};
+
/*
* Allocate a new ddebug_table for the given module
* and add it to the global list.
@@ -991,15 +999,25 @@ static void ddebug_remove_all_tables(void)
static __initdata int ddebug_init_success;
-static int __init dynamic_debug_init_debugfs(void)
+static int __init dynamic_debug_init_control(void)
{
- struct dentry *dir;
+ struct proc_dir_entry *procfs_dir;
+ struct dentry *debugfs_dir;
if (!ddebug_init_success)
return -ENODEV;
- dir = debugfs_create_dir("dynamic_debug", NULL);
- debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
+ /* Create the control file in debugfs if it is enabled */
+ if (debugfs_initialized()) {
+ debugfs_dir = debugfs_create_dir("dynamic_debug", NULL);
+ debugfs_create_file("control", 0644, debugfs_dir, NULL,
+ &ddebug_proc_fops);
+ }
+
+ /* Also create the control file in procfs */
+ procfs_dir = proc_mkdir("dynamic_debug", NULL);
+ if (procfs_dir)
+ proc_create("control", 0644, procfs_dir, &proc_fops);
return 0;
}
@@ -1013,9 +1031,14 @@ static int __init dynamic_debug_init(void)
int n = 0, entries = 0, modct = 0;
int verbose_bytes = 0;
- if (__start___verbose == __stop___verbose) {
- pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
- return 1;
+ if (&__start___verbose == &__stop___verbose) {
+ if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
+ pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
+ return 1;
+ }
+ pr_info("Ignore empty _ddebug table in a CONFIG_DYNAMIC_DEBUG_CORE build\n");
+ ddebug_init_success = 1;
+ return 0;
}
iter = __start___verbose;
modname = iter->modname;
@@ -1077,4 +1100,4 @@ out_err:
early_initcall(dynamic_debug_init);
/* Debugfs setup must be done later */
-fs_initcall(dynamic_debug_init_debugfs);
+fs_initcall(dynamic_debug_init_control);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 8186ca84910b..ce12621b4275 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -106,7 +106,9 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
unsigned int fail_nth = READ_ONCE(current->fail_nth);
if (fail_nth) {
- if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+ fail_nth--;
+ WRITE_ONCE(current->fail_nth, fail_nth);
+ if (!fail_nth)
goto fail;
return false;
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 7852bfff50b1..451543937524 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -266,8 +266,7 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
if (numerator >
(((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
return;
- } else
- fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ }
+
+ __fprop_inc_percpu(p, pl);
}
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 3f0e18543de8..5ee3526f71b8 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -12,7 +12,6 @@
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
@@ -61,13 +60,14 @@ static inline int ioremap_pmd_enabled(void) { return 0; }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
{
pte_t *pte;
u64 pfn;
pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel(pmd, addr);
+ pte = pte_alloc_kernel_track(pmd, addr, mask);
if (!pte)
return -ENOMEM;
do {
@@ -75,6 +75,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
+ *mask |= PGTBL_PTE_MODIFIED;
return 0;
}
@@ -101,21 +102,24 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
}
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
- pmd = pmd_alloc(&init_mm, pud, addr);
+ pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
+ if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
+ *mask |= PGTBL_PMD_MODIFIED;
continue;
+ }
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
+ if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
return -ENOMEM;
} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
@@ -144,21 +148,24 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
}
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
- pud = pud_alloc(&init_mm, p4d, addr);
+ pud = pud_alloc_track(&init_mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
+ if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
+ *mask |= PGTBL_PUD_MODIFIED;
continue;
+ }
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
+ if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
return -ENOMEM;
} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
@@ -187,21 +194,24 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
}
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
- p4d = p4d_alloc(&init_mm, pgd, addr);
+ p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
+ if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
+ *mask |= PGTBL_P4D_MODIFIED;
continue;
+ }
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
+ if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
return -ENOMEM;
} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
@@ -214,6 +224,7 @@ int ioremap_page_range(unsigned long addr,
unsigned long start;
unsigned long next;
int err;
+ pgtbl_mod_mask mask = 0;
might_sleep();
BUG_ON(addr >= end);
@@ -222,13 +233,17 @@ int ioremap_page_range(unsigned long addr,
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
+ err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
+ &mask);
if (err)
break;
} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
flush_cache_vmap(start, end);
+ if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ arch_sync_kernel_mappings(start, end);
+
return err;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 51595bf3af85..bf538c2bec77 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -8,6 +8,7 @@
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
+#include <linux/instrumented.h>
#define PIPE_PARANOIA /* for now */
@@ -138,7 +139,7 @@
static int copyout(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -147,7 +148,7 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
if (access_ok(from, n)) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
n = raw_copy_from_user(to, from, n);
}
return n;
@@ -639,7 +640,7 @@ EXPORT_SYMBOL(_copy_to_iter);
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = copy_to_user_mcsafe((__force void *) to, from, n);
}
return n;
diff --git a/lib/kobject.c b/lib/kobject.c
index 83198cb37d8d..1e4b7382a88e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -6,7 +6,7 @@
* Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2007 Novell Inc.
*
- * Please see the file Documentation/kobject.txt for critical information
+ * Please see the file Documentation/core-api/kobject.rst for critical information
* about using the kobject interface.
*/
@@ -620,6 +620,13 @@ void kobject_del(struct kobject *kobj)
if (ktype)
sysfs_remove_groups(kobj, ktype->default_groups);
+ /* send "remove" if the caller did not do it but sent "add" */
+ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+ pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
+ kobject_name(kobj), kobj);
+ kobject_uevent(kobj, KOBJ_REMOVE);
+ }
+
sysfs_remove_dir(kobj);
sysfs_put(sd);
@@ -670,16 +677,9 @@ static void kobject_cleanup(struct kobject *kobj)
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
- pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
+ pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
kobject_name(kobj), kobj);
- /* send "remove" if the caller did not do it but sent "add" */
- if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
- pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
- kobject_name(kobj), kobj);
- kobject_uevent(kobj, KOBJ_REMOVE);
- }
-
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
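For context, the moved hunk means a caller that sent KOBJ_ADD but never sent KOBJ_REMOVE now gets the automatic "remove" uevent at kobject_del() time instead of at final cleanup; a rough lifecycle sketch with hypothetical names:

    #include <linux/kobject.h>
    #include <linux/errno.h>

    /* Sketch: typical add/remove sequence affected by the moved hunk. */
    static struct kobject *example_kobj;

    static int example_register(struct kobject *parent)
    {
            example_kobj = kobject_create_and_add("example", parent);
            if (!example_kobj)
                    return -ENOMEM;
            kobject_uevent(example_kobj, KOBJ_ADD);  /* caller sends "add" */
            return 0;
    }

    static void example_unregister(void)
    {
            kobject_del(example_kobj);   /* "remove" auto-sent here if owed */
            kobject_put(example_kobj);
    }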
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 065aa16f448b..00909e6a2443 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -14,8 +14,18 @@ menuconfig KUNIT
if KUNIT
+config KUNIT_DEBUGFS
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enable debugfs representation for kunit. Currently this consists
+ of /sys/kernel/debug/kunit/<test_suite>/results files for each
+ test suite, which allow users to see results of the last test suite
+ run that occurred.
+
config KUNIT_TEST
- tristate "KUnit test for KUnit"
+ tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
help
Enables the unit tests for the KUnit test framework. These tests test
the KUnit test framework itself; the tests are both written using
@@ -24,7 +34,8 @@ config KUNIT_TEST
expected.
config KUNIT_EXAMPLE_TEST
- tristate "Example test for KUnit"
+ tristate "Example test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
help
Enables an example unit test that illustrates some of the basic
features of KUnit. This test only exists to help new users understand
@@ -33,4 +44,18 @@ config KUNIT_EXAMPLE_TEST
is intended for curious hackers who would like to understand how to
use KUnit for kernel development.
+config KUNIT_ALL_TESTS
+ tristate "All KUnit tests with satisfied dependencies"
+ help
+ Enables all KUnit tests, if they can be enabled.
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
endif # KUNIT
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index fab55649b69a..724b94311ca3 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -5,6 +5,10 @@ kunit-objs += test.o \
assert.o \
try-catch.o
+ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
+kunit-objs += debugfs.o
+endif
+
obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
# string-stream-test compiles built-in only.
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index b24bebca052d..33acdaa28a7d 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -6,6 +6,7 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
#include <kunit/assert.h>
+#include <kunit/test.h>
#include "string-stream.h"
@@ -53,12 +54,12 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
if (unary_assert->expected_true)
string_stream_add(stream,
- "\tExpected %s to be true, but is false\n",
- unary_assert->condition);
+ KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
+ unary_assert->condition);
else
string_stream_add(stream,
- "\tExpected %s to be false, but is true\n",
- unary_assert->condition);
+ KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
+ unary_assert->condition);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
@@ -72,13 +73,13 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
if (!ptr_assert->value) {
string_stream_add(stream,
- "\tExpected %s is not null, but is\n",
- ptr_assert->text);
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ ptr_assert->text);
} else if (IS_ERR(ptr_assert->value)) {
string_stream_add(stream,
- "\tExpected %s is not error, but is: %ld\n",
- ptr_assert->text,
- PTR_ERR(ptr_assert->value));
+ KUNIT_SUBTEST_INDENT "Expected %s is not error, but is: %ld\n",
+ ptr_assert->text,
+ PTR_ERR(ptr_assert->value));
}
kunit_assert_print_msg(assert, stream);
}
@@ -92,16 +93,16 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %lld\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %lld",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
@@ -114,16 +115,16 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %pK\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %pK",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
@@ -136,16 +137,16 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %s\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %s",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
new file mode 100644
index 000000000000..9214c493d8b7
--- /dev/null
+++ b/lib/kunit/debugfs.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ * Author: Alan Maguire <alan.maguire@oracle.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include <kunit/test.h>
+
+#include "string-stream.h"
+
+#define KUNIT_DEBUGFS_ROOT "kunit"
+#define KUNIT_DEBUGFS_RESULTS "results"
+
+/*
+ * Create a debugfs representation of test suites:
+ *
+ * Path Semantics
+ * /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
+ * testsuite
+ *
+ */
+
+static struct dentry *debugfs_rootdir;
+
+void kunit_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(debugfs_rootdir);
+}
+
+void kunit_debugfs_init(void)
+{
+ if (!debugfs_rootdir)
+ debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
+}
+
+static void debugfs_print_result(struct seq_file *seq,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ if (!test_case || !test_case->log)
+ return;
+
+ seq_printf(seq, "%s", test_case->log);
+}
+
+/*
+ * /sys/kernel/debug/kunit/<testsuite>/results shows all results for testsuite.
+ */
+static int debugfs_print_results(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+ bool success = kunit_suite_has_succeeded(suite);
+ struct kunit_case *test_case;
+
+ if (!suite || !suite->log)
+ return 0;
+
+ seq_printf(seq, "%s", suite->log);
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ debugfs_print_result(seq, suite, test_case);
+
+ seq_printf(seq, "%s %d - %s\n",
+ kunit_status_to_string(success), 1, suite->name);
+ return 0;
+}
+
+static int debugfs_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static int debugfs_results_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_results, suite);
+}
+
+static const struct file_operations debugfs_results_fops = {
+ .open = debugfs_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ /* Allocate logs before creating debugfs representation. */
+ suite->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+ kunit_suite_for_each_test_case(suite, test_case)
+ test_case->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+
+ suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
+
+ debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
+ suite->debugfs,
+ suite, &debugfs_results_fops);
+}
+
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ debugfs_remove_recursive(suite->debugfs);
+ kfree(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ kfree(test_case->log);
+}
diff --git a/lib/kunit/debugfs.h b/lib/kunit/debugfs.h
new file mode 100644
index 000000000000..dcc7d7556107
--- /dev/null
+++ b/lib/kunit/debugfs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef _KUNIT_DEBUGFS_H
+#define _KUNIT_DEBUGFS_H
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite);
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite);
+void kunit_debugfs_init(void);
+void kunit_debugfs_cleanup(void);
+
+#else
+
+static inline void kunit_debugfs_create_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_destroy_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_init(void) { }
+
+static inline void kunit_debugfs_cleanup(void) { }
+
+#endif /* CONFIG_KUNIT_DEBUGFS */
+
+#endif /* _KUNIT_DEBUGFS_H */
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index ccb8d2e332f7..4f3d36a72f8f 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -134,7 +134,7 @@ static void kunit_resource_test_init_resources(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
- kunit_init_test(&ctx->test, "testing_test_init_test");
+ kunit_init_test(&ctx->test, "testing_test_init_test", NULL);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
@@ -301,7 +301,7 @@ static int kunit_resource_test_init(struct kunit *test)
test->priv = ctx;
- kunit_init_test(&ctx->test, "test_test_context");
+ kunit_init_test(&ctx->test, "test_test_context", NULL);
return 0;
}
@@ -329,6 +329,44 @@ static struct kunit_suite kunit_resource_test_suite = {
.exit = kunit_resource_test_exit,
.test_cases = kunit_resource_test_cases,
};
-kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite);
+
+static void kunit_log_test(struct kunit *test);
+
+static struct kunit_case kunit_log_test_cases[] = {
+ KUNIT_CASE(kunit_log_test),
+ {}
+};
+
+static struct kunit_suite kunit_log_test_suite = {
+ .name = "kunit-log-test",
+ .test_cases = kunit_log_test_cases,
+};
+
+static void kunit_log_test(struct kunit *test)
+{
+ struct kunit_suite *suite = &kunit_log_test_suite;
+
+ kunit_log(KERN_INFO, test, "put this in log.");
+ kunit_log(KERN_INFO, test, "this too.");
+ kunit_log(KERN_INFO, suite, "add to suite log.");
+ kunit_log(KERN_INFO, suite, "along with this.");
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "put this in log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "this too."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite->log, "add to suite log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite->log, "along with this."));
+#else
+ KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
+ KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
+#endif
+}
+
+kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
+ &kunit_log_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 9242f932896c..ccb2ffad8dcf 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/sched/debug.h>
+#include "debugfs.h"
#include "string-stream.h"
#include "try-catch-impl.h"
@@ -28,73 +29,117 @@ static void kunit_print_tap_version(void)
}
}
-static size_t kunit_test_cases_len(struct kunit_case *test_cases)
+/*
+ * Append formatted message to log, size of which is limited to
+ * KUNIT_LOG_SIZE bytes (including null terminating byte).
+ */
+void kunit_log_append(char *log, const char *fmt, ...)
+{
+ char line[KUNIT_LOG_SIZE];
+ va_list args;
+ int len_left;
+
+ if (!log)
+ return;
+
+ len_left = KUNIT_LOG_SIZE - strlen(log) - 1;
+ if (len_left <= 0)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(line, sizeof(line), fmt, args);
+ va_end(args);
+
+ strncat(log, line, len_left);
+}
+EXPORT_SYMBOL_GPL(kunit_log_append);
+
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
{
struct kunit_case *test_case;
size_t len = 0;
- for (test_case = test_cases; test_case->run_case; test_case++)
+ kunit_suite_for_each_test_case(suite, test_case)
len++;
return len;
}
+EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_subtest_start(struct kunit_suite *suite)
{
kunit_print_tap_version();
- pr_info("\t# Subtest: %s\n", suite->name);
- pr_info("\t1..%zd\n", kunit_test_cases_len(suite->test_cases));
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
+ suite->name);
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
+ kunit_suite_num_test_cases(suite));
}
-static void kunit_print_ok_not_ok(bool should_indent,
+static void kunit_print_ok_not_ok(void *test_or_suite,
+ bool is_test,
bool is_ok,
size_t test_number,
const char *description)
{
- const char *indent, *ok_not_ok;
-
- if (should_indent)
- indent = "\t";
- else
- indent = "";
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit *test = is_test ? test_or_suite : NULL;
- if (is_ok)
- ok_not_ok = "ok";
+ /*
+ * We do not log the test suite results as doing so would
+ * mean debugfs display would consist of the test suite
+ * description and status prior to individual test results.
+ * Hence directly printk the suite status, and we will
+ * separately seq_printf() the suite status for the debugfs
+ * representation.
+ */
+ if (suite)
+ pr_info("%s %zd - %s\n",
+ kunit_status_to_string(is_ok),
+ test_number, description);
else
- ok_not_ok = "not ok";
-
- pr_info("%s%s %zd - %s\n", indent, ok_not_ok, test_number, description);
+ kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
+ kunit_status_to_string(is_ok),
+ test_number, description);
}
-static bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+bool kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
- for (test_case = suite->test_cases; test_case->run_case; test_case++)
+ kunit_suite_for_each_test_case(suite, test_case) {
if (!test_case->success)
return false;
+ }
return true;
}
+EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static void kunit_print_subtest_end(struct kunit_suite *suite)
{
static size_t kunit_suite_counter = 1;
- kunit_print_ok_not_ok(false,
+ kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
suite->name);
}
-static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
- size_t test_number)
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case)
{
- kunit_print_ok_not_ok(true,
- test_case->success,
- test_number,
- test_case->name);
+ struct kunit_case *tc;
+ unsigned int i = 1;
+
+ kunit_suite_for_each_test_case(suite, tc) {
+ if (tc == test_case)
+ return i;
+ i++;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(kunit_test_case_num);
static void kunit_print_string_stream(struct kunit *test,
struct string_stream *stream)
@@ -102,6 +147,9 @@ static void kunit_print_string_stream(struct kunit *test,
struct string_stream_fragment *fragment;
char *buf;
+ if (string_stream_is_empty(stream))
+ return;
+
buf = string_stream_get_string(stream);
if (!buf) {
kunit_err(test,
@@ -175,11 +223,14 @@ void kunit_do_assertion(struct kunit *test,
}
EXPORT_SYMBOL_GPL(kunit_do_assertion);
-void kunit_init_test(struct kunit *test, const char *name)
+void kunit_init_test(struct kunit *test, const char *name, char *log)
{
spin_lock_init(&test->lock);
INIT_LIST_HEAD(&test->resources);
test->name = name;
+ test->log = log;
+ if (test->log)
+ test->log[0] = '\0';
test->success = true;
}
EXPORT_SYMBOL_GPL(kunit_init_test);
@@ -290,7 +341,7 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
struct kunit_try_catch *try_catch;
struct kunit test;
- kunit_init_test(&test, test_case->name);
+ kunit_init_test(&test, test_case->name, test_case->log);
try_catch = &test.try_catch;
kunit_try_catch_init(try_catch,
@@ -303,19 +354,20 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
kunit_try_catch_run(try_catch, &context);
test_case->success = test.success;
+
+ kunit_print_ok_not_ok(&test, true, test_case->success,
+ kunit_test_case_num(suite, test_case),
+ test_case->name);
}
int kunit_run_tests(struct kunit_suite *suite)
{
struct kunit_case *test_case;
- size_t test_case_count = 1;
kunit_print_subtest_start(suite);
- for (test_case = suite->test_cases; test_case->run_case; test_case++) {
+ kunit_suite_for_each_test_case(suite, test_case)
kunit_run_case_catch_errors(suite, test_case);
- kunit_print_test_case_ok_not_ok(test_case, test_case_count++);
- }
kunit_print_subtest_end(suite);
@@ -323,6 +375,37 @@ int kunit_run_tests(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_run_tests);
+static void kunit_init_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_create_suite(suite);
+}
+
+int __kunit_test_suites_init(struct kunit_suite **suites)
+{
+ unsigned int i;
+
+ for (i = 0; suites[i] != NULL; i++) {
+ kunit_init_suite(suites[i]);
+ kunit_run_tests(suites[i]);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
+
+static void kunit_exit_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_destroy_suite(suite);
+}
+
+void __kunit_test_suites_exit(struct kunit_suite **suites)
+{
+ unsigned int i;
+
+ for (i = 0; suites[i] != NULL; i++)
+ kunit_exit_suite(suites[i]);
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+
struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
kunit_resource_init_t init,
kunit_resource_free_t free,
@@ -489,12 +572,15 @@ EXPORT_SYMBOL_GPL(kunit_cleanup);
static int __init kunit_init(void)
{
+ kunit_debugfs_init();
+
return 0;
}
late_initcall(kunit_init);
static void __exit kunit_exit(void)
{
+ kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
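The new __kunit_test_suites_init()/__kunit_test_suites_exit() pair is what suite registration ultimately calls into, and kunit_debugfs_create_suite() gives each registered suite a results file; a minimal suite sketch (the test body and names are illustrative, not from this patch):

    #include <kunit/test.h>

    /* Sketch: a trivial suite whose log now also appears under debugfs. */
    static void example_add_test(struct kunit *test)
    {
            kunit_log(KERN_INFO, test, "checking 2 + 2");  /* captured in test->log */
            KUNIT_EXPECT_EQ(test, 4, 2 + 2);
    }

    static struct kunit_case example_test_cases[] = {
            KUNIT_CASE(example_add_test),
            {}
    };

    static struct kunit_suite example_test_suite = {
            .name = "example",
            .test_cases = example_test_cases,
    };
    kunit_test_suites(&example_test_suite);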
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
new file mode 100644
index 000000000000..9495ef3572b7
--- /dev/null
+++ b/lib/linear_ranges.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * helpers to map values in a linear range to range index
+ *
+ * Original idea borrowed from regulator framework
+ *
+ * It might be useful if we could also support inversely proportional ranges?
+ * Copyright 2020 ROHM Semiconductors
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+
+/**
+ * linear_range_values_in_range - return the number of values in a range
+ * @r: pointer to linear range where values are counted
+ *
+ * Compute the number of values in the range pointed to by @r. Note that the
+ * values can all be equal - a range with selectors 0,...,2 and step 0 still
+ * contains 3 values even though they are all equal.
+ *
+ * Return: the number of values in the range pointed to by @r
+ */
+unsigned int linear_range_values_in_range(const struct linear_range *r)
+{
+ if (!r)
+ return 0;
+ return r->max_sel - r->min_sel + 1;
+}
+EXPORT_SYMBOL_GPL(linear_range_values_in_range);
+
+/**
+ * linear_range_values_in_range_array - return the number of values in ranges
+ * @r: pointer to array of linear ranges where values are counted
+ * @ranges: number of ranges to include in the computation
+ *
+ * Compute the number of values in the ranges pointed to by @r. Note that the
+ * values can all be equal - a range with selectors 0,...,2 and step 0 still
+ * contains 3 values even though they are all equal.
+ *
+ * Return: the number of values in the first @ranges ranges pointed to by @r
+ */
+unsigned int linear_range_values_in_range_array(const struct linear_range *r,
+ int ranges)
+{
+ int i, values_in_range = 0;
+
+ for (i = 0; i < ranges; i++) {
+ int values;
+
+ values = linear_range_values_in_range(&r[i]);
+ if (!values)
+ return values;
+
+ values_in_range += values;
+ }
+ return values_in_range;
+}
+EXPORT_SYMBOL_GPL(linear_range_values_in_range_array);
+
+/**
+ * linear_range_get_max_value - return the largest value in a range
+ * @r: pointer to linear range where value is looked from
+ *
+ * Return: the largest value in the given range
+ */
+unsigned int linear_range_get_max_value(const struct linear_range *r)
+{
+ return r->min + (r->max_sel - r->min_sel) * r->step;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_max_value);
+
+/**
+ * linear_range_get_value - fetch a value from given range
+ * @r: pointer to linear range where value is looked from
+ * @selector: selector for which the value is searched
+ * @val: address where found value is updated
+ *
+ * Search the given range for the value which matches the given selector.
+ *
+ * Return: 0 on success, -EINVAL if the given selector is not found in the
+ * range.
+ */
+int linear_range_get_value(const struct linear_range *r, unsigned int selector,
+ unsigned int *val)
+{
+ if (r->min_sel > selector || r->max_sel < selector)
+ return -EINVAL;
+
+ *val = r->min + (selector - r->min_sel) * r->step;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_value);
+
+/**
+ * linear_range_get_value_array - fetch a value from array of ranges
+ * @r: pointer to array of linear ranges where value is looked from
+ * @ranges: number of ranges in the array
+ * @selector: selector for which the value is searched
+ * @val: address where found value is updated
+ *
+ * Search through an array of ranges for the value which matches the given selector.
+ *
+ * Return: 0 on success, -EINVAL if the given selector is not found in any of
+ * the ranges.
+ */
+int linear_range_get_value_array(const struct linear_range *r, int ranges,
+ unsigned int selector, unsigned int *val)
+{
+ int i;
+
+ for (i = 0; i < ranges; i++)
+ if (r[i].min_sel <= selector && r[i].max_sel >= selector)
+ return linear_range_get_value(&r[i], selector, val);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_value_array);
+
+/**
+ * linear_range_get_selector_low - return linear range selector for value
+ * @r: pointer to linear range where selector is looked from
+ * @val: value for which the selector is searched
+ * @selector: address where found selector value is updated
+ * @found: flag to indicate that given value was in the range
+ *
+ * Return the selector for which the range value is the closest match to the
+ * given input value. A value matches if it is equal to or smaller than the
+ * given value. If the given value is in the range, @found is set true.
+ *
+ * Return: 0 on success, -EINVAL if the range is invalid or does not contain
+ * a value smaller than or equal to the given value
+ */
+int linear_range_get_selector_low(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found)
+{
+ *found = false;
+
+ if (r->min > val)
+ return -EINVAL;
+
+ if (linear_range_get_max_value(r) < val) {
+ *selector = r->max_sel;
+ return 0;
+ }
+
+ *found = true;
+
+ if (r->step == 0)
+ *selector = r->min_sel;
+ else
+ *selector = (val - r->min) / r->step + r->min_sel;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_low);
+
+/**
+ * linear_range_get_selector_low_array - return linear range selector for value
+ * @r: pointer to array of linear ranges where selector is looked from
+ * @ranges: number of ranges to scan from the array
+ * @val: value for which the selector is searched
+ * @selector: address where found selector value is updated
+ * @found: flag to indicate that given value was in the range
+ *
+ * Scan an array of ranges for the selector whose range value matches the
+ * given input value. A value matches if it is equal to or smaller than the
+ * given value. If the given value is found to be in a range, scanning is
+ * stopped and @found is set true. If a range whose values are all smaller
+ * than the given value is found (its maximum is below the given value), that
+ * range's biggest selector is written to @selector, but scanning continues
+ * and @found is left false.
+ *
+ * Return: 0 on success, -EINVAL if the range array is invalid or does not
+ * contain a range with a value smaller than or equal to the given value
+ */
+int linear_range_get_selector_low_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found)
+{
+ int i;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ranges; i++) {
+ int tmpret;
+
+ tmpret = linear_range_get_selector_low(&r[i], val, selector,
+ found);
+ if (!tmpret)
+ ret = 0;
+
+ if (*found)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array);
+
+/**
+ * linear_range_get_selector_high - return linear range selector for value
+ * @r: pointer to linear range where selector is looked from
+ * @val: value for which the selector is searched
+ * @selector: address where found selector value is updated
+ * @found: flag to indicate that given value was in the range
+ *
+ * Return the selector for which the range value is the closest match to the
+ * given input value. A value matches if it is equal to or higher than the
+ * given value. If the given value is in the range, @found is set true.
+ *
+ * Return: 0 on success, -EINVAL if the range is invalid or does not contain
+ * a value greater than or equal to the given value
+ */
+int linear_range_get_selector_high(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found)
+{
+ *found = false;
+
+ if (linear_range_get_max_value(r) < val)
+ return -EINVAL;
+
+ if (r->min > val) {
+ *selector = r->min_sel;
+ return 0;
+ }
+
+ *found = true;
+
+ if (r->step == 0)
+ *selector = r->max_sel;
+ else
+ *selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
+
+MODULE_DESCRIPTION("linear-ranges helper");
+MODULE_LICENSE("GPL");
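A minimal caller-side sketch of the new helpers; the numbers are invented, but the struct fields used (min, min_sel, max_sel, step) are the ones this file relies on:

    #include <linux/linear_range.h>

    /* Sketch: selectors 0..15 map linearly to 1000 + 50 * selector. */
    static const struct linear_range volt_range = {
            .min = 1000,
            .min_sel = 0,
            .max_sel = 15,
            .step = 50,
    };

    static int sketch_lookup(void)
    {
            unsigned int val, sel;
            bool found;
            int ret;

            ret = linear_range_get_value(&volt_range, 3, &val);     /* val = 1150 */
            if (ret)
                    return ret;

            /* Largest selector whose value is <= 1180: sel = 3, found = true. */
            return linear_range_get_selector_low(&volt_range, 1180, &sel, &found);
    }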
diff --git a/lib/list-test.c b/lib/list-test.c
index 76babb1df889..ee09505df16f 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -659,7 +659,7 @@ static void list_test_list_for_each_prev_safe(struct kunit *test)
static void list_test_list_for_each_entry(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
- static LIST_HEAD(list);
+ LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
@@ -680,7 +680,7 @@ static void list_test_list_for_each_entry(struct kunit *test)
static void list_test_list_for_each_entry_reverse(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
- static LIST_HEAD(list);
+ LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index f511a99bb389..f32fe481b492 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -229,13 +229,13 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
}
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
-#define BUILD_LOGIC_IO(bw, type) \
-type logic_in##bw(unsigned long addr) \
+#define BUILD_LOGIC_IO(bwl, type) \
+type logic_in##bwl(unsigned long addr) \
{ \
type ret = (type)~0; \
\
if (addr < MMIO_UPPER_LIMIT) { \
- ret = read##bw(PCI_IOBASE + addr); \
+ ret = _in##bwl(addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
@@ -248,10 +248,10 @@ type logic_in##bw(unsigned long addr) \
return ret; \
} \
\
-void logic_out##bw(type value, unsigned long addr) \
+void logic_out##bwl(type value, unsigned long addr) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- write##bw(value, PCI_IOBASE + addr); \
+ _out##bwl(value, addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
@@ -263,11 +263,11 @@ void logic_out##bw(type value, unsigned long addr) \
} \
} \
\
-void logic_ins##bw(unsigned long addr, void *buffer, \
- unsigned int count) \
+void logic_ins##bwl(unsigned long addr, void *buffer, \
+ unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- reads##bw(PCI_IOBASE + addr, buffer, count); \
+ reads##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
@@ -280,11 +280,11 @@ void logic_ins##bw(unsigned long addr, void *buffer, \
\
} \
\
-void logic_outs##bw(unsigned long addr, const void *buffer, \
- unsigned int count) \
+void logic_outs##bwl(unsigned long addr, const void *buffer, \
+ unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- writes##bw(PCI_IOBASE + addr, buffer, count); \
+ writes##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0c9d3ad17e0f..5371dab6b481 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -141,6 +141,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
* space in the output for those 18 bytes earlier, upon
* entering the shortcut (in other words, there is a
* combined check for both stages).
+ *
+ * The & in the likely() below is intentionally not && so that
+ * some compilers can produce better parallelized runtime code
*/
if ((endOnInput ? length != RUN_MASK : length <= 8)
/*
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 717c940112f9..8ad5ba2b86e2 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -268,6 +268,19 @@ m_len_done:
*op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
else {
+ if (unlikely(((m_off & 0x403f) == 0x403f)
+ && (m_len >= 261)
+ && (m_len <= 264))
+ && likely(bitstream_version)) {
+ // Under lzo-rle, block copies
+ // for 261 <= length <= 264 and
+ // (distance & 0x80f3) == 0x80f3
+ // can result in ambiguous
+ // output. Adjust length
+ // to 260 to prevent ambiguity.
+ ip -= m_len - 260;
+ m_len = 260;
+ }
m_len -= M4_MAX_LEN;
*op++ = (M4_MARKER | ((m_off >> 11) & 8));
while (unlikely(m_len > 255)) {
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
index 15bd50d92308..f19bc9734fa7 100644
--- a/lib/math/Kconfig
+++ b/lib/math/Kconfig
@@ -6,7 +6,12 @@ config CORDIC
calculations are in fixed point. Module will be called cordic.
config PRIME_NUMBERS
- tristate
+ tristate "Simple prime number generator for testing"
+ help
+ This option provides a simple prime number generator for test
+ modules.
+
+ If unsure, say N.
config RATIONAL
bool
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index 052f5b727be7..d42cebf7407f 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
@@ -253,7 +253,7 @@ static void dump_primes(void)
if (buf)
bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
- pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
rcu_read_unlock();
@@ -273,7 +273,7 @@ static int selftest(unsigned long max)
bool fast = is_prime_number(x);
if (slow != fast) {
- pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+ pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
x, slow ? "yes" : "no", fast ? "yes" : "no");
goto err;
}
@@ -282,14 +282,14 @@ static int selftest(unsigned long max)
continue;
if (next_prime_number(last) != x) {
- pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+ pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
last, x, next_prime_number(last));
goto err;
}
last = x;
}
- pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+ pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
return 0;
err:
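The selftest above exercises the exported lookup helpers; a caller-side sketch assuming the iterator declared in <linux/prime_numbers.h>:

    #include <linux/prime_numbers.h>

    /* Sketch: count the primes up to and including 100 (expected: 25). */
    static unsigned long count_small_primes(void)
    {
            unsigned long p, count = 0;

            for_each_prime_number(p, 100)
                    count++;

            return count;
    }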
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 2dceaca27489..afbd99987cf8 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -653,7 +653,7 @@ do { \
************** MIPS/64 **************
***************************************/
#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
/*
* GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
* code below, so we special case MIPS64r6 until the compiler can do better.
@@ -722,22 +722,22 @@ do { \
do { \
if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"%r" ((USItype)(al)), \
"rI" ((USItype)(bl))); \
else \
__asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"%r" ((USItype)(al)), \
@@ -747,36 +747,36 @@ do { \
do { \
if (__builtin_constant_p(ah) && (ah) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"rI" ((USItype)(al)), \
"r" ((USItype)(bl))); \
else \
__asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "r" ((USItype)(ah)), \
"r" ((USItype)(bh)), \
"rI" ((USItype)(al)), \
@@ -787,7 +787,7 @@ do { \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mulhwu %0,%1,%2" \
- : "=r" ((USItype) ph) \
+ : "=r" (ph) \
: "%r" (__m0), \
"r" (__m1)); \
(pl) = __m0 * __m1; \
diff --git a/lib/nlattr.c b/lib/nlattr.c
index cace9b307781..bc5b5cf608c4 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -44,8 +44,22 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S64] = sizeof(s64),
};
+/*
+ * Nested policies might refer back to the original
+ * policy in some cases, and userspace could try to
+ * abuse that and recurse by nesting in the right
+ * ways. Limit recursion to avoid this problem.
+ */
+#define MAX_POLICY_RECURSION_DEPTH 10
+
+static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb, unsigned int depth);
+
static int validate_nla_bitfield32(const struct nlattr *nla,
- const u32 *valid_flags_mask)
+ const u32 valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
@@ -53,11 +67,11 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
return -EINVAL;
/*disallow invalid bit selector */
- if (bf->selector & ~*valid_flags_mask)
+ if (bf->selector & ~valid_flags_mask)
return -EINVAL;
/*disallow invalid bit values */
- if (bf->value & ~*valid_flags_mask)
+ if (bf->value & ~valid_flags_mask)
return -EINVAL;
/*disallow valid bit values that are not selected*/
@@ -70,7 +84,7 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack,
- unsigned int validate)
+ unsigned int validate, unsigned int depth)
{
const struct nlattr *entry;
int rem;
@@ -87,8 +101,9 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
return -ERANGE;
}
- ret = __nla_validate(nla_data(entry), nla_len(entry),
- maxtype, policy, validate, extack);
+ ret = __nla_validate_parse(nla_data(entry), nla_len(entry),
+ maxtype, policy, validate, extack,
+ NULL, depth + 1);
if (ret < 0)
return ret;
}
@@ -96,17 +111,58 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
return 0;
}
-static int nla_validate_int_range(const struct nla_policy *pt,
- const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+void nla_get_range_unsigned(const struct nla_policy *pt,
+ struct netlink_range_validation *range)
{
- bool validate_min, validate_max;
- s64 value;
+ WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR &&
+ (pt->min < 0 || pt->max < 0));
- validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
- pt->validation_type == NLA_VALIDATE_MIN;
- validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
- pt->validation_type == NLA_VALIDATE_MAX;
+ range->min = 0;
+
+ switch (pt->type) {
+ case NLA_U8:
+ range->max = U8_MAX;
+ break;
+ case NLA_U16:
+ range->max = U16_MAX;
+ break;
+ case NLA_U32:
+ range->max = U32_MAX;
+ break;
+ case NLA_U64:
+ case NLA_MSECS:
+ range->max = U64_MAX;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_RANGE:
+ range->min = pt->min;
+ range->max = pt->max;
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ *range = *pt->range;
+ break;
+ case NLA_VALIDATE_MIN:
+ range->min = pt->min;
+ break;
+ case NLA_VALIDATE_MAX:
+ range->max = pt->max;
+ break;
+ default:
+ break;
+ }
+}
+
+static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct netlink_range_validation range;
+ u64 value;
switch (pt->type) {
case NLA_U8:
@@ -118,6 +174,77 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_U32:
value = nla_get_u32(nla);
break;
+ case NLA_U64:
+ case NLA_MSECS:
+ value = nla_get_u64(nla);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nla_get_range_unsigned(pt, &range);
+
+ if (value < range.min || value > range.max) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+void nla_get_range_signed(const struct nla_policy *pt,
+ struct netlink_range_validation_signed *range)
+{
+ switch (pt->type) {
+ case NLA_S8:
+ range->min = S8_MIN;
+ range->max = S8_MAX;
+ break;
+ case NLA_S16:
+ range->min = S16_MIN;
+ range->max = S16_MAX;
+ break;
+ case NLA_S32:
+ range->min = S32_MIN;
+ range->max = S32_MAX;
+ break;
+ case NLA_S64:
+ range->min = S64_MIN;
+ range->max = S64_MAX;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_RANGE:
+ range->min = pt->min;
+ range->max = pt->max;
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ *range = *pt->range_signed;
+ break;
+ case NLA_VALIDATE_MIN:
+ range->min = pt->min;
+ break;
+ case NLA_VALIDATE_MAX:
+ range->max = pt->max;
+ break;
+ default:
+ break;
+ }
+}
+
+static int nla_validate_int_range_signed(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct netlink_range_validation_signed range;
+ s64 value;
+
+ switch (pt->type) {
case NLA_S8:
value = nla_get_s8(nla);
break;
@@ -130,22 +257,13 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_S64:
value = nla_get_s64(nla);
break;
- case NLA_U64:
- /* treat this one specially, since it may not fit into s64 */
- if ((validate_min && nla_get_u64(nla) < pt->min) ||
- (validate_max && nla_get_u64(nla) > pt->max)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
- return -ERANGE;
- }
- return 0;
default:
- WARN_ON(1);
return -EINVAL;
}
- if ((validate_min && value < pt->min) ||
- (validate_max && value > pt->max)) {
+ nla_get_range_signed(pt, &range);
+
+ if (value < range.min || value > range.max) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"integer out of range");
return -ERANGE;
@@ -154,9 +272,31 @@ static int nla_validate_int_range(const struct nla_policy *pt,
return 0;
}
+static int nla_validate_int_range(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ switch (pt->type) {
+ case NLA_U8:
+ case NLA_U16:
+ case NLA_U32:
+ case NLA_U64:
+ case NLA_MSECS:
+ return nla_validate_int_range_unsigned(pt, nla, extack);
+ case NLA_S8:
+ case NLA_S16:
+ case NLA_S32:
+ case NLA_S64:
+ return nla_validate_int_range_signed(pt, nla, extack);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack, unsigned int depth)
{
u16 strict_start_type = policy[0].strict_start_type;
const struct nla_policy *pt;
@@ -174,7 +314,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
- (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
+ (pt->type == NLA_EXACT_LEN &&
+ pt->validation_type == NLA_VALIDATE_WARN_TOO_LONG &&
+ attrlen != pt->len)) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
@@ -200,15 +342,10 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
}
switch (pt->type) {
- case NLA_EXACT_LEN:
- if (attrlen != pt->len)
- goto out_err;
- break;
-
case NLA_REJECT:
- if (extack && pt->validation_data) {
+ if (extack && pt->reject_message) {
NL_SET_BAD_ATTR(extack, nla);
- extack->_msg = pt->validation_data;
+ extack->_msg = pt->reject_message;
return -EINVAL;
}
err = -EINVAL;
@@ -223,7 +360,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (attrlen != sizeof(struct nla_bitfield32))
goto out_err;
- err = validate_nla_bitfield32(nla, pt->validation_data);
+ err = validate_nla_bitfield32(nla, pt->bitfield32_valid);
if (err)
goto out_err;
break;
@@ -268,10 +405,11 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
- if (pt->validation_data) {
- err = __nla_validate(nla_data(nla), nla_len(nla), pt->len,
- pt->validation_data, validate,
- extack);
+ if (pt->nested_policy) {
+ err = __nla_validate_parse(nla_data(nla), nla_len(nla),
+ pt->len, pt->nested_policy,
+ validate, extack, NULL,
+ depth + 1);
if (err < 0) {
/*
* return directly to preserve the inner
@@ -289,12 +427,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
- if (pt->validation_data) {
+ if (pt->nested_policy) {
int err;
err = nla_validate_array(nla_data(nla), nla_len(nla),
- pt->len, pt->validation_data,
- extack, validate);
+ pt->len, pt->nested_policy,
+ extack, validate, depth);
if (err < 0) {
/*
* return directly to preserve the inner
@@ -317,6 +455,13 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
goto out_err;
break;
+ case NLA_EXACT_LEN:
+ if (pt->validation_type != NLA_VALIDATE_WARN_TOO_LONG) {
+ if (attrlen != pt->len)
+ goto out_err;
+ break;
+ }
+ /* fall through */
default:
if (pt->len)
minlen = pt->len;
@@ -332,6 +477,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
case NLA_VALIDATE_NONE:
/* nothing to do */
break;
+ case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
@@ -358,11 +504,17 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack,
- struct nlattr **tb)
+ struct nlattr **tb, unsigned int depth)
{
const struct nlattr *nla;
int rem;
+ if (depth >= MAX_POLICY_RECURSION_DEPTH) {
+ NL_SET_ERR_MSG(extack,
+ "allowed policy recursion depth exceeded");
+ return -EINVAL;
+ }
+
if (tb)
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
@@ -379,7 +531,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
}
if (policy) {
int err = validate_nla(nla, maxtype, policy,
- validate, extack);
+ validate, extack, depth);
if (err < 0)
return err;
@@ -421,7 +573,7 @@ int __nla_validate(const struct nlattr *head, int len, int maxtype,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
- extack, NULL);
+ extack, NULL, 0);
}
EXPORT_SYMBOL(__nla_validate);
@@ -476,7 +628,7 @@ int __nla_parse(struct nlattr **tb, int maxtype,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
- extack, tb);
+ extack, tb, 0);
}
EXPORT_SYMBOL(__nla_parse);
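A sketch of a policy entry that exercises the new NLA_VALIDATE_RANGE_PTR path; the MY_ATTR_* names are hypothetical, and the open-coded initializer stands in for whatever helper macro a caller might normally use:

    #include <net/netlink.h>

    enum {
            MY_ATTR_UNSPEC,
            MY_ATTR_PORT,
            __MY_ATTR_MAX,
    };
    #define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

    /* Sketch: reject u32 values outside 1..65535 during parse/validate. */
    static struct netlink_range_validation port_range = {
            .min = 1,
            .max = 65535,
    };

    static const struct nla_policy example_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_PORT] = {
                    .type            = NLA_U32,
                    .validation_type = NLA_VALIDATE_RANGE_PTR,
                    .range           = &port_range,
            },
    };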
diff --git a/lib/objagg.c b/lib/objagg.c
index 55621fb82e0a..5e1676ccdadd 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -28,7 +28,7 @@ struct objagg_hints_node {
struct objagg_hints_node *parent;
unsigned int root_id;
struct objagg_obj_stats_info stats_info;
- unsigned long obj[0];
+ unsigned long obj[];
};
static struct objagg_hints_node *
@@ -66,7 +66,7 @@ struct objagg_obj {
* including nested objects
*/
struct objagg_obj_stats stats;
- unsigned long obj[0];
+ unsigned long obj[];
};
static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
diff --git a/lib/packing.c b/lib/packing.c
index 50d1e9f2f5a7..6ed72dccfdb5 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -73,6 +73,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
* @endbit: The index (in logical notation, compensated for quirks) where
* the packed value ends within pbuf. Must be smaller than, or equal
* to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
* @op: If PACK, then uval will be treated as const pointer and copied (packed)
* into pbuf, between startbit and endbit.
* If UNPACK, then pbuf will be treated as const pointer and the logical
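The @pbuflen parameter documented above is the buffer size that packing() checks accesses against. A minimal, hedged usage sketch of the existing packing() API (PACK direction, no quirks); the function name doc_pack_example() is invented:

#include <linux/packing.h>

static int doc_pack_example(void)
{
	u8 pbuf[8] = { 0 };
	u64 uval = 0x1234;

	/* pack uval into logical bits 47..32 of the 8-byte buffer */
	return packing(pbuf, &uval, 47, 32, sizeof(pbuf), PACK, 0);
}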
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 4f6c6ebbbbde..0ba686b8fe57 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -50,9 +50,10 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
* @flags: PERCPU_REF_INIT_* flags
* @gfp: allocation mask to use
*
- * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
- * refcount of 1; analagous to atomic_long_set(ref, 1). See the
- * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
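For reference, a minimal sketch of a caller of the API documented above; the doc_* names are invented, and the zero @flags case matches the updated text:

#include <linux/percpu-refcount.h>

static void doc_ref_release(struct percpu_ref *ref)
{
	/* must not sleep: may be called from RCU callback context */
	pr_debug("last reference dropped\n");
}

static int doc_ref_setup(struct percpu_ref *ref)
{
	/* flags == 0: starts in percpu mode with a refcount of 1 */
	return percpu_ref_init(ref, doc_ref_release, 0, GFP_KERNEL);
}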
@@ -140,8 +141,8 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(percpu_count, cpu);
- pr_debug("global %ld percpu %ld",
- atomic_long_read(&ref->count), (long)count);
+ pr_debug("global %lu percpu %lu\n",
+ atomic_long_read(&ref->count), count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c8fa1d274530..34e406fe561f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
+#include <linux/local_lock.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
@@ -27,7 +28,6 @@
#include <linux/string.h>
#include <linux/xarray.h>
-
/*
* Radix tree node cache.
*/
@@ -56,22 +56,12 @@ struct kmem_cache *radix_tree_node_cachep;
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
-
-/*
* Per-cpu pool of preloaded nodes
*/
-struct radix_tree_preload {
- unsigned nr;
- /* nodes->parent points to next preallocated node */
- struct radix_tree_node *nodes;
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
+ .lock = INIT_LOCAL_LOCK(lock),
};
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
@@ -340,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
*/
gfp_mask &= ~__GFP_ACCOUNT;
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
@@ -389,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -1478,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
- preempt_disable();
+ local_lock(&radix_tree_preloads.lock);
}
EXPORT_SYMBOL(idr_preload);
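The radix-tree hunks above swap bare preempt_disable()/preempt_enable() for a local_lock_t embedded in the per-CPU preload structure, which keeps the same semantics on non-RT kernels while becoming a real, lockdep-tracked lock on PREEMPT_RT. A minimal sketch of the same pattern with invented names:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct doc_pcpu_cache {
	local_lock_t lock;
	unsigned int nr;
};

static DEFINE_PER_CPU(struct doc_pcpu_cache, doc_cache) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void doc_cache_touch(void)
{
	local_lock(&doc_cache.lock);	/* protects this CPU's doc_cache */
	this_cpu_inc(doc_cache.nr);
	local_unlock(&doc_cache.lock);
}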
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 3de0d8921286..6be57745afd1 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mktables
altivec*.c
int*.c
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index bf1b4765c8f6..6d5e5000fdd7 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -34,10 +34,8 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx512x2,
&raid6_avx512x1,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_avx2x2,
&raid6_avx2x1,
-#endif
&raid6_sse2x2,
&raid6_sse2x1,
&raid6_sse1x2,
@@ -51,11 +49,9 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_avx512x2,
&raid6_avx512x1,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_avx2x4,
&raid6_avx2x2,
&raid6_avx2x1,
-#endif
&raid6_sse2x4,
&raid6_sse2x2,
&raid6_sse2x1,
@@ -97,13 +93,11 @@ void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
+#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
&raid6_recov_avx512,
#endif
-#ifdef CONFIG_AS_AVX2
&raid6_recov_avx2,
-#endif
-#ifdef CONFIG_AS_SSSE3
&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 87184b6da28a..f299476e1d76 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -13,8 +13,6 @@
*
*/
-#ifdef CONFIG_AS_AVX2
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -470,5 +468,3 @@ const struct raid6_calls raid6_avx2x4 = {
1 /* Has cache hints */
};
#endif
-
-#endif /* CONFIG_AS_AVX2 */
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
index 7a3b5e7f66ee..4e8095403ee2 100644
--- a/lib/raid6/recov_avx2.c
+++ b/lib/raid6/recov_avx2.c
@@ -4,8 +4,6 @@
* Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
*/
-#ifdef CONFIG_AS_AVX2
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -313,7 +311,3 @@ const struct raid6_recov_calls raid6_recov_avx2 = {
#endif
.priority = 2,
};
-
-#else
-#warning "your version of binutils lacks AVX2 support"
-#endif
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 1de97d2405d0..4bfa3c6b60de 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -3,8 +3,6 @@
* Copyright (C) 2012 Intel Corporation
*/
-#ifdef CONFIG_AS_SSSE3
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -328,7 +326,3 @@ const struct raid6_recov_calls raid6_recov_ssse3 = {
#endif
.priority = 1,
};
-
-#else
-#warning "your version of binutils lacks SSSE3 support"
-#endif
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 3ab8720aa2f8..a4c7cd74cff5 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -34,14 +34,9 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
- CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
- CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
- gcc -c -x assembler - >&/dev/null && \
- rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+ CFLAGS += -DCONFIG_X86
CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
- gcc -c -x assembler - >&/dev/null && \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
diff --git a/lib/rbtree.c b/lib/rbtree.c
index abc86c6a3177..8545872e61db 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -503,7 +503,7 @@ struct rb_node *rb_next(const struct rb_node *node)
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
- node=node->rb_left;
+ node = node->rb_left;
return (struct rb_node *)node;
}
@@ -535,7 +535,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
- node=node->rb_right;
+ node = node->rb_right;
return (struct rb_node *)node;
}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index bdb7e4cadf05..9f6890aedd1a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
+static inline union nested_table *nested_table_top(
+ const struct bucket_table *tbl)
+{
+ /* The top-level bucket entry does not need RCU protection
+ * because it's set at the same time as tbl->nest.
+ */
+ return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
+}
+
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
const unsigned int len = 1 << shift;
unsigned int i;
- ntbl = rcu_dereference_raw(ntbl->table);
+ ntbl = rcu_dereference_protected(ntbl->table, 1);
if (!ntbl)
return;
@@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl)
union nested_table *ntbl;
unsigned int i;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
for (i = 0; i < len; i++)
nested_table_free(ntbl + i, size);
@@ -1173,7 +1182,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int subhash = hash;
union nested_table *ntbl;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
subhash >>= tbl->nest;
@@ -1213,7 +1222,7 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
hash >>= tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
size <= (1 << shift));
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5813072bc589..5d63a8857f36 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL(sg_miter_stop);
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
* @to_buffer: transfer direction (true == from an sg list to a
- * buffer, false == from a buffer to an sg list
+ * buffer, false == from a buffer to an sg list)
*
* Returns the number of copied bytes.
*
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 4e865d42ab03..707453f5d58e 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -91,6 +91,7 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
return ret;
}
+EXPORT_SYMBOL_GPL(seq_buf_printf);
#ifdef CONFIG_BINARY_PRINTF
/**
diff --git a/lib/sha1.c b/lib/sha1.c
index 1d96d2c02b82..49257a915bb6 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
-#include <linux/cryptohash.h>
+#include <crypto/sha.h>
#include <asm/unaligned.h>
/*
@@ -64,22 +64,24 @@
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
- * sha_transform - single block SHA1 transform
+ * sha1_transform - single block SHA1 transform (deprecated)
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
* @array: 16 words of workspace (see note)
*
- * This function generates a SHA1 digest for a single 512-bit block.
- * Be warned, it does not handle padding and message digest, do not
- * confuse it with the full FIPS 180-1 digest algorithm for variable
- * length messages.
+ * This function executes SHA-1's internal compression function. It updates the
+ * 160-bit internal state (@digest) with a single 512-bit data block (@data).
+ *
+ * Don't use this function. SHA-1 is no longer considered secure. And even if
+ * you do have to use SHA-1, this isn't the correct way to hash something with
+ * SHA-1 as this doesn't handle padding and finalization.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
-void sha_transform(__u32 *digest, const char *data, __u32 *array)
+void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
@@ -185,13 +187,13 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array)
digest[3] += D;
digest[4] += E;
}
-EXPORT_SYMBOL(sha_transform);
+EXPORT_SYMBOL(sha1_transform);
/**
- * sha_init - initialize the vectors for a SHA1 digest
+ * sha1_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
-void sha_init(__u32 *buf)
+void sha1_init(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
@@ -199,4 +201,4 @@ void sha_init(__u32 *buf)
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
-EXPORT_SYMBOL(sha_init);
+EXPORT_SYMBOL(sha1_init);
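To make the renamed helpers concrete, here is a hedged single-block usage sketch; it assumes the SHA1_BLOCK_SIZE, SHA1_DIGEST_WORDS and SHA1_WORKSPACE_WORDS constants from <crypto/sha.h> and, as the comment above stresses, performs no padding or finalization:

#include <crypto/sha.h>
#include <linux/string.h>

/* @block is assumed to be exactly SHA1_BLOCK_SIZE (64) bytes */
static void doc_sha1_one_block(const char *block, __u32 digest[SHA1_DIGEST_WORDS])
{
	__u32 ws[SHA1_WORKSPACE_WORDS];

	sha1_init(digest);			/* load the standard initial vectors */
	sha1_transform(digest, block, ws);	/* compress one 512-bit block */
	memzero_explicit(ws, sizeof(ws));	/* clear workspace for sensitive data */
}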
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index bd9571653288..525222e4f409 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -8,7 +8,7 @@
#include <linux/kprobes.h>
#include <linux/sched.h>
-notrace static nokprobe_inline
+noinstr static
unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
int this_cpu = raw_smp_processor_id();
@@ -37,6 +37,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
*/
preempt_disable_notrace();
+ instrumentation_begin();
if (!printk_ratelimit())
goto out_enable;
@@ -45,6 +46,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
printk("caller is %pS\n", __builtin_return_address(0));
dump_stack();
+ instrumentation_end();
out_enable:
preempt_enable_no_resched_notrace();
@@ -52,16 +54,14 @@ out:
return this_cpu;
}
-notrace unsigned int debug_smp_processor_id(void)
+noinstr unsigned int debug_smp_processor_id(void)
{
return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
-NOKPROBE_SYMBOL(debug_smp_processor_id);
-notrace void __this_cpu_preempt_check(const char *op)
+noinstr void __this_cpu_preempt_check(const char *op)
{
check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
-NOKPROBE_SYMBOL(__this_cpu_preempt_check);
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 81c69c08d1d1..2caffc64e4c8 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -20,6 +20,7 @@
*/
#include <linux/gfp.h>
+#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -202,9 +203,20 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
- void *slab = stack_slabs[parts.slabindex];
+ void *slab;
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack = slab + offset;
+ struct stack_record *stack;
+
+ *entries = NULL;
+ if (parts.slabindex > depot_index) {
+ WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
+ parts.slabindex, depot_index, handle);
+ return 0;
+ }
+ slab = stack_slabs[parts.slabindex];
+ if (!slab)
+ return 0;
+ stack = slab + offset;
*entries = stack->entries;
return stack->size;
@@ -305,3 +317,26 @@ fast_exit:
return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+static inline int in_irqentry_text(unsigned long ptr)
+{
+ return (ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end) ||
+ (ptr >= (unsigned long)&__softirqentry_text_start &&
+ ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+unsigned int filter_irq_stacks(unsigned long *entries,
+ unsigned int nr_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_entries; i++) {
+ if (in_irqentry_text(entries[i])) {
+ /* Include the irqentry function into the stack. */
+ return i + 1;
+ }
+ }
+ return nr_entries;
+}
+EXPORT_SYMBOL_GPL(filter_irq_stacks);
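A hedged sketch of how the new filter_irq_stacks() helper combines with the existing stack_trace_save() and stack_depot_save(); the 64-entry buffer size is an arbitrary choice for illustration:

#include <linux/gfp.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t doc_save_trace(void)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* drop everything below the IRQ entry point so interrupted stacks dedup */
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, GFP_NOWAIT);
}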
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 706020b06617..34696a348864 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -98,6 +98,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max_addr, src_addr;
+ might_fault();
if (unlikely(count <= 0))
return 0;
@@ -116,9 +117,9 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
kasan_check_write(dst, count);
check_object_size(dst, count, false);
- if (user_access_begin(src, max)) {
+ if (user_read_access_begin(src, max)) {
retval = do_strncpy_from_user(dst, src, count, max);
- user_access_end();
+ user_read_access_end();
return retval;
}
}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 41670d4a5816..1616710b8a82 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -109,9 +109,9 @@ long strnlen_user(const char __user *str, long count)
if (max > count)
max = count;
- if (user_access_begin(str, max)) {
+ if (user_read_access_begin(str, max)) {
retval = do_strnlen_user(str, count, max);
- user_access_end();
+ user_read_access_end();
return retval;
}
}
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 61ed71c1daba..6b13150667f5 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -278,6 +278,8 @@ static void __init test_replace(void)
unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG);
DECLARE_BITMAP(bmap, 1024);
+ BUILD_BUG_ON(EXP2_IN_BITS < nbits * 2);
+
bitmap_zero(bmap, 1024);
bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
expect_eq_bitmap(bmap, exp3_0_1, nbits);
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
new file mode 100644
index 000000000000..ced25e3a779b
--- /dev/null
+++ b/lib/test_bitops.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+/* a tiny module only meant to test
+ *
+ * set/clear_bit
+ * get_count_order/long
+ */
+
+/* use an enum because that's the most common BITMAP usage */
+enum bitops_fun {
+ BITOPS_4 = 4,
+ BITOPS_7 = 7,
+ BITOPS_11 = 11,
+ BITOPS_31 = 31,
+ BITOPS_88 = 88,
+ BITOPS_LAST = 255,
+ BITOPS_LENGTH = 256
+};
+
+static DECLARE_BITMAP(g_bitmap, BITOPS_LENGTH);
+
+static unsigned int order_comb[][2] = {
+ {0x00000003, 2},
+ {0x00000004, 2},
+ {0x00001fff, 13},
+ {0x00002000, 13},
+ {0x50000000, 31},
+ {0x80000000, 31},
+ {0x80003000, 32},
+};
+
+#ifdef CONFIG_64BIT
+static unsigned long order_comb_long[][2] = {
+ {0x0000000300000000, 34},
+ {0x0000000400000000, 34},
+ {0x00001fff00000000, 45},
+ {0x0000200000000000, 45},
+ {0x5000000000000000, 63},
+ {0x8000000000000000, 63},
+ {0x8000300000000000, 64},
+};
+#endif
+
+static int __init test_bitops_startup(void)
+{
+ int i;
+
+ pr_warn("Loaded test module\n");
+ set_bit(BITOPS_4, g_bitmap);
+ set_bit(BITOPS_7, g_bitmap);
+ set_bit(BITOPS_11, g_bitmap);
+ set_bit(BITOPS_31, g_bitmap);
+ set_bit(BITOPS_88, g_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order(order_comb[i][0]))
+ pr_warn("get_count_order wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order_long(order_comb[i][0]))
+ pr_warn("get_count_order_long wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+#ifdef CONFIG_64BIT
+ for (i = 0; i < ARRAY_SIZE(order_comb_long); i++) {
+ if (order_comb_long[i][1] !=
+ get_count_order_long(order_comb_long[i][0]))
+ pr_warn("get_count_order_long wrong for %lx\n",
+ order_comb_long[i][0]);
+ }
+#endif
+ return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
+ int bit_set;
+
+ clear_bit(BITOPS_4, g_bitmap);
+ clear_bit(BITOPS_7, g_bitmap);
+ clear_bit(BITOPS_11, g_bitmap);
+ clear_bit(BITOPS_31, g_bitmap);
+ clear_bit(BITOPS_88, g_bitmap);
+
+ bit_set = find_first_bit(g_bitmap, BITOPS_LAST);
+ if (bit_set != BITOPS_LAST)
+ pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
+
+ pr_warn("Unloaded test module\n");
+}
+
+module_init(test_bitops_startup);
+module_exit(test_bitops_unstartup);
+
+MODULE_AUTHOR("Jesse Brandeburg <jesse.brandeburg@intel.com>, Wei Yang <richard.weiyang@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bit testing module");
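For clarity, get_count_order(n) in the tables above is the order of the smallest power of two that is >= n, and get_count_order_long() is the same for an unsigned long argument. A tiny hedged sketch of the expected values (the last check assumes a 64-bit build):

	/* 2^13 == 0x2000 >= 0x1fff, and 0x2000 itself is a power of two */
	WARN_ON(get_count_order(0x00001fff) != 13);
	WARN_ON(get_count_order(0x00002000) != 13);
	WARN_ON(get_count_order_long(0x8000300000000000UL) != 64);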
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index cecb230833be..a5fddf9ebcb7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6660,14 +6660,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- preempt_disable();
+ migrate_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
finish = ktime_get_ns();
- preempt_enable();
+ migrate_enable();
*duration = finish - start;
do_div(*duration, runs);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 251213c872b5..9fee2b93a8d1 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
+#include <linux/efi_embedded_fw.h>
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
@@ -309,27 +310,13 @@ static int test_dev_config_update_bool(const char *buf, size_t size,
return ret;
}
-static ssize_t
-test_dev_config_show_bool(char *buf,
- bool config)
+static ssize_t test_dev_config_show_bool(char *buf, bool val)
{
- bool val;
-
- mutex_lock(&test_fw_mutex);
- val = config;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t test_dev_config_show_int(char *buf, int cfg)
+static ssize_t test_dev_config_show_int(char *buf, int val)
{
- int val;
-
- mutex_lock(&test_fw_mutex);
- val = cfg;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
@@ -353,14 +340,8 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
return size;
}
-static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
+static ssize_t test_dev_config_show_u8(char *buf, u8 val)
{
- u8 val;
-
- mutex_lock(&test_fw_mutex);
- val = cfg;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
@@ -507,6 +488,57 @@ out:
}
static DEVICE_ATTR_WO(trigger_request);
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+static ssize_t trigger_request_platform_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const u8 test_data[] = {
+ 0x55, 0xaa, 0x55, 0xaa, 0x01, 0x02, 0x03, 0x04,
+ 0x55, 0xaa, 0x55, 0xaa, 0x05, 0x06, 0x07, 0x08,
+ 0x55, 0xaa, 0x55, 0xaa, 0x10, 0x20, 0x30, 0x40,
+ 0x55, 0xaa, 0x55, 0xaa, 0x50, 0x60, 0x70, 0x80
+ };
+ struct efi_embedded_fw efi_embedded_fw;
+ const struct firmware *firmware = NULL;
+ char *name;
+ int rc;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOSPC;
+
+ pr_info("inserting test platform fw '%s'\n", name);
+ efi_embedded_fw.name = name;
+ efi_embedded_fw.data = (void *)test_data;
+ efi_embedded_fw.length = sizeof(test_data);
+ list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
+
+ pr_info("loading '%s'\n", name);
+ rc = firmware_request_platform(&firmware, name, dev);
+ if (rc) {
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ goto out;
+ }
+ if (firmware->size != sizeof(test_data) ||
+ memcmp(firmware->data, test_data, sizeof(test_data)) != 0) {
+ pr_info("firmware contents mismatch for '%s'\n", name);
+ rc = -EINVAL;
+ goto out;
+ }
+ pr_info("loaded: %zu\n", firmware->size);
+ rc = count;
+
+out:
+ release_firmware(firmware);
+ list_del(&efi_embedded_fw.list);
+ kfree(name);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_request_platform);
+#endif
+
static DECLARE_COMPLETION(async_fw_done);
static void trigger_async_request_cb(const struct firmware *fw, void *context)
@@ -903,6 +935,9 @@ static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(trigger_request),
TEST_FW_DEV_ATTR(trigger_async_request),
TEST_FW_DEV_ATTR(trigger_custom_fallback),
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+ TEST_FW_DEV_ATTR(trigger_request_platform),
+#endif
/* These use the config and can use the test_result */
TEST_FW_DEV_ATTR(trigger_batched_requests),
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
new file mode 100644
index 000000000000..a2a82262b97b
--- /dev/null
+++ b/lib/test_hmm.c
@@ -0,0 +1,1163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management)
+ * mirror and zone device private memory migration APIs of the kernel.
+ * Userspace programs can register with the driver to mirror their own address
+ * space and can use the device to read/write any valid virtual address.
+ */
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/hmm.h>
+#include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/sched/mm.h>
+#include <linux/platform_device.h>
+
+#include "test_hmm_uapi.h"
+
+#define DMIRROR_NDEVICES 2
+#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
+#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
+#define DEVMEM_CHUNKS_RESERVE 16
+
+static const struct dev_pagemap_ops dmirror_devmem_ops;
+static const struct mmu_interval_notifier_ops dmirror_min_ops;
+static dev_t dmirror_dev;
+static struct page *dmirror_zero_page;
+
+struct dmirror_device;
+
+struct dmirror_bounce {
+ void *ptr;
+ unsigned long size;
+ unsigned long addr;
+ unsigned long cpages;
+};
+
+#define DPT_XA_TAG_WRITE 3UL
+
+/*
+ * Data structure to track address ranges and register for mmu interval
+ * notifier updates.
+ */
+struct dmirror_interval {
+ struct mmu_interval_notifier notifier;
+ struct dmirror *dmirror;
+};
+
+/*
+ * Data attached to the open device file.
+ * Note that it might be shared after a fork().
+ */
+struct dmirror {
+ struct dmirror_device *mdevice;
+ struct xarray pt;
+ struct mmu_interval_notifier notifier;
+ struct mutex mutex;
+};
+
+/*
+ * ZONE_DEVICE pages for migration and simulating device memory.
+ */
+struct dmirror_chunk {
+ struct dev_pagemap pagemap;
+ struct dmirror_device *mdevice;
+};
+
+/*
+ * Per device data.
+ */
+struct dmirror_device {
+ struct cdev cdevice;
+ struct hmm_devmem *devmem;
+
+ unsigned int devmem_capacity;
+ unsigned int devmem_count;
+ struct dmirror_chunk **devmem_chunks;
+ struct mutex devmem_lock; /* protects the above */
+
+ unsigned long calloc;
+ unsigned long cfree;
+ struct page *free_pages;
+ spinlock_t lock; /* protects the above */
+};
+
+static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];
+
+static int dmirror_bounce_init(struct dmirror_bounce *bounce,
+ unsigned long addr,
+ unsigned long size)
+{
+ bounce->addr = addr;
+ bounce->size = size;
+ bounce->cpages = 0;
+ bounce->ptr = vmalloc(size);
+ if (!bounce->ptr)
+ return -ENOMEM;
+ return 0;
+}
+
+static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
+{
+ vfree(bounce->ptr);
+}
+
+static int dmirror_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct dmirror *dmirror;
+ int ret;
+
+ /* Mirror this process address space */
+ dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
+ if (dmirror == NULL)
+ return -ENOMEM;
+
+ dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice);
+ mutex_init(&dmirror->mutex);
+ xa_init(&dmirror->pt);
+
+ ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
+ 0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops);
+ if (ret) {
+ kfree(dmirror);
+ return ret;
+ }
+
+ filp->private_data = dmirror;
+ return 0;
+}
+
+static int dmirror_fops_release(struct inode *inode, struct file *filp)
+{
+ struct dmirror *dmirror = filp->private_data;
+
+ mmu_interval_notifier_remove(&dmirror->notifier);
+ xa_destroy(&dmirror->pt);
+ kfree(dmirror);
+ return 0;
+}
+
+static struct dmirror_device *dmirror_page_to_device(struct page *page)
+{
+ return container_of(page->pgmap, struct dmirror_chunk,
+ pagemap)->mdevice;
+}
+
+static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
+{
+ unsigned long *pfns = range->hmm_pfns;
+ unsigned long pfn;
+
+ for (pfn = (range->start >> PAGE_SHIFT);
+ pfn < (range->end >> PAGE_SHIFT);
+ pfn++, pfns++) {
+ struct page *page;
+ void *entry;
+
+ /*
+ * Since we asked for hmm_range_fault() to populate pages,
+ * it shouldn't return an error entry on success.
+ */
+ WARN_ON(*pfns & HMM_PFN_ERROR);
+ WARN_ON(!(*pfns & HMM_PFN_VALID));
+
+ page = hmm_pfn_to_page(*pfns);
+ WARN_ON(!page);
+
+ entry = page;
+ if (*pfns & HMM_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
+ return -EFAULT;
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry))
+ return xa_err(entry);
+ }
+
+ return 0;
+}
+
+static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+ void *entry;
+
+ /*
+ * The XArray doesn't hold references to pages since it relies on
+ * the mmu notifier to clear page pointers when they become stale.
+ * Therefore, it is OK to just clear the entry.
+ */
+ xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT)
+ xa_erase(&dmirror->pt, pfn);
+}
+
+static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ mmu_interval_set_seq(mni, cur_seq);
+ dmirror_do_update(dmirror, range->start, range->end);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_min_ops = {
+ .invalidate = dmirror_interval_invalidate,
+};
+
+static int dmirror_range_fault(struct dmirror *dmirror,
+ struct hmm_range *range)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ int ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ ret = dmirror_do_fault(dmirror, range);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ return ret;
+}
+
+static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, bool write)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long addr;
+ unsigned long pfns[64];
+ struct hmm_range range = {
+ .notifier = &dmirror->notifier,
+ .hmm_pfns = pfns,
+ .pfn_flags_mask = 0,
+ .default_flags =
+ HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0),
+ .dev_private_owner = dmirror->mdevice,
+ };
+ int ret = 0;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return 0;
+
+ for (addr = start; addr < end; addr = range.end) {
+ range.start = addr;
+ range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
+
+ ret = dmirror_range_fault(dmirror, &range);
+ if (ret)
+ break;
+ }
+
+ mmput(mm);
+ return ret;
+}
+
+static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, struct dmirror_bounce *bounce)
+{
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+ void *tmp;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (!page)
+ return -ENOENT;
+
+ tmp = kmap(page);
+ memcpy(ptr, tmp, PAGE_SIZE);
+ kunmap(page);
+
+ ptr += PAGE_SIZE;
+ bounce->cpages++;
+ }
+
+ return 0;
+}
+
+static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
+{
+ struct dmirror_bounce bounce;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+
+ while (1) {
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret != -ENOENT)
+ break;
+
+ start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
+ ret = dmirror_fault(dmirror, start, end, false);
+ if (ret)
+ break;
+ cmd->faults++;
+ }
+
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, struct dmirror_bounce *bounce)
+{
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+ void *tmp;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
+ return -ENOENT;
+
+ tmp = kmap(page);
+ memcpy(tmp, ptr, PAGE_SIZE);
+ kunmap(page);
+
+ ptr += PAGE_SIZE;
+ bounce->cpages++;
+ }
+
+ return 0;
+}
+
+static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
+{
+ struct dmirror_bounce bounce;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr),
+ bounce.size)) {
+ ret = -EFAULT;
+ goto fini;
+ }
+
+ while (1) {
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_write(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret != -ENOENT)
+ break;
+
+ start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
+ ret = dmirror_fault(dmirror, start, end, true);
+ if (ret)
+ break;
+ cmd->faults++;
+ }
+
+fini:
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
+ struct page **ppage)
+{
+ struct dmirror_chunk *devmem;
+ struct resource *res;
+ unsigned long pfn;
+ unsigned long pfn_first;
+ unsigned long pfn_last;
+ void *ptr;
+
+ mutex_lock(&mdevice->devmem_lock);
+
+ if (mdevice->devmem_count == mdevice->devmem_capacity) {
+ struct dmirror_chunk **new_chunks;
+ unsigned int new_capacity;
+
+ new_capacity = mdevice->devmem_capacity +
+ DEVMEM_CHUNKS_RESERVE;
+ new_chunks = krealloc(mdevice->devmem_chunks,
+ sizeof(new_chunks[0]) * new_capacity,
+ GFP_KERNEL);
+ if (!new_chunks)
+ goto err;
+ mdevice->devmem_capacity = new_capacity;
+ mdevice->devmem_chunks = new_chunks;
+ }
+
+ res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+ "hmm_dmirror");
+ if (IS_ERR(res))
+ goto err;
+
+ devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ goto err_release;
+
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ devmem->pagemap.res = *res;
+ devmem->pagemap.ops = &dmirror_devmem_ops;
+ devmem->pagemap.owner = mdevice;
+
+ ptr = memremap_pages(&devmem->pagemap, numa_node_id());
+ if (IS_ERR(ptr))
+ goto err_free;
+
+ devmem->mdevice = mdevice;
+ pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
+ pfn_last = pfn_first +
+ (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+ mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
+
+ mutex_unlock(&mdevice->devmem_lock);
+
+ pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n",
+ DEVMEM_CHUNK_SIZE / (1024 * 1024),
+ mdevice->devmem_count,
+ mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)),
+ pfn_first, pfn_last);
+
+ spin_lock(&mdevice->lock);
+ for (pfn = pfn_first; pfn < pfn_last; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
+ if (ppage) {
+ *ppage = mdevice->free_pages;
+ mdevice->free_pages = (*ppage)->zone_device_data;
+ mdevice->calloc++;
+ }
+ spin_unlock(&mdevice->lock);
+
+ return true;
+
+err_free:
+ kfree(devmem);
+err_release:
+ release_mem_region(res->start, resource_size(res));
+err:
+ mutex_unlock(&mdevice->devmem_lock);
+ return false;
+}
+
+static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
+{
+ struct page *dpage = NULL;
+ struct page *rpage;
+
+ /*
+ * This is a fake device so we alloc real system memory to store
+ * our device memory.
+ */
+ rpage = alloc_page(GFP_HIGHUSER);
+ if (!rpage)
+ return NULL;
+
+ spin_lock(&mdevice->lock);
+
+ if (mdevice->free_pages) {
+ dpage = mdevice->free_pages;
+ mdevice->free_pages = dpage->zone_device_data;
+ mdevice->calloc++;
+ spin_unlock(&mdevice->lock);
+ } else {
+ spin_unlock(&mdevice->lock);
+ if (!dmirror_allocate_chunk(mdevice, &dpage))
+ goto error;
+ }
+
+ dpage->zone_device_data = rpage;
+ get_page(dpage);
+ lock_page(dpage);
+ return dpage;
+
+error:
+ __free_page(rpage);
+ return NULL;
+}
+
+static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ struct dmirror_device *mdevice = dmirror->mdevice;
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long addr;
+
+ for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *spage;
+ struct page *dpage;
+ struct page *rpage;
+
+ if (!(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ /*
+ * Note that spage might be NULL which is OK since it is an
+ * unallocated pte_none() or read-only zero page.
+ */
+ spage = migrate_pfn_to_page(*src);
+
+ /*
+ * Don't migrate device private pages from our own driver or
+ * others. For our own we would do a device private memory copy
+ * not a migration and for others, we would need to fault the
+ * other device's page into system memory first.
+ */
+ if (spage && is_zone_device_page(spage))
+ continue;
+
+ dpage = dmirror_devmem_alloc_page(mdevice);
+ if (!dpage)
+ continue;
+
+ rpage = dpage->zone_device_data;
+ if (spage)
+ copy_highpage(rpage, spage);
+ else
+ clear_highpage(rpage);
+
+ /*
+ * Normally, a device would use the page->zone_device_data to
+ * point to the mirror but here we use it to hold the page for
+ * the simulated device memory and that page holds the pointer
+ * to the mirror.
+ */
+ rpage->zone_device_data = dmirror;
+
+ *dst = migrate_pfn(page_to_pfn(dpage)) |
+ MIGRATE_PFN_LOCKED;
+ if ((*src & MIGRATE_PFN_WRITE) ||
+ (!spage && args->vma->vm_flags & VM_WRITE))
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+}
+
+static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ const unsigned long *src = args->src;
+ const unsigned long *dst = args->dst;
+ unsigned long pfn;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
+ src++, dst++) {
+ struct page *dpage;
+ void *entry;
+
+ if (!(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = migrate_pfn_to_page(*dst);
+ if (!dpage)
+ continue;
+
+ /*
+ * Store the page that holds the data so the page table
+ * doesn't have to deal with ZONE_DEVICE private pages.
+ */
+ entry = dpage->zone_device_data;
+ if (*dst & MIGRATE_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return 0;
+}
+
+static int dmirror_migrate(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ unsigned long src_pfns[64];
+ unsigned long dst_pfns[64];
+ struct dmirror_bounce bounce;
+ struct migrate_vma args;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = find_vma(mm, addr);
+ if (!vma || addr < vma->vm_start ||
+ !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.src_owner = NULL;
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ dmirror_migrate_alloc_and_copy(&args, dmirror);
+ migrate_vma_pages(&args);
+ dmirror_migrate_finalize_and_map(&args, dmirror);
+ migrate_vma_finalize(&args);
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return ret;
+}
+
+static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
+ unsigned char *perm, unsigned long entry)
+{
+ struct page *page;
+
+ if (entry & HMM_PFN_ERROR) {
+ *perm = HMM_DMIRROR_PROT_ERROR;
+ return;
+ }
+ if (!(entry & HMM_PFN_VALID)) {
+ *perm = HMM_DMIRROR_PROT_NONE;
+ return;
+ }
+
+ page = hmm_pfn_to_page(entry);
+ if (is_device_private_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
+ } else if (is_zero_pfn(page_to_pfn(page)))
+ *perm = HMM_DMIRROR_PROT_ZERO;
+ else
+ *perm = HMM_DMIRROR_PROT_NONE;
+ if (entry & HMM_PFN_WRITE)
+ *perm |= HMM_DMIRROR_PROT_WRITE;
+ else
+ *perm |= HMM_DMIRROR_PROT_READ;
+}
+
+static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror_interval *dmi =
+ container_of(mni, struct dmirror_interval, notifier);
+ struct dmirror *dmirror = dmi->dmirror;
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ /*
+ * Snapshots only need to set the sequence number since any
+ * invalidation in the interval invalidates the whole snapshot.
+ */
+ mmu_interval_set_seq(mni, cur_seq);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_mrn_ops = {
+ .invalidate = dmirror_snapshot_invalidate,
+};
+
+static int dmirror_range_snapshot(struct dmirror *dmirror,
+ struct hmm_range *range,
+ unsigned char *perm)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct dmirror_interval notifier;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long i;
+ unsigned long n;
+ int ret = 0;
+
+ notifier.dmirror = dmirror;
+ range->notifier = &notifier.notifier;
+
+ ret = mmu_interval_notifier_insert(range->notifier, mm,
+ range->start, range->end - range->start,
+ &dmirror_mrn_ops);
+ if (ret)
+ return ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ n = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < n; i++)
+ dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ mmu_interval_notifier_remove(range->notifier);
+ return ret;
+}
+
+static int dmirror_snapshot(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ unsigned long addr;
+ unsigned long next;
+ unsigned long pfns[64];
+ unsigned char perm[64];
+ char __user *uptr;
+ struct hmm_range range = {
+ .hmm_pfns = pfns,
+ .dev_private_owner = dmirror->mdevice,
+ };
+ int ret = 0;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ /*
+ * Register a temporary notifier to detect invalidations even if it
+ * overlaps with other mmu_interval_notifiers.
+ */
+ uptr = u64_to_user_ptr(cmd->ptr);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long n;
+
+ next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
+ range.start = addr;
+ range.end = next;
+
+ ret = dmirror_range_snapshot(dmirror, &range, perm);
+ if (ret)
+ break;
+
+ n = (range.end - range.start) >> PAGE_SHIFT;
+ if (copy_to_user(uptr, perm, n)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ cmd->cpages += n;
+ uptr += n;
+ }
+ mmput(mm);
+
+ return ret;
+}
+
+static long dmirror_fops_unlocked_ioctl(struct file *filp,
+ unsigned int command,
+ unsigned long arg)
+{
+ void __user *uarg = (void __user *)arg;
+ struct hmm_dmirror_cmd cmd;
+ struct dmirror *dmirror;
+ int ret;
+
+ dmirror = filp->private_data;
+ if (!dmirror)
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, uarg, sizeof(cmd)))
+ return -EFAULT;
+
+ if (cmd.addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT)))
+ return -EINVAL;
+
+ cmd.cpages = 0;
+ cmd.faults = 0;
+
+ switch (command) {
+ case HMM_DMIRROR_READ:
+ ret = dmirror_read(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_WRITE:
+ ret = dmirror_write(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE:
+ ret = dmirror_migrate(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_SNAPSHOT:
+ ret = dmirror_snapshot(dmirror, &cmd);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ if (copy_to_user(uarg, &cmd, sizeof(cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static const struct file_operations dmirror_fops = {
+ .open = dmirror_fops_open,
+ .release = dmirror_fops_release,
+ .unlocked_ioctl = dmirror_fops_unlocked_ioctl,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static void dmirror_devmem_free(struct page *page)
+{
+ struct page *rpage = page->zone_device_data;
+ struct dmirror_device *mdevice;
+
+ if (rpage)
+ __free_page(rpage);
+
+ mdevice = dmirror_page_to_device(page);
+
+ spin_lock(&mdevice->lock);
+ mdevice->cfree++;
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ spin_unlock(&mdevice->lock);
+}
+
+static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror_device *mdevice)
+{
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(*src);
+ if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+ spage = spage->zone_device_data;
+
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ if (!dpage)
+ continue;
+
+ lock_page(dpage);
+ copy_highpage(dpage, spage);
+ *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ if (*src & MIGRATE_PFN_WRITE)
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+ return 0;
+}
+
+static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ /* Invalidate the device's page table mapping. */
+ mutex_lock(&dmirror->mutex);
+ dmirror_do_update(dmirror, args->start, args->end);
+ mutex_unlock(&dmirror->mutex);
+}
+
+static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
+{
+ struct migrate_vma args;
+ unsigned long src_pfns;
+ unsigned long dst_pfns;
+ struct page *rpage;
+ struct dmirror *dmirror;
+ vm_fault_t ret;
+
+ /*
+ * Normally, a device would use the page->zone_device_data to point to
+ * the mirror but here we use it to hold the page for the simulated
+ * device memory and that page holds the pointer to the mirror.
+ */
+ rpage = vmf->page->zone_device_data;
+ dmirror = rpage->zone_device_data;
+
+ /* FIXME demonstrate how we can adjust migrate range */
+ args.vma = vmf->vma;
+ args.start = vmf->address;
+ args.end = args.start + PAGE_SIZE;
+ args.src = &src_pfns;
+ args.dst = &dst_pfns;
+ args.src_owner = dmirror->mdevice;
+
+ if (migrate_vma_setup(&args))
+ return VM_FAULT_SIGBUS;
+
+ ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+ if (ret)
+ return ret;
+ migrate_vma_pages(&args);
+ dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+ migrate_vma_finalize(&args);
+ return 0;
+}
+
+static const struct dev_pagemap_ops dmirror_devmem_ops = {
+ .page_free = dmirror_devmem_free,
+ .migrate_to_ram = dmirror_devmem_fault,
+};
+
+static int dmirror_device_init(struct dmirror_device *mdevice, int id)
+{
+ dev_t dev;
+ int ret;
+
+ dev = MKDEV(MAJOR(dmirror_dev), id);
+ mutex_init(&mdevice->devmem_lock);
+ spin_lock_init(&mdevice->lock);
+
+ cdev_init(&mdevice->cdevice, &dmirror_fops);
+ mdevice->cdevice.owner = THIS_MODULE;
+ ret = cdev_add(&mdevice->cdevice, dev, 1);
+ if (ret)
+ return ret;
+
+ /* Build a list of free ZONE_DEVICE private struct pages */
+ dmirror_allocate_chunk(mdevice, NULL);
+
+ return 0;
+}
+
+static void dmirror_device_remove(struct dmirror_device *mdevice)
+{
+ unsigned int i;
+
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ memunmap_pages(&devmem->pagemap);
+ release_mem_region(devmem->pagemap.res.start,
+ resource_size(&devmem->pagemap.res));
+ kfree(devmem);
+ }
+ kfree(mdevice->devmem_chunks);
+ }
+
+ cdev_del(&mdevice->cdevice);
+}
+
+static int __init hmm_dmirror_init(void)
+{
+ int ret;
+ int id;
+
+ ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,
+ "HMM_DMIRROR");
+ if (ret)
+ goto err_unreg;
+
+ for (id = 0; id < DMIRROR_NDEVICES; id++) {
+ ret = dmirror_device_init(dmirror_devices + id, id);
+ if (ret)
+ goto err_chrdev;
+ }
+
+ /*
+	 * Allocate a zero page to simulate a reserved page of device private
+	 * memory which is always zero. The kernel's zero_pfn page isn't used
+	 * here, just to keep this code simpler (i.e., we need a struct page
+	 * for it).
+ */
+ dmirror_zero_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+ if (!dmirror_zero_page) {
+ ret = -ENOMEM;
+ goto err_chrdev;
+ }
+
+ pr_info("HMM test module loaded. This is only for testing HMM.\n");
+ return 0;
+
+err_chrdev:
+ while (--id >= 0)
+ dmirror_device_remove(dmirror_devices + id);
+ unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
+err_unreg:
+ return ret;
+}
+
+static void __exit hmm_dmirror_exit(void)
+{
+ int id;
+
+ if (dmirror_zero_page)
+ __free_page(dmirror_zero_page);
+ for (id = 0; id < DMIRROR_NDEVICES; id++)
+ dmirror_device_remove(dmirror_devices + id);
+ unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
+}
+
+module_init(hmm_dmirror_init);
+module_exit(hmm_dmirror_exit);
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
new file mode 100644
index 000000000000..67b3b2e6ff5d
--- /dev/null
+++ b/lib/test_hmm_uapi.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management) API
+ * of the kernel. It allows a userspace program to expose its entire address
+ * space through the HMM test module device file.
+ */
+#ifndef _LIB_TEST_HMM_UAPI_H
+#define _LIB_TEST_HMM_UAPI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Structure to pass to the HMM test driver to mimic a device accessing
+ * system memory and ZONE_DEVICE private memory through device page tables.
+ *
+ * @addr: (in) user address the device will read/write
+ * @ptr: (in) user address where device data is copied to/from
+ * @npages: (in) number of pages to read/write
+ * @cpages: (out) number of pages copied
+ * @faults: (out) number of device page faults seen
+ */
+struct hmm_dmirror_cmd {
+ __u64 addr;
+ __u64 ptr;
+ __u64 npages;
+ __u64 cpages;
+ __u64 faults;
+};
+
+/* Expose the address space of the calling process through hmm device file */
+#define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+
+/*
+ * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
+ * HMM_DMIRROR_PROT_ERROR: no valid mirror PTE for this page
+ * HMM_DMIRROR_PROT_NONE: unpopulated PTE or PTE with no access
+ * HMM_DMIRROR_PROT_READ: read-only PTE
+ * HMM_DMIRROR_PROT_WRITE: read/write PTE
+ * HMM_DMIRROR_PROT_ZERO: special read-only zero page
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL: Migrated device private page on the
+ * device the ioctl() is made
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some
+ * other device
+ */
+enum {
+ HMM_DMIRROR_PROT_ERROR = 0xFF,
+ HMM_DMIRROR_PROT_NONE = 0x00,
+ HMM_DMIRROR_PROT_READ = 0x01,
+ HMM_DMIRROR_PROT_WRITE = 0x02,
+ HMM_DMIRROR_PROT_ZERO = 0x10,
+ HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20,
+ HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30,
+};
+
+#endif /* _LIB_TEST_HMM_UAPI_H */
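A hedged userspace sketch of driving the ioctl interface declared above; the /dev/hmm_dmirror0 path is an assumption (the selftests create device nodes from the dynamically allocated char major), and error handling is minimal:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "test_hmm_uapi.h"

int main(void)
{
	struct hmm_dmirror_cmd cmd = { 0 };
	void *mirrored = aligned_alloc(4096, 4096);	/* page-aligned address to mirror */
	void *copy = malloc(4096);			/* where the driver copies the data */
	int fd = open("/dev/hmm_dmirror0", O_RDWR);	/* path is an assumption */

	if (fd < 0 || !mirrored || !copy)
		return 1;

	cmd.addr = (uintptr_t)mirrored;	/* must be page aligned */
	cmd.ptr = (uintptr_t)copy;
	cmd.npages = 1;
	if (ioctl(fd, HMM_DMIRROR_READ, &cmd) == 0)
		printf("copied %llu page(s), %llu fault(s)\n",
		       (unsigned long long)cmd.cpages,
		       (unsigned long long)cmd.faults);
	return 0;
}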
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 3872d250ed2c..dc2c6a51d11a 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -24,6 +24,14 @@
#include <asm/page.h>
/*
+ * We assign some test results to these globals to make sure the tests
+ * are not eliminated as dead code.
+ */
+
+int kasan_int_result;
+void *kasan_ptr_result;
+
+/*
* Note: test functions are marked noinline so that their names appear in
* reports.
*/
@@ -285,6 +293,24 @@ static noinline void __init kmalloc_oob_in_memset(void)
kfree(ptr);
}
+static noinline void __init kmalloc_memmove_invalid_size(void)
+{
+ char *ptr;
+ size_t size = 64;
+ volatile size_t invalid_size = -2;
+
+ pr_info("invalid size in memmove\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ memset((char *)ptr, 0, 64);
+ memmove((char *)ptr, (char *)ptr + 4, invalid_size);
+ kfree(ptr);
+}
+
static noinline void __init kmalloc_uaf(void)
{
char *ptr;
@@ -604,7 +630,7 @@ static noinline void __init kasan_memchr(void)
if (!ptr)
return;
- memchr(ptr, '1', size + 1);
+ kasan_ptr_result = memchr(ptr, '1', size + 1);
kfree(ptr);
}
@@ -620,7 +646,7 @@ static noinline void __init kasan_memcmp(void)
return;
memset(arr, 0, sizeof(arr));
- memcmp(ptr, arr, size+1);
+ kasan_int_result = memcmp(ptr, arr, size + 1);
kfree(ptr);
}
@@ -643,22 +669,22 @@ static noinline void __init kasan_strings(void)
* will likely point to zeroed byte.
*/
ptr += 16;
- strchr(ptr, '1');
+ kasan_ptr_result = strchr(ptr, '1');
pr_info("use-after-free in strrchr\n");
- strrchr(ptr, '1');
+ kasan_ptr_result = strrchr(ptr, '1');
pr_info("use-after-free in strcmp\n");
- strcmp(ptr, "2");
+ kasan_int_result = strcmp(ptr, "2");
pr_info("use-after-free in strncmp\n");
- strncmp(ptr, "2", 1);
+ kasan_int_result = strncmp(ptr, "2", 1);
pr_info("use-after-free in strlen\n");
- strlen(ptr);
+ kasan_int_result = strlen(ptr);
pr_info("use-after-free in strnlen\n");
- strnlen(ptr, 1);
+ kasan_int_result = strnlen(ptr, 1);
}
static noinline void __init kasan_bitops(void)
@@ -725,11 +751,12 @@ static noinline void __init kasan_bitops(void)
__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
pr_info("out-of-bounds in test_bit\n");
- (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
#if defined(clear_bit_unlock_is_negative_byte)
pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
- clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
+ kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG +
+ BITS_PER_BYTE, bits);
#endif
kfree(bits);
}
@@ -799,6 +826,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_oob_memset_4();
kmalloc_oob_memset_8();
kmalloc_oob_memset_16();
+ kmalloc_memmove_invalid_size();
kmalloc_uaf();
kmalloc_uaf_memset();
kmalloc_uaf2();
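
The new kasan_int_result / kasan_ptr_result globals exist purely as sinks: a call such as strlen() or memchr() whose return value is ignored may legally be dropped by the optimizer, and a dropped call never performs the bad access the test is supposed to trigger. A minimal illustration of the idea (not kernel code, names are made up):

	#include <string.h>

	int global_sink;	/* the compiler must assume someone reads this */

	static void call_may_vanish(const char *p)
	{
		strlen(p);			/* unused result of a pure function:
						 * the call (and its access) may be elided */
	}

	static void call_is_kept(const char *p)
	{
		global_sink = (int)strlen(p);	/* store to a global keeps the call alive */
	}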
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 9cf77628fc91..e651c37d56db 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -204,7 +204,7 @@ static void test_kmod_put_module(struct kmod_test_device_info *info)
case TEST_KMOD_DRIVER:
break;
case TEST_KMOD_FS_TYPE:
- if (info && info->fs_sync && info->fs_sync->owner)
+ if (info->fs_sync && info->fs_sync->owner)
module_put(info->fs_sync->owner);
break;
default:
diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c
new file mode 100644
index 000000000000..676e0b8abcdd
--- /dev/null
+++ b/lib/test_linear_ranges.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the linear_ranges helper.
+ *
+ * Copyright (C) 2020, ROHM Semiconductors.
+ * Author: Matti Vaittinen <matti.vaittien@fi.rohmeurope.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/linear_range.h>
+
+/* First things first. I deeply dislike unit-tests. I have seen all the hell
+ * breaking loose when people who think the unit tests are "the silver bullet"
+ * to kill bugs get to decide how a company should implement testing strategy...
+ *
+ * Believe me, it may get _really_ ridiculous. It is tempting to think that
+ * walking through all the possible execution branches will nail down 100% of
+ * bugs. This may lead to ideas about demands to get certain % of "test
+ * coverage" - measured as line coverage. And that is one of the worst things
+ * you can do.
+ *
+ * Ask people to provide line coverage and they do. I've seen clever tools
+ * which generate test cases to test the existing functions - and by default
+ * these tools expect code to be correct and just generate checks which are
+ * passing when run against the current code-base. Run this generator and you'll
+ * get tests that do not verify the code is correct but only that nothing changes.
+ * The problem is that testing working code is pointless. And if it is not
+ * working, your test must not assume it is working. You won't catch any bugs
+ * by such tests. What you can do is to generate a huge amount of tests.
+ * Especially if you are asked to provide 100% line-coverage x_x. So what
+ * do these tests - which are not finding any bugs now - do?
+ *
+ * They add inertia to every future development. I think it was Terry Pratchett
+ * who wrote about someone having the same impact as thick syrup has on a chronometer.
+ * An excessive amount of unit-tests has this effect on development. If you do
+ * actually find _any_ bug from code in such environment and try fixing it...
+ * ...chances are you also need to fix the test cases. On a sunny day you fix one
+ * test. But I've done refactoring which resulted in 500+ broken tests (which had
+ * really zero value other than proving to managers that we do do "quality")...
+ *
+ * After this being said - there are situations where UTs can be handy. If you
+ * have algorithms which take some input and should produce output - then you
+ * can implement few, carefully selected simple UT-cases which test this. I've
+ * previously used this for example for netlink and device-tree data parsing
+ * functions. Feed some data examples to functions and verify the output is as
+ * expected. I am not covering all the cases but I will see that the main logic
+ * is working.
+ *
+ * Here we also do some minor testing. I don't want to go through all branches
+ * or test more or less obvious things - but I want to see the main logic is
+ * working. And I definitely don't want to add 500+ test cases that break when
+ * some simple fix is done x_x. So - let's only add a few, well-selected tests
+ * which ensure that as much of the logic is good as possible.
+ */
+
+/*
+ * Test Range 1:
+ * selectors: 2 3 4 5 6
+ * values (5): 10 20 30 40 50
+ *
+ * Test Range 2:
+ * selectors: 7 8 9 10
+ * values (4): 100 150 200 250
+ */
+
+#define RANGE1_MIN 10
+#define RANGE1_MIN_SEL 2
+#define RANGE1_STEP 10
+
+/* 2, 3, 4, 5, 6 */
+static const unsigned int range1_sels[] = { RANGE1_MIN_SEL, RANGE1_MIN_SEL + 1,
+ RANGE1_MIN_SEL + 2,
+ RANGE1_MIN_SEL + 3,
+ RANGE1_MIN_SEL + 4 };
+/* 10, 20, 30, 40, 50 */
+static const unsigned int range1_vals[] = { RANGE1_MIN, RANGE1_MIN +
+ RANGE1_STEP,
+ RANGE1_MIN + RANGE1_STEP * 2,
+ RANGE1_MIN + RANGE1_STEP * 3,
+ RANGE1_MIN + RANGE1_STEP * 4 };
+
+#define RANGE2_MIN 100
+#define RANGE2_MIN_SEL 7
+#define RANGE2_STEP 50
+
+/* 7, 8, 9, 10 */
+static const unsigned int range2_sels[] = { RANGE2_MIN_SEL, RANGE2_MIN_SEL + 1,
+ RANGE2_MIN_SEL + 2,
+ RANGE2_MIN_SEL + 3 };
+/* 100, 150, 200, 250 */
+static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN +
+ RANGE2_STEP,
+ RANGE2_MIN + RANGE2_STEP * 2,
+ RANGE2_MIN + RANGE2_STEP * 3 };
+
+#define RANGE1_NUM_VALS (ARRAY_SIZE(range1_vals))
+#define RANGE2_NUM_VALS (ARRAY_SIZE(range2_vals))
+#define RANGE_NUM_VALS (RANGE1_NUM_VALS + RANGE2_NUM_VALS)
+
+#define RANGE1_MAX_SEL (RANGE1_MIN_SEL + RANGE1_NUM_VALS - 1)
+#define RANGE1_MAX_VAL (range1_vals[RANGE1_NUM_VALS - 1])
+
+#define RANGE2_MAX_SEL (RANGE2_MIN_SEL + RANGE2_NUM_VALS - 1)
+#define RANGE2_MAX_VAL (range2_vals[RANGE2_NUM_VALS - 1])
+
+#define SMALLEST_SEL RANGE1_MIN_SEL
+#define SMALLEST_VAL RANGE1_MIN
+
+static struct linear_range testr[] = {
+ {
+ .min = RANGE1_MIN,
+ .min_sel = RANGE1_MIN_SEL,
+ .max_sel = RANGE1_MAX_SEL,
+ .step = RANGE1_STEP,
+ }, {
+ .min = RANGE2_MIN,
+ .min_sel = RANGE2_MIN_SEL,
+ .max_sel = RANGE2_MAX_SEL,
+ .step = RANGE2_STEP
+ },
+};
+
+static void range_test_get_value(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel, val;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ sel = range1_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range1_vals[i]);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ sel = range2_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range2_vals[i]);
+ }
+ ret = linear_range_get_value_array(&testr[0], 2, sel + 1, &val);
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void range_test_get_selector_high(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_high(&testr[0], range1_vals[i],
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MAX_VAL + 1,
+ &sel, &found);
+ KUNIT_EXPECT_LE(test, ret, 0);
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MIN - 1,
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_FALSE(test, found);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[0]);
+}
+
+static void range_test_get_value_amount(struct kunit *test)
+{
+ int ret;
+
+ ret = linear_range_values_in_range_array(&testr[0], 2);
+ KUNIT_EXPECT_EQ(test, (int)RANGE_NUM_VALS, ret);
+}
+
+static void range_test_get_selector_low(struct kunit *test)
+{
+ int i, ret;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range1_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ /*
+ * Seek value greater than range max => get_selector_*_low should
+ * return Ok - but set found to false as value is not in range
+ */
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[RANGE2_NUM_VALS - 1] + 1,
+ &sel, &found);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[RANGE2_NUM_VALS - 1]);
+ KUNIT_EXPECT_FALSE(test, found);
+}
+
+static struct kunit_case range_test_cases[] = {
+ KUNIT_CASE(range_test_get_value_amount),
+ KUNIT_CASE(range_test_get_selector_high),
+ KUNIT_CASE(range_test_get_selector_low),
+ KUNIT_CASE(range_test_get_value),
+ {},
+};
+
+static struct kunit_suite range_test_module = {
+ .name = "linear-ranges-test",
+ .test_cases = range_test_cases,
+};
+
+kunit_test_suites(&range_test_module);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
new file mode 100644
index 000000000000..bd7c7ff39f6b
--- /dev/null
+++ b/lib/test_lockup.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module to generate lockups
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+
+static unsigned int time_secs;
+module_param(time_secs, uint, 0600);
+MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");
+
+static unsigned int time_nsecs;
+module_param(time_nsecs, uint, 0600);
+MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");
+
+static unsigned int cooldown_secs;
+module_param(cooldown_secs, uint, 0600);
+MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");
+
+static unsigned int cooldown_nsecs;
+module_param(cooldown_nsecs, uint, 0600);
+MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0600);
+MODULE_PARM_DESC(iterations, "lockup iterations, default 1");
+
+static bool all_cpus;
+module_param(all_cpus, bool, 0400);
+MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");
+
+static int wait_state;
+static char *state = "R";
+module_param(state, charp, 0400);
+MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");
+
+static bool use_hrtimer;
+module_param(use_hrtimer, bool, 0400);
+MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");
+
+static bool iowait;
+module_param(iowait, bool, 0400);
+MODULE_PARM_DESC(iowait, "account sleep time as iowait");
+
+static bool lock_read;
+module_param(lock_read, bool, 0400);
+MODULE_PARM_DESC(lock_read, "lock read-write locks for read");
+
+static bool lock_single;
+module_param(lock_single, bool, 0400);
+MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");
+
+static bool reacquire_locks;
+module_param(reacquire_locks, bool, 0400);
+MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");
+
+static bool touch_softlockup;
+module_param(touch_softlockup, bool, 0600);
+MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");
+
+static bool touch_hardlockup;
+module_param(touch_hardlockup, bool, 0600);
+MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");
+
+static bool call_cond_resched;
+module_param(call_cond_resched, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
+
+static bool measure_lock_wait;
+module_param(measure_lock_wait, bool, 0400);
+MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
+
+static unsigned long lock_wait_threshold = ULONG_MAX;
+module_param(lock_wait_threshold, ulong, 0400);
+MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");
+
+static bool test_disable_irq;
+module_param_named(disable_irq, test_disable_irq, bool, 0400);
+MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");
+
+static bool disable_softirq;
+module_param(disable_softirq, bool, 0400);
+MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");
+
+static bool disable_preempt;
+module_param(disable_preempt, bool, 0400);
+MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");
+
+static bool lock_rcu;
+module_param(lock_rcu, bool, 0400);
+MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");
+
+static bool lock_mmap_sem;
+module_param(lock_mmap_sem, bool, 0400);
+MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");
+
+static unsigned long lock_rwsem_ptr;
+module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");
+
+static unsigned long lock_mutex_ptr;
+module_param_unsafe(lock_mutex_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");
+
+static unsigned long lock_spinlock_ptr;
+module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");
+
+static unsigned long lock_rwlock_ptr;
+module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");
+
+static unsigned int alloc_pages_nr;
+module_param_unsafe(alloc_pages_nr, uint, 0600);
+MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");
+
+static unsigned int alloc_pages_order;
+module_param(alloc_pages_order, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");
+
+static gfp_t alloc_pages_gfp = GFP_KERNEL;
+module_param_unsafe(alloc_pages_gfp, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");
+
+static bool alloc_pages_atomic;
+module_param(alloc_pages_atomic, bool, 0400);
+MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");
+
+static bool reallocate_pages;
+module_param(reallocate_pages, bool, 0400);
+MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
+
+struct file *test_file;
+static struct inode *test_inode;
+static char test_file_path[256];
+module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
+MODULE_PARM_DESC(file_path, "file path to test");
+
+static bool test_lock_inode;
+module_param_named(lock_inode, test_lock_inode, bool, 0400);
+MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");
+
+static bool test_lock_mapping;
+module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
+MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");
+
+static bool test_lock_sb_umount;
+module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
+MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
+
+static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
+
+static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+
+static struct task_struct *main_task;
+static int master_cpu;
+
+static void test_lock(bool master, bool verbose)
+{
+ u64 uninitialized_var(wait_start);
+
+ if (measure_lock_wait)
+ wait_start = local_clock();
+
+ if (lock_mutex_ptr && master) {
+ if (verbose)
+ pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
+ mutex_lock((struct mutex *)lock_mutex_ptr);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (verbose)
+ pr_notice("lock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ if (lock_read)
+ down_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ down_write((struct rw_semaphore *)lock_rwsem_ptr);
+ }
+
+ if (lock_mmap_sem && master) {
+ if (verbose)
+ pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
+ if (lock_read)
+ mmap_read_lock(main_task->mm);
+ else
+ mmap_write_lock(main_task->mm);
+ }
+
+ if (test_disable_irq)
+ local_irq_disable();
+
+ if (disable_softirq)
+ local_bh_disable();
+
+ if (disable_preempt)
+ preempt_disable();
+
+ if (lock_rcu)
+ rcu_read_lock();
+
+ if (lock_spinlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ spin_lock((spinlock_t *)lock_spinlock_ptr);
+ }
+
+ if (lock_rwlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ if (lock_read)
+ read_lock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_lock((rwlock_t *)lock_rwlock_ptr);
+ }
+
+ if (measure_lock_wait) {
+ s64 cur_wait = local_clock() - wait_start;
+ s64 max_wait = atomic64_read(&max_lock_wait);
+
+ do {
+ if (cur_wait < max_wait)
+ break;
+ max_wait = atomic64_cmpxchg(&max_lock_wait,
+ max_wait, cur_wait);
+ } while (max_wait != cur_wait);
+
+ if (cur_wait > lock_wait_threshold)
+ pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
+ }
+}
+
+static void test_unlock(bool master, bool verbose)
+{
+ if (lock_rwlock_ptr && master) {
+ if (lock_read)
+ read_unlock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_unlock((rwlock_t *)lock_rwlock_ptr);
+ if (verbose)
+ pr_notice("unlock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ }
+
+ if (lock_spinlock_ptr && master) {
+ spin_unlock((spinlock_t *)lock_spinlock_ptr);
+ if (verbose)
+ pr_notice("unlock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ }
+
+ if (lock_rcu)
+ rcu_read_unlock();
+
+ if (disable_preempt)
+ preempt_enable();
+
+ if (disable_softirq)
+ local_bh_enable();
+
+ if (test_disable_irq)
+ local_irq_enable();
+
+ if (lock_mmap_sem && master) {
+ if (lock_read)
+ mmap_read_unlock(main_task->mm);
+ else
+ mmap_write_unlock(main_task->mm);
+ if (verbose)
+ pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (lock_read)
+ up_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ up_write((struct rw_semaphore *)lock_rwsem_ptr);
+ if (verbose)
+ pr_notice("unlock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ }
+
+ if (lock_mutex_ptr && master) {
+ mutex_unlock((struct mutex *)lock_mutex_ptr);
+ if (verbose)
+ pr_notice("unlock mutex %ps\n",
+ (void *)lock_mutex_ptr);
+ }
+}
+
+static void test_alloc_pages(struct list_head *pages)
+{
+ struct page *page;
+ unsigned int i;
+
+ for (i = 0; i < alloc_pages_nr; i++) {
+ page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
+ if (!page) {
+ atomic_inc(&alloc_pages_failed);
+ break;
+ }
+ list_add(&page->lru, pages);
+ }
+}
+
+static void test_free_pages(struct list_head *pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, pages, lru)
+ __free_pages(page, alloc_pages_order);
+ INIT_LIST_HEAD(pages);
+}
+
+static void test_wait(unsigned int secs, unsigned int nsecs)
+{
+ if (wait_state == TASK_RUNNING) {
+ if (secs)
+ mdelay(secs * MSEC_PER_SEC);
+ if (nsecs)
+ ndelay(nsecs);
+ return;
+ }
+
+ __set_current_state(wait_state);
+ if (use_hrtimer) {
+ ktime_t time;
+
+ time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
+ schedule_hrtimeout(&time, HRTIMER_MODE_REL);
+ } else {
+ schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
+ }
+}
+
+static void test_lockup(bool master)
+{
+ u64 lockup_start = local_clock();
+ unsigned int iter = 0;
+ LIST_HEAD(pages);
+
+ pr_notice("Start on CPU%d\n", raw_smp_processor_id());
+
+ test_lock(master, true);
+
+ test_alloc_pages(&pages);
+
+ while (iter++ < iterations && !signal_pending(main_task)) {
+
+ if (iowait)
+ current->in_iowait = 1;
+
+ test_wait(time_secs, time_nsecs);
+
+ if (iowait)
+ current->in_iowait = 0;
+
+ if (reallocate_pages)
+ test_free_pages(&pages);
+
+ if (reacquire_locks)
+ test_unlock(master, false);
+
+ if (touch_softlockup)
+ touch_softlockup_watchdog();
+
+ if (touch_hardlockup)
+ touch_nmi_watchdog();
+
+ if (call_cond_resched)
+ cond_resched();
+
+ test_wait(cooldown_secs, cooldown_nsecs);
+
+ if (reacquire_locks)
+ test_lock(master, false);
+
+ if (reallocate_pages)
+ test_alloc_pages(&pages);
+ }
+
+ pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
+ local_clock() - lockup_start);
+
+ test_free_pages(&pages);
+
+ test_unlock(master, true);
+}
+
+DEFINE_PER_CPU(struct work_struct, test_works);
+
+static void test_work_fn(struct work_struct *work)
+{
+ test_lockup(!lock_single ||
+ work == per_cpu_ptr(&test_works, master_cpu));
+}
+
+static bool test_kernel_ptr(unsigned long addr, int size)
+{
+ void *ptr = (void *)addr;
+ char buf;
+
+ if (!addr)
+ return false;
+
+ /* should be at least readable kernel address */
+ if (access_ok(ptr, 1) ||
+ access_ok(ptr + size - 1, 1) ||
+ get_kernel_nofault(buf, ptr) ||
+ get_kernel_nofault(buf, ptr + size - 1)) {
+ pr_err("invalid kernel ptr: %#lx\n", addr);
+ return true;
+ }
+
+ return false;
+}
+
+static bool __maybe_unused test_magic(unsigned long addr, int offset,
+ unsigned int expected)
+{
+ void *ptr = (void *)addr + offset;
+ unsigned int magic = 0;
+
+ if (!addr)
+ return false;
+
+ if (get_kernel_nofault(magic, ptr) || magic != expected) {
+ pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
+ addr, offset, magic, expected);
+ return true;
+ }
+
+ return false;
+}
+
+static int __init test_lockup_init(void)
+{
+ u64 test_start = local_clock();
+
+ main_task = current;
+
+ switch (state[0]) {
+ case 'S':
+ wait_state = TASK_INTERRUPTIBLE;
+ break;
+ case 'D':
+ wait_state = TASK_UNINTERRUPTIBLE;
+ break;
+ case 'K':
+ wait_state = TASK_KILLABLE;
+ break;
+ case 'R':
+ wait_state = TASK_RUNNING;
+ break;
+ default:
+ pr_err("unknown state=%s\n", state);
+ return -EINVAL;
+ }
+
+ if (alloc_pages_atomic)
+ alloc_pages_gfp = GFP_ATOMIC;
+
+ if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
+ test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
+ test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
+ test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
+ return -EINVAL;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, magic),
+ RWLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, wait_lock.rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#endif
+
+ if ((wait_state != TASK_RUNNING ||
+ (call_cond_resched && !reacquire_locks) ||
+ (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
+ (test_disable_irq || disable_softirq || disable_preempt ||
+ lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
+ pr_err("refuse to sleep in atomic context\n");
+ return -EINVAL;
+ }
+
+ if (lock_mmap_sem && !main_task->mm) {
+ pr_err("no mm to lock mmap_lock\n");
+ return -EINVAL;
+ }
+
+ if (test_file_path[0]) {
+ test_file = filp_open(test_file_path, O_RDONLY, 0);
+ if (IS_ERR(test_file)) {
+ pr_err("cannot find file_path\n");
+ return -EINVAL;
+ }
+ test_inode = file_inode(test_file);
+ } else if (test_lock_inode ||
+ test_lock_mapping ||
+ test_lock_sb_umount) {
+ pr_err("no file to lock\n");
+ return -EINVAL;
+ }
+
+ if (test_lock_inode && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;
+
+ if (test_lock_mapping && test_file && test_file->f_mapping)
+ lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;
+
+ if (test_lock_sb_umount && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;
+
+ pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
+ main_task->pid, time_secs, time_nsecs,
+ cooldown_secs, cooldown_nsecs, iterations, state,
+ all_cpus ? "all_cpus " : "",
+ iowait ? "iowait " : "",
+ test_disable_irq ? "disable_irq " : "",
+ disable_softirq ? "disable_softirq " : "",
+ disable_preempt ? "disable_preempt " : "",
+ lock_rcu ? "lock_rcu " : "",
+ lock_read ? "lock_read " : "",
+ touch_softlockup ? "touch_softlockup " : "",
+ touch_hardlockup ? "touch_hardlockup " : "",
+ call_cond_resched ? "call_cond_resched " : "",
+ reacquire_locks ? "reacquire_locks " : "");
+
+ if (alloc_pages_nr)
+ pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
+ alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
+ reallocate_pages ? "reallocate_pages " : "");
+
+ if (all_cpus) {
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ preempt_disable();
+ master_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+ INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&test_works, cpu));
+ }
+ preempt_enable();
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&test_works, cpu));
+
+ cpus_read_unlock();
+ } else {
+ test_lockup(true);
+ }
+
+ if (measure_lock_wait)
+ pr_notice("Maximum lock wait: %lld ns\n",
+ atomic64_read(&max_lock_wait));
+
+ if (alloc_pages_nr)
+ pr_notice("Page allocation failed %u times\n",
+ atomic_read(&alloc_pages_failed));
+
+ pr_notice("FINISH in %llu ns\n", local_clock() - test_start);
+
+ if (test_file)
+ fput(test_file);
+
+ if (signal_pending(main_task))
+ return -EINTR;
+
+ return -EAGAIN;
+}
+module_init(test_lockup_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
+MODULE_DESCRIPTION("Test module to generate lockups");
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
new file mode 100644
index 000000000000..d19c8080fd4d
--- /dev/null
+++ b/lib/test_min_heap.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "min_heap_test: " fmt
+
+/*
+ * Test cases for the min max heap.
+ */
+
+#include <linux/log2.h>
+#include <linux/min_heap.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+
+static __init bool less_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs < *(int *)rhs;
+}
+
+static __init bool greater_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs > *(int *)rhs;
+}
+
+static __init void swap_ints(void *lhs, void *rhs)
+{
+ int temp = *(int *)lhs;
+
+ *(int *)lhs = *(int *)rhs;
+ *(int *)rhs = temp;
+}
+
+static __init int pop_verify_heap(bool min_heap,
+ struct min_heap *heap,
+ const struct min_heap_callbacks *funcs)
+{
+ int *values = heap->data;
+ int err = 0;
+ int last;
+
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ while (heap->nr > 0) {
+ if (min_heap) {
+ if (last > values[0]) {
+ pr_err("error: expected %d <= %d\n", last,
+ values[0]);
+ err++;
+ }
+ } else {
+ if (last < values[0]) {
+ pr_err("error: expected %d >= %d\n", last,
+ values[0]);
+ err++;
+ }
+ }
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ }
+ return err;
+}
+
+static __init int test_heapify_all(bool min_heap)
+{
+ int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
+ -3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
+ struct min_heap heap = {
+ .data = values,
+ .nr = ARRAY_SIZE(values),
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, err;
+
+ /* Test with known set of values. */
+ min_heapify_all(&heap, &funcs);
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+
+ /* Test with randomly generated values. */
+ heap.nr = ARRAY_SIZE(values);
+ for (i = 0; i < heap.nr; i++)
+ values[i] = get_random_int();
+
+ min_heapify_all(&heap, &funcs);
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ while (heap.nr < heap.size) {
+ temp = get_random_int();
+ min_heap_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_pop_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Fill values with data to pop and replace. */
+ temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_pop_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ heap.nr = 0;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with randomly generated values. */
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ temp = get_random_int();
+ min_heap_pop_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static int __init test_min_heap_init(void)
+{
+ int err = 0;
+
+ err += test_heapify_all(true);
+ err += test_heapify_all(false);
+ err += test_heap_push(true);
+ err += test_heap_push(false);
+ err += test_heap_pop_push(true);
+ err += test_heap_pop_push(false);
+ if (err) {
+ pr_err("test failed with %d errors\n", err);
+ return -EINVAL;
+ }
+ pr_info("test passed\n");
+ return 0;
+}
+module_init(test_min_heap_init);
+
+static void __exit test_min_heap_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_min_heap_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index 72c1abfa154d..da137939a410 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -979,10 +979,10 @@ err_check_expect_stats2:
err_world2_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
- objagg_hints_put(hints);
- objagg_destroy(objagg2);
i = hints_case->key_ids_count;
+ objagg_destroy(objagg2);
err_check_expect_hints_stats:
+ objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
err_world_obj_get:
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 2d9f520d2f27..7ac87f18a10f 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -214,6 +214,7 @@ test_string(void)
#define PTR_STR "ffff0123456789ab"
#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
+#define ONES "ffffffff" /* hex 32 one bits */
static int __init
plain_format(void)
@@ -245,6 +246,7 @@ plain_format(void)
#define PTR_STR "456789ab"
#define PTR_VAL_NO_CRNG "(ptrval)"
#define ZEROS ""
+#define ONES ""
static int __init
plain_format(void)
@@ -330,14 +332,28 @@ test_hashed(const char *fmt, const void *p)
test(buf, fmt, p);
}
+/*
+ * NULL pointers aren't hashed.
+ */
static void __init
null_pointer(void)
{
- test_hashed("%p", NULL);
+ test(ZEROS "00000000", "%p", NULL);
test(ZEROS "00000000", "%px", NULL);
test("(null)", "%pE", NULL);
}
+/*
+ * Error pointers aren't hashed.
+ */
+static void __init
+error_pointer(void)
+{
+ test(ONES "fffffff5", "%p", ERR_PTR(-11));
+ test(ONES "fffffff5", "%px", ERR_PTR(-11));
+ test("(efault)", "%pE", ERR_PTR(-11));
+}
+
#define PTR_INVALID ((void *)0x000000ab)
static void __init
@@ -478,7 +494,7 @@ struct_va_format(void)
}
static void __init
-struct_rtc_time(void)
+time_and_date(void)
{
/* 1543210543 */
const struct rtc_time tm = {
@@ -489,14 +505,21 @@ struct_rtc_time(void)
.tm_mon = 10,
.tm_year = 118,
};
+ /* 2019-01-04T15:32:23 */
+ time64_t t = 1546615943;
- test("(%ptR?)", "%pt", &tm);
+ test("(%pt?)", "%pt", &tm);
test("2018-11-26T05:35:43", "%ptR", &tm);
test("0118-10-26T05:35:43", "%ptRr", &tm);
test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
+
+ test("2019-01-04T15:32:23", "%ptT", &t);
+ test("0119-00-04T15:32:23", "%ptTr", &t);
+ test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t);
}
static void __init
@@ -621,7 +644,9 @@ static void __init fwnode_pointer(void)
test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
- software_node_unregister_nodes(softnodes);
+ software_node_unregister(&softnodes[2]);
+ software_node_unregister(&softnodes[1]);
+ software_node_unregister(&softnodes[0]);
}
static void __init
@@ -649,6 +674,7 @@ test_pointer(void)
{
plain();
null_pointer();
+ error_pointer();
invalid_pointer();
symbol_ptr();
kernel_ptr();
@@ -661,7 +687,7 @@ test_pointer(void)
uuid();
dentry();
struct_va_format();
- struct_rtc_time();
+ time_and_date();
struct_clk();
bitmap();
netdev_features();
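
As the expectations above show, the new %ptT specifier takes a pointer to a time64_t and prints it in the same ISO-8601-like forms as %ptR. A minimal hedged example of what a caller might do (ktime_get_real_seconds() returns a time64_t):

	time64_t now = ktime_get_real_seconds();

	pr_info("wall clock: %ptT\n", &now);	/* e.g. "2019-01-04T15:32:23" */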
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index 2d7d257a430e..f93b1e145ada 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -92,8 +92,9 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
* @init_level: what kind of initialization is performed
+ * @xfail: is this test expected to fail?
*/
-#define DEFINE_TEST_DRIVER(name, var_type, which) \
+#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
/* Returns 0 on success, 1 on failure. */ \
static noinline __init int test_ ## name (void) \
{ \
@@ -139,13 +140,14 @@ static noinline __init int test_ ## name (void) \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] == 0xFF); \
\
- if (sum == 0) \
+ if (sum == 0) { \
pr_info(#name " ok\n"); \
- else \
- pr_warn(#name " FAIL (uninit bytes: %d)\n", \
- sum); \
- \
- return (sum != 0); \
+ return 0; \
+ } else { \
+ pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \
+ (xfail) ? "X" : "", sum); \
+ return (xfail) ? 0 : 1; \
+ } \
}
#define DEFINE_TEST(name, var_type, which, init_level) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
@@ -189,7 +191,7 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
\
return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
} \
-DEFINE_TEST_DRIVER(name, var_type, which)
+DEFINE_TEST_DRIVER(name, var_type, which, 0)
/* Structure with no padding. */
struct test_packed {
@@ -326,8 +328,14 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
return __leaf_switch_none(2, fill);
}
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR);
+/*
+ * These are expected to fail for most configurations because neither
+ * GCC nor Clang have a way to perform initialization of variables in
+ * non-code areas (i.e. in a switch statement before the first "case").
+ * https://bugs.llvm.org/show_bug.cgi?id=44916
+ */
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, 1);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, 1);
static int __init test_stackinit_init(void)
{
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 566dad3f4196..98bc92a91662 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -44,6 +44,8 @@ struct test_sysctl_data {
int int_0002;
int int_0003[4];
+ int boot_int;
+
unsigned int uint_0001;
char string_0001[65];
@@ -61,6 +63,8 @@ static struct test_sysctl_data test_data = {
.int_0003[2] = 2,
.int_0003[3] = 3,
+ .boot_int = 0,
+
.uint_0001 = 314,
.string_0001 = "(none)",
@@ -92,6 +96,15 @@ static struct ctl_table test_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "boot_int",
+ .data = &test_data.boot_int,
+ .maxlen = sizeof(test_data.boot_int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "uint_0001",
.data = &test_data.uint_0001,
.maxlen = sizeof(unsigned int),
@@ -149,7 +162,7 @@ static int __init test_sysctl_init(void)
}
return 0;
}
-late_initcall(test_sysctl_init);
+module_init(test_sysctl_init);
static void __exit test_sysctl_exit(void)
{
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 8bbefcaddfe8..ddc9685702b1 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -91,12 +91,8 @@ static int random_size_align_alloc_test(void)
*/
size = ((rnd % 10) + 1) * PAGE_SIZE;
- ptr = __vmalloc_node_range(size, align,
- VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL,
- 0, 0, __builtin_return_address(0));
-
+ ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0,
+ __builtin_return_address(0));
if (!ptr)
return -1;
@@ -118,12 +114,8 @@ static int align_shift_alloc_test(void)
for (i = 0; i < BITS_PER_LONG; i++) {
align = ((unsigned long) 1) << i;
- ptr = __vmalloc_node_range(PAGE_SIZE, align,
- VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL,
- 0, 0, __builtin_return_address(0));
-
+ ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
+ __builtin_return_address(0));
if (!ptr)
return -1;
@@ -139,13 +131,9 @@ static int fix_align_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- ptr = __vmalloc_node_range(5 * PAGE_SIZE,
- THREAD_ALIGN << 1,
- VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_ZERO,
- PAGE_KERNEL,
- 0, 0, __builtin_return_address(0));
-
+ ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
+ GFP_KERNEL | __GFP_ZERO, 0,
+ __builtin_return_address(0));
if (!ptr)
return -1;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 55c14e8c8859..d4f97925dbd8 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -12,6 +12,9 @@
static unsigned int tests_run;
static unsigned int tests_passed;
+static const unsigned int order_limit =
+ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
@@ -959,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+ unsigned int order;
+
+ for (order = 5; order < order_limit; order++) {
+ unsigned long index = 1UL << (order - 5);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+ xa_erase_index(xa, 0);
+ }
+}
+
static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -1081,6 +1098,7 @@ static noinline void check_find(struct xarray *xa)
for (i = 2; i < 10; i++)
check_multi_find_1(xa, i);
check_multi_find_2(xa);
+ check_multi_find_3(xa);
}
/* See find_swap_entry() in mm/shmem.c */
@@ -1138,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_pause(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned int order;
+ unsigned long index = 1;
+ unsigned int count = 0;
+
+ for (order = 0; order < order_limit; order++) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+}
+
static noinline void check_move_tiny(struct xarray *xa)
{
XA_STATE(xas, xa, 0);
@@ -1646,6 +1700,7 @@ static int xarray_checks(void)
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
+ check_pause(&array);
check_account(&array);
check_destroy(&array);
check_move(&array);
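
check_pause() exercises xas_pause(), whose usual purpose is letting an iteration drop the lock and resume later from the same place. A sketch of that pattern, assuming an existing xarray my_xarray and a caller-provided consume() callback:

	XA_STATE(xas, &my_xarray, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		consume(entry);			/* made-up callback */
		if (need_resched()) {
			xas_pause(&xas);	/* remember where the walk was */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();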
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index b352903c50e3..277cb4417ac2 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -52,7 +52,7 @@ struct ts_bm
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
- unsigned int good_shift[0];
+ unsigned int good_shift[];
};
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 9c873cadab7c..ab749ec10ab5 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -32,7 +32,7 @@
struct ts_fsm
{
unsigned int ntokens;
- struct ts_fsm_token tokens[0];
+ struct ts_fsm_token tokens[];
};
/* other values derived from ctype.h */
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 94617e014b3a..c77a3d537f24 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -36,7 +36,7 @@ struct ts_kmp
{
u8 * pattern;
unsigned int pattern_len;
- unsigned int prefix_tbl[0];
+ unsigned int prefix_tbl[];
};
static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
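
The three textsearch changes above all replace the deprecated zero-length array idiom with a C99 flexible array member. The allocation still has to account for the trailing array; the usual kernel idiom for that is struct_size(). A generic sketch (not the actual allocation path used by the textsearch core, and pattern_len is an assumed local here):

	struct ts_kmp *kmp;

	/* room for the struct itself plus pattern_len prefix table entries */
	kmp = kmalloc(struct_size(kmp, prefix_tbl, pattern_len), GFP_KERNEL);
	if (!kmp)
		return -ENOMEM;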
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 7b9b58aee72c..cb9af3f6b77e 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -45,13 +45,6 @@ static bool was_reported(struct source_location *location)
return test_and_set_bit(REPORTED_BIT, &location->reported);
}
-static void print_source_location(const char *prefix,
- struct source_location *loc)
-{
- pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
- loc->line & LINE_MASK, loc->column & COLUMN_MASK);
-}
-
static bool suppress_report(struct source_location *loc)
{
return current->in_ubsan || was_reported(loc);
@@ -140,13 +133,14 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static void ubsan_prologue(struct source_location *location)
+static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
pr_err("========================================"
"========================================\n");
- print_source_location("UBSAN: Undefined behaviour in", location);
+ pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
+ loc->line & LINE_MASK, loc->column & COLUMN_MASK);
}
static void ubsan_epilogue(void)
@@ -156,6 +150,17 @@ static void ubsan_epilogue(void)
"========================================\n");
current->in_ubsan--;
+
+ if (panic_on_warn) {
+ /*
+ * This thread may hit another WARN() in the panic path.
+ * Resetting this prevents additional WARN() from panicking the
+ * system on this thread. Other threads are blocked by the
+ * panic_mutex in panic().
+ */
+ panic_on_warn = 0;
+ panic("panic_on_warn set ...\n");
+ }
}
static void handle_overflow(struct overflow_data *data, void *lhs,
@@ -169,12 +174,12 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, type_is_signed(type) ?
+ "signed-integer-overflow" :
+ "unsigned-integer-overflow");
val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
- pr_err("%s integer overflow:\n",
- type_is_signed(type) ? "signed" : "unsigned");
pr_err("%s %c %s cannot be represented in type %s\n",
lhs_val_str,
op,
@@ -184,7 +189,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
ubsan_epilogue();
}
-void __ubsan_handle_add_overflow(struct overflow_data *data,
+void __ubsan_handle_add_overflow(void *data,
void *lhs, void *rhs)
{
@@ -192,29 +197,29 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
-void __ubsan_handle_sub_overflow(struct overflow_data *data,
+void __ubsan_handle_sub_overflow(void *data,
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
-void __ubsan_handle_mul_overflow(struct overflow_data *data,
+void __ubsan_handle_mul_overflow(void *data,
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
-void __ubsan_handle_negate_overflow(struct overflow_data *data,
- void *old_val)
+void __ubsan_handle_negate_overflow(void *_data, void *old_val)
{
+ struct overflow_data *data = _data;
char old_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "negation-overflow");
val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
@@ -226,15 +231,15 @@ void __ubsan_handle_negate_overflow(struct overflow_data *data,
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
-void __ubsan_handle_divrem_overflow(struct overflow_data *data,
- void *lhs, void *rhs)
+void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
+ struct overflow_data *data = _data;
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "division-overflow");
val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
@@ -253,7 +258,7 @@ static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "null-ptr-deref");
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
@@ -268,7 +273,7 @@ static void handle_misaligned_access(struct type_mismatch_data_common *data,
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "misaligned-access");
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
@@ -284,7 +289,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "object-size-mismatch");
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
@@ -321,10 +326,9 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
-void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
- void *ptr)
+void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr)
{
-
+ struct type_mismatch_data_v1 *data = _data;
struct type_mismatch_data_common common_data = {
.location = &data->location,
.type = data->type,
@@ -336,14 +340,15 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
+void __ubsan_handle_out_of_bounds(void *_data, void *index)
{
+ struct out_of_bounds_data *data = _data;
char index_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "array-index-out-of-bounds");
val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
@@ -352,9 +357,9 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
-void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
- void *lhs, void *rhs)
+void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs)
{
+ struct shift_out_of_bounds_data *data = _data;
struct type_descriptor *rhs_type = data->rhs_type;
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
@@ -364,7 +369,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
if (suppress_report(&data->location))
goto out;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "shift-out-of-bounds");
val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -394,24 +399,25 @@ out:
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
-void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(void *_data)
{
- ubsan_prologue(&data->location);
+ struct unreachable_data *data = _data;
+ ubsan_prologue(&data->location, "unreachable");
pr_err("calling __builtin_unreachable()\n");
ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
-void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
- void *val)
+void __ubsan_handle_load_invalid_value(void *_data, void *val)
{
+ struct invalid_value_data *data = _data;
char val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "invalid-load");
val_to_string(val_str, sizeof(val_str), data->type, val);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9ec00f2..b26509f112f9 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/uaccess.h>
#include <linux/bitops.h>
+#include <linux/instrumented.h>
+#include <linux/uaccess.h>
/* out-of-line parts */
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -25,7 +26,7 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (likely(access_ok(to, n))) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -58,7 +59,7 @@ int check_zeroed_user(const void __user *from, size_t size)
from -= align;
size += align;
- if (!user_access_begin(from, size))
+ if (!user_read_access_begin(from, size))
return -EFAULT;
unsafe_get_user(val, (unsigned long __user *) from, err_fault);
@@ -79,10 +80,10 @@ int check_zeroed_user(const void __user *from, size_t size)
val &= aligned_byte_mask(size);
done:
- user_access_end();
+ user_read_access_end();
return (val == 0);
err_fault:
- user_access_end();
+ user_read_access_end();
return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);
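
The switch from kasan_check_read()/kasan_check_write() to instrument_copy_to_user()/instrument_copy_from_user() routes the check through one helper that every enabled sanitizer can hook rather than KASAN alone. Roughly of this shape (a sketch only; see include/linux/instrumented.h for the real definitions):

	static __always_inline void
	instrument_copy_from_user(const void *to, const void __user *from,
				  unsigned long n)
	{
		/* each enabled sanitizer checks the kernel-side destination */
		kasan_check_write(to, n);
		kcsan_check_write(to, n);
	}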
diff --git a/lib/uuid.c b/lib/uuid.c
index b6a1edb61d87..562d53977cab 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -40,6 +40,16 @@ void generate_random_uuid(unsigned char uuid[16])
}
EXPORT_SYMBOL(generate_random_uuid);
+void generate_random_guid(unsigned char guid[16])
+{
+ get_random_bytes(guid, 16);
+ /* Set GUID version to 4 --- truly random generation */
+ guid[7] = (guid[7] & 0x0F) | 0x40;
+ /* Set the GUID variant to DCE */
+ guid[8] = (guid[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_guid);
+
static void __uuid_gen_common(__u8 b[16])
{
prandom_bytes(b, 16);
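
The reason generate_random_guid() touches byte 7 where generate_random_uuid() touches byte 6: a GUID stores its first three fields little-endian, so the high byte of time_hi_and_version - the one carrying the version nibble - lands one byte later in memory, while the variant bits stay in byte 8 in both layouts. For illustration only:

	unsigned char u[16], g[16];

	generate_random_uuid(u);	/* big-endian field: version nibble is u[6] >> 4 */
	generate_random_guid(g);	/* little-endian field: version nibble is g[7] >> 4 */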
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index f8b8ec5e63ac..bcc9a98a0524 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -2,30 +2,9 @@
/*
* Generic userspace implementations of gettimeofday() and similar.
*/
-#include <linux/compiler.h>
-#include <linux/math64.h>
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/hrtimer_defs.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-/*
- * The generic vDSO implementation requires that gettimeofday.h
- * provides:
- * - __arch_get_vdso_data(): to get the vdso datapage.
- * - __arch_get_hw_counter(): to get the hw counter based on the
- * clock_mode.
- * - gettimeofday_fallback(): fallback for gettimeofday.
- * - clock_gettime_fallback(): fallback for clock_gettime.
- * - clock_getres_fallback(): fallback for clock_getres.
- */
-#ifdef ENABLE_COMPAT_VDSO
-#include <asm/vdso/compat_gettimeofday.h>
-#else
-#include <asm/vdso/gettimeofday.h>
-#endif /* ENABLE_COMPAT_VDSO */
-
#ifndef vdso_calc_delta
/*
* Default implementation which works for all sane clocksources. That
@@ -38,6 +17,34 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
}
#endif
+#ifndef vdso_shift_ns
+static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
+{
+ return ns >> shift;
+}
+#endif
+
+#ifndef __arch_vdso_hres_capable
+static inline bool __arch_vdso_hres_capable(void)
+{
+ return true;
+}
+#endif
+
+#ifndef vdso_clocksource_ok
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+}
+#endif
+
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
@@ -57,14 +64,17 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
do {
seq = vdso_read_begin(vd);
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
cycles = __arch_get_hw_counter(vd->clock_mode);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
ns = vdso_ts->nsec;
last = vd->cycle_last;
- if (unlikely((s64)cycles < 0))
- return -1;
-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns >>= vd->shift;
+ ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -101,12 +111,16 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
u64 cycles, last, sec, ns;
u32 seq;
+ /* Allows to compile the high resolution parts out */
+ if (!__arch_vdso_hres_capable())
+ return -1;
+
do {
/*
- * Open coded to handle VCLOCK_TIMENS. Time namespace
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
* enabled tasks have a special VVAR page installed which
* has vd->seq set to 1 and vd->clock_mode set to
- * VCLOCK_TIMENS. For non time namespace affected tasks
+ * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
* this does not affect performance because if vd->seq is
* odd, i.e. a concurrent update is in progress the extra
* check for vd->clock_mode is just a few extra
@@ -115,20 +129,22 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_hres_timens(vd, clk, ts);
cpu_relax();
}
smp_rmb();
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
cycles = __arch_get_hw_counter(vd->clock_mode);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
ns = vdso_ts->nsec;
last = vd->cycle_last;
- if (unlikely((s64)cycles < 0))
- return -1;
-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns >>= vd->shift;
+ ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -187,12 +203,12 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VCLOCK_TIMENS. See comment in
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
* do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_coarse_timens(vd, clk, ts);
cpu_relax();
}
@@ -205,10 +221,10 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
return 0;
}
-static __maybe_unused int
-__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
+static __always_inline int
+__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
@@ -233,23 +249,31 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
{
- int ret = __cvdso_clock_gettime_common(clock, ts);
+ int ret = __cvdso_clock_gettime_common(vd, clock, ts);
if (unlikely(ret))
return clock_gettime_fallback(clock, ts);
return 0;
}
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+}
+
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
- ret = __cvdso_clock_gettime_common(clock, &ts);
+ ret = __cvdso_clock_gettime_common(vd, clock, &ts);
if (unlikely(ret))
return clock_gettime32_fallback(clock, res);
@@ -260,12 +284,18 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
return ret;
}
+
+static __maybe_unused int
+__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
-__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+__cvdso_gettimeofday_data(const struct vdso_data *vd,
+ struct __kernel_old_timeval *tv, struct timezone *tz)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
@@ -279,7 +309,7 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
@@ -289,13 +319,20 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
return 0;
}
+static __maybe_unused int
+__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+}
+
#ifdef VDSO_HAS_TIME
-static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+static __maybe_unused __kernel_old_time_t
+__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
__kernel_old_time_t t;
- if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@@ -305,13 +342,18 @@ static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time
return t;
}
+
+static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+{
+ return __cvdso_time_data(__arch_get_vdso_data(), time);
+}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
u64 ns;
@@ -319,7 +361,8 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
if (unlikely((u32) clock >= MAX_CLOCKS))
return -1;
- if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
/*
@@ -349,23 +392,31 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
}
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
- int ret = __cvdso_clock_getres_common(clock, res);
+ int ret = __cvdso_clock_getres_common(vd, clock, res);
if (unlikely(ret))
return clock_getres_fallback(clock, res);
return 0;
}
+static __maybe_unused
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+}
+
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
- ret = __cvdso_clock_getres_common(clock, &ts);
+ ret = __cvdso_clock_getres_common(vd, clock, &ts);
if (unlikely(ret))
return clock_getres32_fallback(clock, res);
@@ -376,5 +427,12 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
}
return ret;
}
+
+static __maybe_unused int
+__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ clock, res);
+}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */
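Taken together, the new *_data() variants let an architecture hand the vdso_data pointer to the common code instead of having every generic helper call __arch_get_vdso_data() itself. A hypothetical arch-side entry point, shown only to illustrate the calling convention (the function name and the way the pointer is obtained are assumptions):

/*
 * Illustrative arch wrapper (hypothetical name): the arch resolves the
 * vdso_data pointer itself - e.g. relative to the vDSO image or passed
 * in from an asm trampoline - and feeds it straight to the generic code.
 */
int __arch_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
			      const struct vdso_data *vd)
{
	return __cvdso_clock_gettime_data(vd, clock, ts);
}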
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7c488a1ce318..259e55895933 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -34,6 +34,7 @@
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/rtc.h>
+#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
@@ -58,7 +59,7 @@
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtoull instead.
+ * This function has caveats. Please use kstrtoull instead.
*/
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
@@ -83,7 +84,7 @@ EXPORT_SYMBOL(simple_strtoull);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtoul instead.
+ * This function has caveats. Please use kstrtoul instead.
*/
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
@@ -97,7 +98,7 @@ EXPORT_SYMBOL(simple_strtoul);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtol instead.
+ * This function has caveats. Please use kstrtol instead.
*/
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
@@ -114,7 +115,7 @@ EXPORT_SYMBOL(simple_strtol);
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
- * This function is obsolete. Please use kstrtoll instead.
+ * This function has caveats. Please use kstrtoll instead.
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
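The reworded kernel-doc keeps steering callers towards the kstrto*() helpers; the caveat with the simple_strto*() family is that it silently stops at the first invalid character and cannot report overflow. A minimal sketch of the recommended pattern (the function name is illustrative):

/* Illustrative only: kstrtoul() returns 0 on success and -EINVAL or
 * -ERANGE on malformed or overflowing input, which simple_strtoul()
 * cannot report. */
static int parse_count(const char *buf, unsigned long *count)
{
	return kstrtoul(buf, 10, count);
}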
@@ -794,6 +795,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
unsigned long hashval;
int ret;
+ /*
+ * Print the real pointer value for NULL and error pointers,
+ * as they are not actual addresses.
+ */
+ if (IS_ERR_OR_NULL(ptr))
+ return pointer_string(buf, end, ptr, spec);
+
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
hashval = hash_long((unsigned long)ptr, 32);
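The practical effect is that %p no longer hashes NULL or ERR_PTR() values, so they stay recognizable in logs. A small illustration (the output shown is the expected form, not captured from a real run):

/* Illustrative only: NULL and error pointers are now printed verbatim. */
static void demo_ptr_printing(void)
{
	pr_info("p=%p\n", NULL);		/* e.g. "p=0000000000000000" on 64-bit */
	pr_info("p=%p\n", ERR_PTR(-ENOMEM));	/* raw error value, e.g. "fffffffffffffff4" */
}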
@@ -1820,14 +1828,39 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
}
static noinline_for_stack
+char *time64_str(char *buf, char *end, const time64_t time,
+ struct printf_spec spec, const char *fmt)
+{
+ struct rtc_time rtc_time;
+ struct tm tm;
+
+ time64_to_tm(time, 0, &tm);
+
+ rtc_time.tm_sec = tm.tm_sec;
+ rtc_time.tm_min = tm.tm_min;
+ rtc_time.tm_hour = tm.tm_hour;
+ rtc_time.tm_mday = tm.tm_mday;
+ rtc_time.tm_mon = tm.tm_mon;
+ rtc_time.tm_year = tm.tm_year;
+ rtc_time.tm_wday = tm.tm_wday;
+ rtc_time.tm_yday = tm.tm_yday;
+
+ rtc_time.tm_isdst = 0;
+
+ return rtc_str(buf, end, &rtc_time, spec, fmt);
+}
+
+static noinline_for_stack
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
const char *fmt)
{
switch (fmt[1]) {
case 'R':
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
+ case 'T':
+ return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
default:
- return error_string(buf, end, "(%ptR?)", spec);
+ return error_string(buf, end, "(%pt?)", spec);
}
}
@@ -2143,8 +2176,9 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
- * - 't[R][dt][r]' For time and date as represented:
+ * - 't[RT][dt][r]' For time and date as represented by:
* R struct rtc_time
+ * T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
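As a usage sketch for the new 'T' variant documented above (not part of the patch): like 'R', it dereferences its argument, so callers pass a pointer to a time64_t.

/* Usage sketch only: %ptT takes a pointer to a time64_t. */
static void demo_ptT(void)
{
	time64_t now = ktime_get_real_seconds();

	pr_info("wall clock: %ptT\n", &now);	/* e.g. "wall clock: 2020-05-25T14:03:07" */
}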
@@ -2168,6 +2202,10 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
* f full name
* P node name, including a possible unit address
* - 'x' For printing the address. Equivalent to "%lx".
+ * - '[ku]s' For a BPF/tracing related format specifier, e.g. used from
+ * bpf_trace_printk() where the [ku] prefix specifies either kernel (k)
+ * or user (u) memory to probe, and:
+ * s a string, equivalent to "%s" on direct vsnprintf() use
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
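A hedged sketch of the intended consumer side: a tracing BPF program handing a string pointer to bpf_trace_printk(), which selects kernel or user probing based on the prefix. The attach point and names below are assumptions; the memory probing itself happens in the bpf_trace_printk() helper path, not in vsnprintf():

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(trace_open, int dfd, const char *filename)
{
	char fmt[] = "open: %pus\n";	/* filename is a user-space pointer */

	bpf_trace_printk(fmt, sizeof(fmt), filename);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";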
@@ -2251,6 +2289,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
if (!IS_ERR(ptr))
break;
return err_ptr(buf, end, ptr, spec);
+ case 'u':
+ case 'k':
+ switch (fmt[1]) {
+ case 's':
+ return string(buf, end, ptr, spec);
+ default:
+ return error_string(buf, end, "(einval)", spec);
+ }
}
/* default is to _not_ leak addresses, hash before printing */
diff --git a/lib/xarray.c b/lib/xarray.c
index 1d9fab7db8da..e9e641d3c0c3 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas)
xas->xa_node = XAS_RESTART;
if (node) {
- unsigned int offset = xas->xa_offset;
+ unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
@@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1836,10 +1838,11 @@ static bool xas_sibling(struct xa_state *xas)
struct xa_node *node = xas->xa_node;
unsigned long mask;
- if (!node)
+ if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
return false;
mask = (XA_CHUNK_SIZE << node->shift) - 1;
- return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+ return (xas->xa_index & mask) >
+ ((unsigned long)xas->xa_offset << node->shift);
}
/**
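The added cast in xas_sibling() matters because xa_offset is a narrow (unsigned char) field: without it, integer promotion performs the shift in a 32-bit type, which loses the high bits for large node shifts before the comparison against xa_index. A tiny standalone illustration of the promotion pitfall (userspace sketch, values chosen only to show the effect):

#include <stdio.h>

int main(void)
{
	unsigned char offset = 40;	/* slot index inside a node */
	unsigned int shift = 30;	/* a large multi-order shift */

	/*
	 * Shifting in a 32-bit type loses the high bits (and with the
	 * implicit promotion to signed int, the overflow is undefined).
	 */
	unsigned long wrong = (unsigned int)offset << shift;
	/* What the fix does: widen to unsigned long first, then shift. */
	unsigned long right = (unsigned long)offset << shift;

	printf("%lu vs %lu\n", wrong, right);	/* 0 vs 42949672960 */
	return 0;
}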
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 2c13ecc5bb2c..ed1f3df27260 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -10,17 +10,6 @@
#ifndef ASMINF
-/* Allow machine dependent optimization for post-increment or pre-increment.
- Based on testing to date,
- Pre-increment preferred for:
- - PowerPC G3 (Adler)
- - MIPS R5000 (Randers-Pehrson)
- Post-increment preferred for:
- - none
- No measurable difference:
- - Pentium III (Anderson)
- - M68060 (Nikl)
- */
union uu {
unsigned short us;
unsigned char b[2];
@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p)
return mm.us;
}
-#ifdef POSTINC
-# define OFF 0
-# define PUP(a) *(a)++
-# define UP_UNALIGNED(a) get_unaligned16((a)++)
-#else
-# define OFF 1
-# define PUP(a) *++(a)
-# define UP_UNALIGNED(a) get_unaligned16(++(a))
-#endif
-
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start)
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
- in = strm->next_in - OFF;
+ in = strm->next_in;
last = in + (strm->avail_in - 5);
- out = strm->next_out - OFF;
+ out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start)
input data or output space */
do {
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = lcode[hold & lmask];
@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
- PUP(out) = (unsigned char)(this.val);
+ *out++ = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
}
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = dcode[hold & dmask];
@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start)
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start)
state->mode = BAD;
break;
}
- from = window - OFF;
+ from = window;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from end of window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
- from = window - OFF;
+ from = window;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
len -= 3;
}
if (len) {
- PUP(out) = PUP(from);
+ *out++ = *from++;
if (len > 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else {
@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start)
from = out - dist; /* copy direct from output */
/* minimum length is three */
/* Align out addr */
- if (!((long)(out - 1 + OFF) & 1)) {
- PUP(out) = PUP(from);
+ if (!((long)(out - 1) & 1)) {
+ *out++ = *from++;
len--;
}
- sout = (unsigned short *)(out - OFF);
+ sout = (unsigned short *)(out);
if (dist > 2) {
unsigned short *sfrom;
- sfrom = (unsigned short *)(from - OFF);
+ sfrom = (unsigned short *)(from);
loops = len >> 1;
do
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- PUP(sout) = PUP(sfrom);
+ *sout++ = *sfrom++;
#else
- PUP(sout) = UP_UNALIGNED(sfrom);
+ *sout++ = get_unaligned16(sfrom++);
#endif
while (--loops);
- out = (unsigned char *)sout + OFF;
- from = (unsigned char *)sfrom + OFF;
+ out = (unsigned char *)sout;
+ from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
unsigned short pat16;
- pat16 = *(sout-1+OFF);
+ pat16 = *(sout-1);
if (dist == 1) {
union uu mm;
/* copy one char pattern to both bytes */
@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start)
}
loops = len >> 1;
do
- PUP(sout) = pat16;
+ *sout++ = pat16;
while (--loops);
- out = (unsigned char *)sout + OFF;
+ out = (unsigned char *)sout;
}
if (len & 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
@@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start)
hold &= (1U << bits) - 1;
/* update state and return */
- strm->next_in = in + OFF;
- strm->next_out = out + OFF;
+ strm->next_in = in;
+ strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));