Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore | 2
-rw-r--r--  lib/Kconfig | 61
-rw-r--r--  lib/Kconfig.debug | 357
-rw-r--r--  lib/Kconfig.kasan | 12
-rw-r--r--  lib/Kconfig.ubsan | 14
-rw-r--r--  lib/Makefile | 55
-rw-r--r--  lib/argv_split.c | 2
-rw-r--r--  lib/atomic64.c | 14
-rw-r--r--  lib/bch.c | 32
-rw-r--r--  lib/bitmap.c | 58
-rw-r--r--  lib/bucket_locks.c | 16
-rw-r--r--  lib/chacha20.c | 6
-rw-r--r--  lib/cpumask.c | 4
-rw-r--r--  lib/crc-t10dif.c | 57
-rw-r--r--  lib/crc32.c | 22
-rw-r--r--  lib/crc32defs.h | 14
-rw-r--r--  lib/crc64.c | 56
-rw-r--r--  lib/debug_locks.c | 6
-rw-r--r--  lib/debugobjects.c | 151
-rw-r--r--  lib/dec_and_lock.c | 16
-rw-r--r--  lib/decompress_bunzip2.c | 3
-rw-r--r--  lib/devres.c | 114
-rw-r--r--  lib/dma-debug.c | 1752
-rw-r--r--  lib/dma-direct.c | 161
-rw-r--r--  lib/dma-virt.c | 61
-rw-r--r--  lib/dump_stack.c | 60
-rw-r--r--  lib/errseq.c | 23
-rw-r--r--  lib/find_bit_benchmark.c | 7
-rw-r--r--  lib/fonts/font_7x14.c | 256
-rw-r--r--  lib/fonts/font_8x16.c | 256
-rw-r--r--  lib/fonts/font_8x8.c | 256
-rw-r--r--  lib/fonts/font_pearl_8x8.c | 256
-rw-r--r--  lib/gen_crc32table.c | 5
-rw-r--r--  lib/gen_crc64table.c | 68
-rw-r--r--  lib/idr.c | 464
-rw-r--r--  lib/int_sqrt.c | 30
-rw-r--r--  lib/interval_tree_test.c | 5
-rw-r--r--  lib/iommu-common.c | 267
-rw-r--r--  lib/iommu-helper.c | 14
-rw-r--r--  lib/ioremap.c | 4
-rw-r--r--  lib/iov_iter.c | 257
-rw-r--r--  lib/kfifo.c | 4
-rw-r--r--  lib/klist.c | 10
-rw-r--r--  lib/kobject.c | 76
-rw-r--r--  lib/kobject_uevent.c | 272
-rw-r--r--  lib/kstrtox.c | 16
-rw-r--r--  lib/libcrc32c.c | 6
-rw-r--r--  lib/list_debug.c | 14
-rw-r--r--  lib/locking-selftest.c | 2
-rw-r--r--  lib/lockref.c | 28
-rw-r--r--  lib/logic_pio.c | 280
-rw-r--r--  lib/lru_cache.c | 2
-rw-r--r--  lib/lz4/lz4_decompress.c | 481
-rw-r--r--  lib/lz4/lz4defs.h | 9
-rw-r--r--  lib/memcat_p.c | 34
-rw-r--r--  lib/mpi/mpi-internal.h | 75
-rw-r--r--  lib/mpi/mpi-pow.c | 3
-rw-r--r--  lib/mpi/mpiutil.c | 4
-rw-r--r--  lib/nlattr.c | 269
-rw-r--r--  lib/nmi_backtrace.c | 3
-rw-r--r--  lib/parser.c | 16
-rw-r--r--  lib/percpu-refcount.c | 28
-rw-r--r--  lib/percpu_counter.c | 1
-rw-r--r--  lib/percpu_ida.c | 391
-rw-r--r--  lib/radix-tree.c | 852
-rw-r--r--  lib/raid6/.gitignore | 1
-rw-r--r--  lib/raid6/Makefile | 33
-rw-r--r--  lib/raid6/algos.c | 7
-rw-r--r--  lib/raid6/altivec.uc | 3
-rw-r--r--  lib/raid6/s390vx.uc | 34
-rw-r--r--  lib/raid6/sse2.c | 14
-rw-r--r--  lib/raid6/test/Makefile | 33
-rw-r--r--  lib/raid6/tilegx.uc | 87
-rw-r--r--  lib/raid6/vpermxor.uc | 105
-rw-r--r--  lib/rbtree_test.c | 2
-rw-r--r--  lib/reciprocal_div.c | 41
-rw-r--r--  lib/reed_solomon/decode_rs.c | 34
-rw-r--r--  lib/reed_solomon/encode_rs.c | 15
-rw-r--r--  lib/reed_solomon/reed_solomon.c | 240
-rw-r--r--  lib/refcount.c | 83
-rw-r--r--  lib/rhashtable.c | 156
-rw-r--r--  lib/sbitmap.c | 119
-rw-r--r--  lib/scatterlist.c | 18
-rw-r--r--  lib/sg_pool.c | 7
-rw-r--r--  lib/sha256.c | 283
-rw-r--r--  lib/string.c | 1
-rw-r--r--  lib/swiotlb.c | 1135
-rw-r--r--  lib/test_bitfield.c | 168
-rw-r--r--  lib/test_bitmap.c | 35
-rw-r--r--  lib/test_bpf.c | 709
-rw-r--r--  lib/test_debug_virtual.c | 2
-rw-r--r--  lib/test_firmware.c | 11
-rw-r--r--  lib/test_hexdump.c | 28
-rw-r--r--  lib/test_ida.c | 177
-rw-r--r--  lib/test_kasan.c | 78
-rw-r--r--  lib/test_kmod.c | 5
-rw-r--r--  lib/test_memcat_p.c | 115
-rw-r--r--  lib/test_overflow.c | 613
-rw-r--r--  lib/test_printf.c | 26
-rw-r--r--  lib/test_rhashtable.c | 21
-rw-r--r--  lib/test_ubsan.c | 144
-rw-r--r--  lib/test_user_copy.c | 3
-rw-r--r--  lib/test_xarray.c | 1238
-rw-r--r--  lib/textsearch.c | 40
-rw-r--r--  lib/ucmpdi2.c | 2
-rw-r--r--  lib/ucs2_string.c | 2
-rw-r--r--  lib/vsprintf.c | 342
-rw-r--r--  lib/xarray.c | 2036
-rw-r--r--  lib/xz/xz_crc32.c | 2
-rw-r--r--  lib/xz/xz_private.h | 4
-rw-r--r--  lib/zlib_inflate/inflate.c | 12
-rw-r--r--  lib/zstd/Makefile | 17
112 files changed, 9461 insertions, 6970 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index 09aae85418ab..f2a39c9e5485 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -2,5 +2,7 @@
# Generated files
#
gen_crc32table
+gen_crc64table
crc32table.h
+crc64table.h
oid_registry_data.c
diff --git a/lib/Kconfig b/lib/Kconfig
index e96089499371..a9965f4af4dd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -55,6 +55,22 @@ config ARCH_USE_CMPXCHG_LOCKREF
config ARCH_HAS_FAST_MULTIPLIER
bool
+config INDIRECT_PIO
+ bool "Access I/O in non-MMIO mode"
+ depends on ARM64
+ help
+ On some platforms where no separate I/O space exists, there are I/O
+ hosts which can not be accessed in MMIO mode. Using the logical PIO
+ mechanism, the host-local I/O resource can be mapped into system
+ logic PIO space shared with MMIO hosts, such as PCI/PCIe, then the
+ system can access the I/O devices with the mapped-logic PIO through
+ I/O accessors.
+
+ This mechanism has relatively little I/O performance cost. Please make
+ sure your devices really need this config option enabled.
+
+ When in doubt, say N.
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -154,6 +170,14 @@ config CRC32_BIT
endchoice
+config CRC64
+ tristate "CRC64 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC64 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC64
+ functions require M here.
+
config CRC4
tristate "CRC4 functions"
help
@@ -207,7 +231,6 @@ config AUDIT_COMPAT_GENERIC
config RANDOM32_SELFTEST
bool "PRNG perform self test on init"
- default n
help
This option enables the 32 bit PRNG library functions to perform a
self test on initialization.
@@ -376,8 +399,11 @@ config INTERVAL_TREE
for more information.
-config RADIX_TREE_MULTIORDER
+config XARRAY_MULTI
bool
+ help
+ Support entries which occupy multiple consecutive indices in the
+ XArray.
config ASSOCIATIVE_ARRAY
bool
@@ -389,7 +415,7 @@ config ASSOCIATIVE_ARRAY
See:
- Documentation/assoc_array.txt
+ Documentation/core-api/assoc_array.rst
for more information.
@@ -404,24 +430,14 @@ config HAS_IOPORT_MAP
depends on HAS_IOMEM && !NO_IOPORT_MAP
default y
-config HAS_DMA
- bool
- depends on !NO_DMA
- default y
+source "kernel/dma/Kconfig"
config SGL_ALLOC
bool
default n
-config DMA_DIRECT_OPS
+config IOMMU_HELPER
bool
- depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
- default n
-
-config DMA_VIRT_OPS
- bool
- depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
- default n
config CHECK_SIGNATURE
bool
@@ -570,6 +586,9 @@ config ARCH_HAS_PMEM_API
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
+config ARCH_HAS_UACCESS_MCSAFE
+ bool
+
config STACKDEPOT
bool
select STACKTRACE
@@ -588,20 +607,20 @@ config STRING_SELFTEST
endmenu
-config GENERIC_ASHLDI3
+config GENERIC_LIB_ASHLDI3
bool
-config GENERIC_ASHRDI3
+config GENERIC_LIB_ASHRDI3
bool
-config GENERIC_LSHRDI3
+config GENERIC_LIB_LSHRDI3
bool
-config GENERIC_MULDI3
+config GENERIC_LIB_MULDI3
bool
-config GENERIC_CMPDI2
+config GENERIC_LIB_CMPDI2
bool
-config GENERIC_UCMPDI2
+config GENERIC_LIB_UCMPDI2
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64155e310a9f..1af29b8224fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,3 +1,5 @@
+menu "Kernel hacking"
+
menu "printk and dmesg options"
config PRINTK_TIME
@@ -30,6 +32,17 @@ config CONSOLE_LOGLEVEL_DEFAULT
usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
option.
+config CONSOLE_LOGLEVEL_QUIET
+ int "quiet console loglevel (1-15)"
+ range 1 15
+ default "4"
+ help
+ loglevel to use when "quiet" is passed on the kernel commandline.
+
+ When "quiet" is passed on the kernel commandline this loglevel
+ will be used as the loglevel. IOW passing "quiet" will be the
+ equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
+
config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
@@ -165,7 +178,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO && !FRV
+ depends on DEBUG_INFO
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -198,14 +211,6 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
-config ENABLE_WARN_DEPRECATED
- bool "Enable __deprecated logic"
- default y
- help
- Enable the __deprecated logic in the kernel build.
- Disable this to suppress the "warning: 'foo' is deprecated
- (declared at kernel/power/somefile.c:1234)" messages.
-
config ENABLE_MUST_CHECK
bool "Enable __must_check logic"
default y
@@ -324,11 +329,11 @@ config DEBUG_SECTION_MISMATCH
the analysis would not catch the illegal reference.
This option tells gcc to inline less (but it does result in
a larger kernel).
- - Run the section mismatch analysis for each module/built-in.o file.
+ - Run the section mismatch analysis for each module/built-in.a file.
When we run the section mismatch analysis on vmlinux.o, we
lose valuable information about where the mismatch was
introduced.
- Running the analysis for each module/built-in.o file
+ Running the analysis for each module/built-in.a file
tells where the mismatch happens much closer to the
source. The drawback is that the same mismatch is
reported at least twice.
@@ -354,10 +359,7 @@ config ARCH_WANT_FRAME_POINTERS
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
- depends on DEBUG_KERNEL && \
- (CRIS || M68K || FRV || UML || \
- SUPERH || BLACKFIN || MN10300 || METAG) || \
- ARCH_WANT_FRAME_POINTERS
+ depends on DEBUG_KERNEL && (M68K || UML || SUPERH) || ARCH_WANT_FRAME_POINTERS
default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
help
If you say Y here the resulting kernel image will be slightly
@@ -739,12 +741,15 @@ config ARCH_HAS_KCOV
only for x86_64. KCOV requires testing on other archs, and most likely
disabling of instrumentation for some early boot code.
+config CC_HAS_SANCOV_TRACE_PC
+ def_bool $(cc-option,-fsanitize-coverage=trace-pc)
+
config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
+ depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
select DEBUG_FS
- select GCC_PLUGINS if !COMPILE_TEST
- select GCC_PLUGIN_SANCOV if !COMPILE_TEST
+ select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
help
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
@@ -758,7 +763,7 @@ config KCOV
config KCOV_ENABLE_COMPARISONS
bool "Enable comparison operands collection by KCOV"
depends on KCOV
- default n
+ depends on $(cc-option,-fsanitize-coverage=trace-cmp)
help
KCOV also exposes operands of every comparison in the instrumented
code along with operand sizes and PCs of the comparison instructions.
@@ -768,7 +773,7 @@ config KCOV_ENABLE_COMPARISONS
config KCOV_INSTRUMENT_ALL
bool "Instrument all code by default"
depends on KCOV
- default y if KCOV
+ default y
help
If you are doing generic system call fuzzing (like e.g. syzkaller),
then you will want to instrument the whole kernel and you should
@@ -803,6 +808,30 @@ config SOFTLOCKUP_DETECTOR
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on SOFTLOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 20 seconds (configurable using the watchdog_thresh
+ sysctl), without giving other tasks a chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+ int
+ depends on SOFTLOCKUP_DETECTOR
+ range 0 1
+ default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+ default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
config HARDLOCKUP_DETECTOR_PERF
bool
select SOFTLOCKUP_DETECTOR
@@ -852,30 +881,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
default 1 if BOOTPARAM_HARDLOCKUP_PANIC
-config BOOTPARAM_SOFTLOCKUP_PANIC
- bool "Panic (Reboot) On Soft Lockups"
- depends on SOFTLOCKUP_DETECTOR
- help
- Say Y here to enable the kernel to panic on "soft lockups",
- which are bugs that cause the kernel to loop in kernel
- mode for more than 20 seconds (configurable using the watchdog_thresh
- sysctl), without giving other tasks a chance to run.
-
- The panic can be used in combination with panic_timeout,
- to cause the system to reboot automatically after a
- lockup has been detected. This feature is useful for
- high-availability systems that have uptime guarantees and
- where a lockup must be resolved ASAP.
-
- Say N if unsure.
-
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
- int
- depends on SOFTLOCKUP_DETECTOR
- range 0 1
- default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
- default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
-
config DETECT_HUNG_TASK
bool "Detect Hung Tasks"
depends on DEBUG_KERNEL
@@ -1034,69 +1039,20 @@ config DEBUG_PREEMPT
menu "Lock Debugging (spinlocks, mutexes, etc...)"
-config DEBUG_RT_MUTEXES
- bool "RT Mutex debugging, deadlock detection"
- depends on DEBUG_KERNEL && RT_MUTEXES
- help
- This allows rt mutex semantics violations and rt mutex related
- deadlocks (lockups) to be detected and reported automatically.
-
-config DEBUG_SPINLOCK
- bool "Spinlock and rw-lock debugging: basic checks"
- depends on DEBUG_KERNEL
- select UNINLINE_SPIN_UNLOCK
- help
- Say Y here and build SMP to catch missing spinlock initialization
- and certain other kinds of spinlock errors commonly made. This is
- best used in conjunction with the NMI watchdog so that spinlock
- deadlocks are also debuggable.
-
-config DEBUG_MUTEXES
- bool "Mutex debugging: basic checks"
- depends on DEBUG_KERNEL
- help
- This feature allows mutex semantics violations to be detected and
- reported.
-
-config DEBUG_WW_MUTEX_SLOWPATH
- bool "Wait/wound mutex debugging: Slowpath testing"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
- select DEBUG_LOCK_ALLOC
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
- help
- This feature enables slowpath testing for w/w mutex users by
- injecting additional -EDEADLK wound/backoff cases. Together with
- the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
- will test all possible w/w mutex interface abuse with the
- exception of simply not acquiring all the required locks.
- Note that this feature can introduce significant overhead, so
- it really should not be enabled in a production or distro kernel,
- even a debug kernel. If you are a driver writer, enable it. If
- you are a distro, do not.
-
-config DEBUG_LOCK_ALLOC
- bool "Lock debugging: detect incorrect freeing of live locks"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
- select DEBUG_RT_MUTEXES if RT_MUTEXES
- select LOCKDEP
- help
- This feature will check whether any held lock (spinlock, rwlock,
- mutex or rwsem) is incorrectly freed by the kernel, via any of the
- memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
- vfree(), etc.), whether a live lock is incorrectly reinitialized via
- spin_lock_init()/mutex_init()/etc., or whether there is any lock
- held during task exit.
+config LOCK_DEBUGGING_SUPPORT
+ bool
+ depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ default y
config PROVE_LOCKING
bool "Lock debugging: prove locking correctness"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+ select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
default n
@@ -1134,20 +1090,9 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.txt.
-config LOCKDEP
- bool
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
- select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
- select KALLSYMS
- select KALLSYMS_ALL
-
-config LOCKDEP_SMALL
- bool
-
config LOCK_STAT
bool "Lock usage statistics"
- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
@@ -1167,6 +1112,80 @@ config LOCK_STAT
CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
(CONFIG_LOCKDEP defines "acquire" and "release" events.)
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_SPINLOCK
+ bool "Spinlock and rw-lock debugging: basic checks"
+ depends on DEBUG_KERNEL
+ select UNINLINE_SPIN_UNLOCK
+ help
+ Say Y here and build SMP to catch missing spinlock initialization
+ and certain other kinds of spinlock errors commonly made. This is
+ best used in conjunction with the NMI watchdog so that spinlock
+ deadlocks are also debuggable.
+
+config DEBUG_MUTEXES
+ bool "Mutex debugging: basic checks"
+ depends on DEBUG_KERNEL
+ help
+ This feature allows mutex semantics violations to be detected and
+ reported.
+
+config DEBUG_WW_MUTEX_SLOWPATH
+ bool "Wait/wound mutex debugging: Slowpath testing"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select DEBUG_LOCK_ALLOC
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ help
+ This feature enables slowpath testing for w/w mutex users by
+ injecting additional -EDEADLK wound/backoff cases. Together with
+ the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+ will test all possible w/w mutex interface abuse with the
+ exception of simply not acquiring all the required locks.
+ Note that this feature can introduce significant overhead, so
+ it really should not be enabled in a production or distro kernel,
+ even a debug kernel. If you are a driver writer, enable it. If
+ you are a distro, do not.
+
+config DEBUG_RWSEMS
+ bool "RW Semaphore debugging: basic checks"
+ depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+ help
+ This debugging feature allows mismatched rw semaphore locks and unlocks
+ to be detected and reported.
+
+config DEBUG_LOCK_ALLOC
+ bool "Lock debugging: detect incorrect freeing of live locks"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select LOCKDEP
+ help
+ This feature will check whether any held lock (spinlock, rwlock,
+ mutex or rwsem) is incorrectly freed by the kernel, via any of the
+ memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
+ vfree(), etc.), whether a live lock is incorrectly reinitialized via
+ spin_lock_init()/mutex_init()/etc., or whether there is any lock
+ held during task exit.
+
+config LOCKDEP
+ bool
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select STACKTRACE
+ select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
+ select KALLSYMS
+ select KALLSYMS_ALL
+
+config LOCKDEP_SMALL
+ bool
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
@@ -1179,6 +1198,7 @@ config DEBUG_ATOMIC_SLEEP
bool "Sleep inside atomic section checking"
select PREEMPT_COUNT
depends on DEBUG_KERNEL
+ depends on !ARCH_NO_PREEMPT
help
If you say Y here, various routines which may sleep will become very
noisy if they are called inside atomic sections: when a spinlock is
@@ -1200,7 +1220,6 @@ config LOCK_TORTURE_TEST
tristate "torture tests for locking"
depends on DEBUG_KERNEL
select TORTURE_TEST
- default n
help
This option provides a kernel module that runs torture tests
on kernel locking primitives. The kernel module may be built
@@ -1258,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
time. This is really bad from a security perspective, and
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
- However, since users can not do anything actionble to
+ However, since users cannot do anything actionable to
address this, by default the kernel will issue only a single
warning for the first use of unseeded randomness.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
- those developers interersted in improving the security of
+ those developers interested in improving the security of
Linux kernels running on their architecture (or
subarchitecture).
@@ -1273,7 +1292,7 @@ config DEBUG_KOBJECT
depends on DEBUG_KERNEL
help
If you say Y here, some extra kobject debugging messages will be sent
- to the syslog.
+ to the syslog.
config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging"
@@ -1492,6 +1511,10 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
+config FUNCTION_ERROR_INJECTION
+ def_bool y
+ depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
config FAULT_INJECTION
bool "Fault-injection framework"
depends on DEBUG_KERNEL
@@ -1499,10 +1522,6 @@ config FAULT_INJECTION
Provide fault-injection framework.
For more details, see Documentation/fault-injection/.
-config FUNCTION_ERROR_INJECTION
- def_bool y
- depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
-
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
@@ -1533,16 +1552,6 @@ config FAIL_IO_TIMEOUT
Only works with drivers that use the generic timeout handling,
for others it wont do anything.
-config FAIL_MMC_REQUEST
- bool "Fault-injection capability for MMC IO"
- depends on FAULT_INJECTION_DEBUG_FS && MMC
- help
- Provide fault-injection capability for MMC IO.
- This will make the mmc core return data errors. This is
- useful to test the error handling in the mmc block device
- and to test how the mmc host driver handles retries from
- the block device.
-
config FAIL_FUTEX
bool "Fault-injection capability for futexes"
select DEBUG_FS
@@ -1550,6 +1559,12 @@ config FAIL_FUTEX
help
Provide fault-injection capability for futexes.
+config FAULT_INJECTION_DEBUG_FS
+ bool "Debugfs entries for fault-injection capabilities"
+ depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+ help
+ Enable configuration of fault-injection capabilities via debugfs.
+
config FAIL_FUNCTION
bool "Fault-injection capability for functions"
depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
@@ -1560,18 +1575,22 @@ config FAIL_FUNCTION
an error value and have to handle it. This is useful to test the
error handling in various subsystems.
-config FAULT_INJECTION_DEBUG_FS
- bool "Debugfs entries for fault-injection capabilities"
- depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+config FAIL_MMC_REQUEST
+ bool "Fault-injection capability for MMC IO"
+ depends on FAULT_INJECTION_DEBUG_FS && MMC
help
- Enable configuration of fault-injection capabilities via debugfs.
+ Provide fault-injection capability for MMC IO.
+ This will make the mmc core return data errors. This is
+ useful to test the error handling in the mmc block device
+ and to test how the mmc host driver handles retries from
+ the block device.
config FAULT_INJECTION_STACKTRACE_FILTER
bool "stacktrace filter for fault-injection capabilities"
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
help
Provide stacktrace filter for fault-injection capabilities
@@ -1580,7 +1599,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -1623,7 +1642,7 @@ config PROVIDE_OHCI1394_DMA_INIT
config DMA_API_DEBUG
bool "Enable debugging of DMA-API usage"
- depends on HAVE_DMA_API_DEBUG
+ select NEED_DMA_MAP_STATE
help
Enable this option to debug the use of the DMA API by device drivers.
With this option you will be able to detect common bugs in device
@@ -1640,6 +1659,23 @@ config DMA_API_DEBUG
If unsure, say N.
+config DMA_API_DEBUG_SG
+ bool "Debug DMA scatter-gather usage"
+ default y
+ depends on DMA_API_DEBUG
+ help
+ Perform extra checking that callers of dma_map_sg() have respected the
+ appropriate segment length/boundary limits for the given device when
+ preparing DMA scatterlists.
+
+ This is particularly likely to have been overlooked in cases where the
+ dma_map_sg() API is used for general bulk mapping of pages rather than
+ preparing literal scatter-gather descriptors, where there is a risk of
+ unexpected behaviour from DMA API implementations if the scatterlist
+ is technically out-of-spec.
+
+ If unsure, say N.
+
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1650,7 +1686,6 @@ config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
depends on BLOCK
- default n
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
@@ -1684,10 +1719,9 @@ config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
- default n
help
This option provides for testing basic kprobes functionality on
- boot. A sample kprobe, jprobe and kretprobe are inserted and
+ boot. Samples of kprobe and kretprobe are inserted and
verified for functionality.
Say N if you are unsure.
@@ -1695,7 +1729,6 @@ config KPROBES_SANITY_TEST
config BACKTRACE_SELF_TEST
tristate "Self test for the backtrace code"
depends on DEBUG_KERNEL
- default n
help
This option provides a kernel module that can be used to test
the kernel stack backtrace code. This option is not useful
@@ -1765,18 +1798,29 @@ config TEST_PRINTF
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
- default n
help
Enable this option to test the bitmap functions at boot.
If unsure, say N.
+config TEST_BITFIELD
+ tristate "Test bitfield functions at runtime"
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ If unsure, say N.
+
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
+config TEST_XARRAY
+ tristate "Test the XArray code at runtime"
+
+config TEST_OVERFLOW
+ tristate "Test check_*_overflow() functions at runtime"
+
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
- default n
help
Enable this option to test the rhashtable functions at boot.
@@ -1784,7 +1828,6 @@ config TEST_RHASHTABLE
config TEST_HASH
tristate "Perform selftest on hash functions"
- default n
help
Enable this option to test the kernel's integer (<linux/hash.h>),
string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
@@ -1793,9 +1836,11 @@ config TEST_HASH
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config TEST_IDA
+ tristate "Perform selftest on IDA functions"
+
config TEST_PARMAN
tristate "Perform selftest on priority array manager"
- default n
depends on PARMAN
help
Enable this option to test priority array manager on boot
@@ -1805,7 +1850,6 @@ config TEST_PARMAN
config TEST_LKM
tristate "Test module loading with 'hello world' module"
- default n
depends on m
help
This builds the "test_module" module that emits "Hello, world"
@@ -1819,7 +1863,6 @@ config TEST_LKM
config TEST_USER_COPY
tristate "Test user/kernel boundary protections"
- default n
depends on m
help
This builds the "test_user_copy" module that runs sanity checks
@@ -1832,7 +1875,6 @@ config TEST_USER_COPY
config TEST_BPF
tristate "Test BPF filter functionality"
- default n
depends on m && NET
help
This builds the "test_bpf" module that runs various test vectors
@@ -1846,7 +1888,6 @@ config TEST_BPF
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
- default n
help
This builds the "test_find_bit" module that measure find_*_bit()
functions performance.
@@ -1855,7 +1896,6 @@ config FIND_BIT_BENCHMARK
config TEST_FIRMWARE
tristate "Test firmware loading via userspace interface"
- default n
depends on FW_LOADER
help
This builds the "test_firmware" module that creates a userspace
@@ -1868,7 +1908,6 @@ config TEST_FIRMWARE
config TEST_SYSCTL
tristate "sysctl test driver"
- default n
depends on PROC_SYSCTL
help
This builds the "test_sysctl" module. This driver enables to test the
@@ -1879,7 +1918,6 @@ config TEST_SYSCTL
config TEST_UDELAY
tristate "udelay test driver"
- default n
help
This builds the "udelay_test" module that helps to make sure
that udelay() is working properly.
@@ -1888,7 +1926,6 @@ config TEST_UDELAY
config TEST_STATIC_KEYS
tristate "Test static keys"
- default n
depends on m
help
Test the static key interfaces.
@@ -1897,7 +1934,6 @@ config TEST_STATIC_KEYS
config TEST_KMOD
tristate "kmod stress tester"
- default n
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
@@ -1932,11 +1968,18 @@ config TEST_DEBUG_VIRTUAL
If unsure, say N.
+config TEST_MEMCAT_P
+ tristate "Test memcat_p() helper function"
+ help
+ Test the memcat_p() helper for correctly merging two
+ pointer arrays together.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
bool "Memtest"
- depends on HAVE_MEMBLOCK
---help---
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
@@ -1969,7 +2012,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if TILE || PPC || X86 || ARM64
+ default y if PPC || X86 || ARM64
---help---
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
@@ -2000,3 +2043,7 @@ config IO_STRICT_DEVMEM
if the driver using a given range cannot be disabled.
If in doubt, say Y.
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 3d35d062970d..d0bad1bd9a2b 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,8 @@ if HAVE_ARCH_KASAN
config KASAN
bool "KASan: runtime memory debugger"
- depends on SLUB || (SLAB && !DEBUG_SLAB)
+ depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
@@ -56,6 +57,15 @@ config KASAN_INLINE
endchoice
+config KASAN_S390_4_LEVEL_PAGING
+ bool "KASan: use 4-level paging"
+ depends on KASAN && S390
+ help
+ Compiling the kernel with KASan disables automatic 3-level vs
+ 4-level paging selection. 3-level paging is used by default (up
+ to 3TB of RAM with KASan enabled). This option allows forcing
+ 4-level paging instead.
+
config TEST_KASAN
tristate "Module for testing kasan for bug detection"
depends on m && KASAN
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index a669c193b878..98fa559ebd80 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config ARCH_WANTS_UBSAN_NO_NULL
- def_bool n
-
config UBSAN
bool "Undefined behaviour sanity checker"
help
@@ -39,10 +36,9 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_NULL
- bool "Enable checking of null pointers"
- depends on UBSAN
- default y if !ARCH_WANTS_UBSAN_NO_NULL
+config TEST_UBSAN
+ tristate "Module for testing for undefined behavior detection"
+ depends on m && UBSAN
help
- This option enables detection of memory accesses via a
- null pointer.
+ This is a test module for UBSAN.
+ It triggers various undefined behaviors and detects them.
diff --git a/lib/Makefile b/lib/Makefile
index a90d4fcd748f..db06d1237898 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,18 +18,17 @@ KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
- rbtree.o radix-tree.o dump_stack.o timerqueue.o\
+ rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o int_sqrt.o extable.o \
sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o siphash.o \
- nmi_backtrace.o nodemask.o win_minmax.o
+ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
+ nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
+lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
obj-y += lockref.o
@@ -38,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ percpu-refcount.o rhashtable.o reciprocal_div.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
@@ -51,10 +50,17 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+CFLAGS_test_kasan.o += -fno-builtin
+CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
+obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
+CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
+UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
@@ -62,10 +68,13 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
+obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -81,6 +90,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+obj-y += logic_pio.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
@@ -90,10 +101,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
- lib-y += dec_and_lock.o
-endif
-
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
@@ -101,6 +108,7 @@ obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC64) += crc64.o
obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
@@ -140,8 +148,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
-obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
@@ -161,8 +168,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
obj-$(CONFIG_LRU_CACHE) += lru_cache.o
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
obj-$(CONFIG_GENERIC_CSUM) += checksum.o
obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
@@ -217,7 +222,9 @@ obj-$(CONFIG_FONT_SUPPORT) += fonts/
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
hostprogs-y := gen_crc32table
+hostprogs-y += gen_crc64table
clean-files := crc32table.h
+clean-files += crc64table.h
$(obj)/crc32.o: $(obj)/crc32table.h
@@ -227,6 +234,14 @@ quiet_cmd_crc32 = GEN $@
$(obj)/crc32table.h: $(obj)/gen_crc32table
$(call cmd,crc32)
+$(obj)/crc64.o: $(obj)/crc64table.h
+
+quiet_cmd_crc64 = GEN $@
+ cmd_crc64 = $< > $@
+
+$(obj)/crc64table.h: $(obj)/gen_crc64table
+ $(call cmd,crc64)
+
#
# Build a fast OID lookip registry from include/linux/oid_registry.h
#
@@ -253,9 +268,9 @@ obj-$(CONFIG_SBITMAP) += sbitmap.o
obj-$(CONFIG_PARMAN) += parman.o
# GCC library routines
-obj-$(CONFIG_GENERIC_ASHLDI3) += ashldi3.o
-obj-$(CONFIG_GENERIC_ASHRDI3) += ashrdi3.o
-obj-$(CONFIG_GENERIC_LSHRDI3) += lshrdi3.o
-obj-$(CONFIG_GENERIC_MULDI3) += muldi3.o
-obj-$(CONFIG_GENERIC_CMPDI2) += cmpdi2.o
-obj-$(CONFIG_GENERIC_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
+obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
+obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
+obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
+obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5c35752a9414..1a19a0a93dc1 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -69,7 +69,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
return NULL;
argc = count_argc(argv_str);
- argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+ argv = kmalloc_array(argc + 2, sizeof(*argv), gfp);
if (!argv) {
kfree(argv_str);
return NULL;
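The conversion above swaps an open-coded size multiplication for kmalloc_array(). A minimal sketch of why it matters (hypothetical caller, needs <linux/slab.h>):

	static char **alloc_ptrs(size_t n, gfp_t gfp)
	{
		/* kmalloc(sizeof(*p) * n, gfp) can wrap silently for huge n;
		 * kmalloc_array() returns NULL on multiplication overflow. */
		return kmalloc_array(n, sizeof(char *), gfp);
	}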
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 53c2d5edc826..1d91e31eceec 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- int ret = 0;
+ long long val;
raw_spin_lock_irqsave(lock, flags);
- if (v->counter != u) {
+ val = v->counter;
+ if (val != u)
v->counter += a;
- ret = 1;
- }
raw_spin_unlock_irqrestore(lock, flags);
- return ret;
+
+ return val;
}
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
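The hunk above changes the return convention from a 0/1 success flag to the value observed before the add. A minimal sketch, assuming a caller that still wants the old boolean semantics (wrapper name is illustrative, needs <linux/atomic.h>):

	static inline bool my_atomic64_add_unless(atomic64_t *v, long long a, long long u)
	{
		/* the add happened iff the observed value differed from @u */
		return atomic64_fetch_add_unless(v, a, u) != u;
	}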
diff --git a/lib/bch.c b/lib/bch.c
index bc89dfe4d1b3..5db6d3a4c8a6 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -78,15 +78,21 @@
#define GF_M(_p) (CONFIG_BCH_CONST_M)
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
+#define BCH_MAX_M (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T (CONFIG_BCH_CONST_T)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
+#define BCH_MAX_M 15 /* 2KB */
+#define BCH_MAX_T 64 /* 64 bit correction */
#endif
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
+#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
+
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
#endif
@@ -187,18 +193,22 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const unsigned int l = BCH_ECC_WORDS(bch)-1;
unsigned int i, mlen;
unsigned long m;
- uint32_t w, r[l+1];
+ uint32_t w, r[BCH_ECC_MAX_WORDS];
+ const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r);
const uint32_t * const tab0 = bch->mod8_tab;
const uint32_t * const tab1 = tab0 + 256*(l+1);
const uint32_t * const tab2 = tab1 + 256*(l+1);
const uint32_t * const tab3 = tab2 + 256*(l+1);
const uint32_t *pdata, *p0, *p1, *p2, *p3;
+ if (WARN_ON(r_bytes > sizeof(r)))
+ return;
+
if (ecc) {
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
} else {
- memset(bch->ecc_buf, 0, sizeof(r));
+ memset(bch->ecc_buf, 0, r_bytes);
}
/* process first unaligned data bytes */
@@ -215,7 +225,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
mlen = len/4;
data += 4*mlen;
len -= 4*mlen;
- memcpy(r, bch->ecc_buf, sizeof(r));
+ memcpy(r, bch->ecc_buf, r_bytes);
/*
* split each 32-bit word into 4 polynomials of weight 8 as follows:
@@ -241,7 +251,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
r[l] = p0[l]^p1[l]^p2[l]^p3[l];
}
- memcpy(bch->ecc_buf, r, sizeof(r));
+ memcpy(bch->ecc_buf, r, r_bytes);
/* process last unaligned bytes */
if (len)
@@ -434,7 +444,7 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
{
const int m = GF_M(bch);
unsigned int tmp, mask;
- int rem, c, r, p, k, param[m];
+ int rem, c, r, p, k, param[BCH_MAX_M];
k = 0;
mask = 1 << m;
@@ -1114,7 +1124,7 @@ static int build_deg2_base(struct bch_control *bch)
{
const int m = GF_M(bch);
int i, j, r;
- unsigned int sum, x, y, remaining, ak = 0, xi[m];
+ unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M];
/* find k s.t. Tr(a^k) = 1 and 0 <= k < m */
for (i = 0; i < m; i++) {
@@ -1254,7 +1264,6 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
struct bch_control *bch = NULL;
const int min_m = 5;
- const int max_m = 15;
/* default primitive polynomials */
static const unsigned int prim_poly_tab[] = {
@@ -1270,7 +1279,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
goto fail;
}
#endif
- if ((m < min_m) || (m > max_m))
+ if ((m < min_m) || (m > BCH_MAX_M))
/*
* values of m greater than 15 are not currently supported;
* supporting m > 15 would require changing table base type
@@ -1278,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
*/
goto fail;
+ if (t > BCH_MAX_T)
+ /*
+ * we can support larger than 64 bits if necessary, at the
+ * cost of higher stack usage.
+ */
+ goto fail;
+
/* sanity checks */
if ((t < 1) || (m*t >= ((1 << m)-1)))
/* invalid t value */
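The bch.c hunks above replace variable-length arrays with worst-case sized buffers plus an explicit bound check. A generic sketch of that pattern, with all names hypothetical:

	#define MY_MAX_WORDS 32				/* assumed worst case */

	static int fill_words(u32 *out, unsigned int nwords)
	{
		u32 tmp[MY_MAX_WORDS];			/* was: u32 tmp[nwords] */

		if (WARN_ON(nwords > MY_MAX_WORDS))	/* reject out-of-range input */
			return -EINVAL;
		memset(tmp, 0, nwords * sizeof(*tmp));
		memcpy(out, tmp, nwords * sizeof(*tmp));
		return 0;
	}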
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9e498c77ed0e..eead55aa7170 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,8 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -35,11 +37,6 @@
* carefully filter out these unused bits from impacting their
* results.
*
- * These operations actually hold to a slightly stronger rule:
- * if you don't input any bitmaps to these ops that have some
- * unused bits set, then they won't output any set unused bits
- * in output bitmaps.
- *
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
@@ -64,12 +61,9 @@ EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
- unsigned int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = BITS_TO_LONGS(bits);
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
-
- if (bits % BITS_PER_LONG)
- dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
@@ -468,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user);
* ranges if list is specified or hex digits grouped into comma-separated
* sets of 8 digits/set. Returns the number of characters written to buf.
*
- * It is assumed that @buf is a pointer into a PAGE_SIZE area and that
- * sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output.
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
*/
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits)
{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
- if (len > 1)
- n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
- return n;
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
@@ -607,7 +599,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
/* if no digit is after '-', it's wrong*/
if (at_start && in_range)
return -EINVAL;
- if (!(a <= b) || !(used_size <= group_size))
+ if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
@@ -1128,6 +1120,25 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
EXPORT_SYMBOL(bitmap_copy_le);
#endif
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
+{
+ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags);
+}
+EXPORT_SYMBOL(bitmap_alloc);
+
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
+{
+ return bitmap_alloc(nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(bitmap_zalloc);
+
+void bitmap_free(const unsigned long *bitmap)
+{
+ kfree(bitmap);
+}
+EXPORT_SYMBOL(bitmap_free);
+
#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
@@ -1135,14 +1146,10 @@ EXPORT_SYMBOL(bitmap_copy_le);
* @buf: array of u32 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
-void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
- unsigned int nbits)
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
unsigned int i, halfwords;
- if (!nbits)
- return;
-
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
bitmap[i/2] = (unsigned long) buf[i];
@@ -1166,9 +1173,6 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
unsigned int i, halfwords;
- if (!nbits)
- return;
-
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
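A minimal usage sketch for the new bitmap allocation helpers added above (illustrative only, needs <linux/bitmap.h> and <linux/bitops.h>):

	static int demo_bitmap(void)
	{
		unsigned long *map = bitmap_zalloc(1024, GFP_KERNEL); /* 1024 bits, zeroed */

		if (!map)
			return -ENOMEM;
		set_bit(42, map);
		/* ... use the bitmap ... */
		bitmap_free(map);
		return 0;
	}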
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index 266a97c5708b..64b92e1dbace 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -11,8 +11,9 @@
* to a power of 2 to be suitable as a hash table.
*/
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
- size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+ const char *name, struct lock_class_key *key)
{
spinlock_t *tlocks = NULL;
unsigned int i, size;
@@ -30,14 +31,13 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
}
if (sizeof(spinlock_t) != 0) {
- if (gfpflags_allow_blocking(gfp))
- tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
- else
- tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+ tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
- for (i = 0; i < size; i++)
+ for (i = 0; i < size; i++) {
spin_lock_init(&tlocks[i]);
+ lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+ }
}
*locks = tlocks;
@@ -45,7 +45,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
return 0;
}
-EXPORT_SYMBOL(alloc_bucket_spinlocks);
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
void free_bucket_spinlocks(spinlock_t *locks)
{
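The new __alloc_bucket_spinlocks() signature above takes a lockdep class name and key so that each user gets its own lock class. A hedged sketch of a direct caller (all names hypothetical):

	static spinlock_t *my_locks;
	static unsigned int my_locks_mask;

	static int init_my_locks(void)
	{
		static struct lock_class_key my_key;	/* one class for all buckets */

		return __alloc_bucket_spinlocks(&my_locks, &my_locks_mask, 1024,
						0, GFP_KERNEL, "my_locks", &my_key);
	}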
diff --git a/lib/chacha20.c b/lib/chacha20.c
index c1cc50fb68c9..d907fec6a9ed 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,9 +16,9 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-void chacha20_block(u32 *state, u32 *stream)
+void chacha20_block(u32 *state, u8 *stream)
{
- u32 x[16], *out = stream;
+ u32 x[16];
int i;
for (i = 0; i < ARRAY_SIZE(x); i++)
@@ -67,7 +67,7 @@ void chacha20_block(u32 *state, u32 *stream)
}
for (i = 0; i < ARRAY_SIZE(x); i++)
- out[i] = cpu_to_le32(x[i] + state[i]);
+ put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
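With the prototype change above the keystream is emitted as little-endian bytes rather than host-order words. A minimal sketch, assuming an already-keyed state (CHACHA20_BLOCK_SIZE comes from <crypto/chacha20.h>):

	static void demo_chacha(u32 state[16])
	{
		u8 stream[CHACHA20_BLOCK_SIZE];	/* 64 keystream bytes */

		chacha20_block(state, stream);	/* also advances the block counter */
	}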
diff --git a/lib/cpumask.c b/lib/cpumask.c
index beca6244671a..8d666ab84b5c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,7 +4,7 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_virt_alloc(cpumask_size(), 0);
+ *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
}
/**
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ad33e555805..4d0d47c1ffbd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -14,10 +14,47 @@
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include <linux/static_key.h>
+#include <linux/notifier.h>
-static struct crypto_shash *crct10dif_tfm;
+static struct crypto_shash __rcu *crct10dif_tfm;
static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_MUTEX(crc_t10dif_mutex);
+
+static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+ struct crypto_shash *new, *old;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ static_key_false(&crct10dif_fallback) ||
+ strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
+ return 0;
+
+ mutex_lock(&crc_t10dif_mutex);
+ old = rcu_dereference_protected(crct10dif_tfm,
+ lockdep_is_held(&crc_t10dif_mutex));
+ if (!old) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ new = crypto_alloc_shash("crct10dif", 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ rcu_assign_pointer(crct10dif_tfm, new);
+ mutex_unlock(&crc_t10dif_mutex);
+
+ synchronize_rcu();
+ crypto_free_shash(old);
+ return 0;
+}
+
+static struct notifier_block crc_t10dif_nb = {
+ .notifier_call = crc_t10dif_rehash,
+};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
@@ -30,11 +67,14 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
if (static_key_false(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
- desc.shash.tfm = crct10dif_tfm;
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crct10dif_tfm);
desc.shash.flags = 0;
*(__u16 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
BUG_ON(err);
return *(__u16 *)desc.ctx;
@@ -49,6 +89,7 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ crypto_register_notifier(&crc_t10dif_nb);
crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(crct10dif_tfm)) {
static_key_slow_inc(&crct10dif_fallback);
@@ -59,12 +100,24 @@ static int __init crc_t10dif_mod_init(void)
static void __exit crc_t10dif_mod_fini(void)
{
+ crypto_unregister_notifier(&crc_t10dif_nb);
crypto_free_shash(crct10dif_tfm);
}
module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_fini);
+static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ if (static_key_false(&crct10dif_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ return sprintf(buffer, "%s\n",
+ crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+}
+
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
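The calculation interface itself is unchanged by the rehash support above; a minimal incremental-use sketch (buffer names are illustrative, needs <linux/crc-t10dif.h>):

	static __u16 demo_crc_t10dif(const u8 *hdr, size_t hdr_len,
				     const u8 *payload, size_t payload_len)
	{
		__u16 crc = crc_t10dif_update(0, hdr, hdr_len);	/* seed with 0 */

		return crc_t10dif_update(crc, payload, payload_len);
	}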
diff --git a/lib/crc32.c b/lib/crc32.c
index 2ef20fe84b69..45b1d67a1767 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -27,6 +27,7 @@
/* see: Documentation/crc32.txt for a description of algorithms */
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -182,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
}
#if CRC_LE_BITS == 1
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
}
#else
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
+ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
@@ -205,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
+u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
@@ -268,7 +272,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
- return crc32_generic_shift(crc, len, CRCPOLY_LE);
+ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +334,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
+ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
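The __weak/__alias change above lets an architecture supply its own crc32_le() while still reaching the generic table-driven code through crc32_le_base(). A hedged, entirely hypothetical override sketch:

	u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
	{
		if (!cpu_has_crc32_insn())			/* hypothetical feature test */
			return crc32_le_base(crc, p, len);	/* generic fallback */
		return hw_crc32_le(crc, p, len);		/* hypothetical accelerated path */
	}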
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index cb275a28a750..0c8fb5923e7e 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -1,18 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
diff --git a/lib/crc64.c b/lib/crc64.c
new file mode 100644
index 000000000000..0ef8ae6ac047
--- /dev/null
+++ b/lib/crc64.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Normal 64-bit CRC calculation.
+ *
+ * This is a basic crc64 implementation following ECMA-182 specification,
+ * which can be found from,
+ * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ *
+ * Dr. Ross N. Williams has a great document to introduce the idea of CRC
+ * algorithm, here the CRC64 code is also inspired by the table-driven
+ * algorithm and detail example from this paper. This paper can be found
+ * from,
+ * http://www.ross.net/crc/download/crc_v3.txt
+ *
+ * crc64table[256] is the lookup table of a table-driven 64-bit CRC
+ * calculation, which is generated by gen_crc64table.c in kernel build
+ * time. The polynomial of crc64 arithmetic is from ECMA-182 specification
+ * as well, which is defined as,
+ *
+ * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x + 1
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include "crc64table.h"
+
+MODULE_DESCRIPTION("CRC64 calculations");
+MODULE_LICENSE("GPL v2");
+
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_be(u64 crc, const void *p, size_t len)
+{
+ size_t i, t;
+
+ const unsigned char *_p = p;
+
+ for (i = 0; i < len; i++) {
+ t = ((crc >> 56) ^ (*_p++)) & 0xFF;
+ crc = crc64table[t] ^ (crc << 8);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL_GPL(crc64_be);
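The lookup table consumed by crc64_be() above is emitted at build time by gen_crc64table.c, as the header comment notes. A hedged userspace sketch of the equivalent table construction, assuming the ECMA-182 polynomial in its MSB-first form (0x42F0E1EBA9EA3693, matching the expansion listed in the comment):

	#include <stdint.h>

	#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL

	static uint64_t crc64table[256];

	/* Build the byte-at-a-time lookup table that a crc64_be()-style loop consumes. */
	static void crc64_build_table(void)
	{
		uint64_t crc;
		int i, j;

		for (i = 0; i < 256; i++) {
			crc = (uint64_t)i << 56;
			for (j = 0; j < 8; j++)
				crc = (crc << 1) ^ ((crc & (1ULL << 63)) ? CRC64_ECMA182_POLY : 0);
			crc64table[i] = crc;
		}
	}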
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 96c4c633d95e..ce51749cc145 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -21,7 +21,7 @@
* that would just muddy the log. So we report the first one and
* shut up after that.
*/
-int debug_locks = 1;
+int debug_locks __read_mostly = 1;
EXPORT_SYMBOL_GPL(debug_locks);
/*
@@ -29,7 +29,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
* 'silent failure': nothing is printed to the console when
* a locking bug is detected.
*/
-int debug_locks_silent;
+int debug_locks_silent __read_mostly;
EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
*/
int debug_locks_off(void)
{
- if (__debug_locks_off()) {
+ if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
console_verbose();
return 1;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2f5349c6e81a..70935ed91125 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,14 +42,18 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
+/* The number of objs on the global free list */
+static int obj_nr_tofree;
static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
+static int __maybe_unused debug_objects_maxchecked __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
@@ -96,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
static void fill_pool(void)
{
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
- struct debug_obj *new;
+ struct debug_obj *new, *obj;
unsigned long flags;
if (likely(obj_pool_free >= debug_objects_pool_min_level))
return;
+ /*
+ * Reuse objs from the global free list; they will be reinitialized
+ * when allocating.
+ */
+ while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ /*
+ * Recheck with the lock held as the worker thread might have
+ * won the race and freed the global free list already.
+ */
+ if (obj_nr_tofree) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ obj_nr_tofree--;
+ hlist_add_head(&obj->node, &obj_pool);
+ obj_pool_free++;
+ }
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ }
+
if (unlikely(!obj_cache))
return;
@@ -177,62 +201,76 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
* workqueue function to free objects.
*
* To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy. We also free
- * the objects in a batch of 4 for each lock/unlock cycle.
+ * debug objects will be delayed if the pool_lock is busy.
*/
-#define ODEBUG_FREE_BATCH 4
-
static void free_obj_work(struct work_struct *work)
{
- struct debug_obj *objs[ODEBUG_FREE_BATCH];
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
unsigned long flags;
- int i;
+ HLIST_HEAD(tofree);
if (!raw_spin_trylock_irqsave(&pool_lock, flags))
return;
- while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
- for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
- objs[i] = hlist_entry(obj_pool.first,
- typeof(*objs[0]), node);
- hlist_del(&objs[i]->node);
- }
- obj_pool_free -= ODEBUG_FREE_BATCH;
- debug_objects_freed += ODEBUG_FREE_BATCH;
- /*
- * We release pool_lock across kmem_cache_free() to
- * avoid contention on pool_lock.
- */
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- for (i = 0; i < ODEBUG_FREE_BATCH; i++)
- kmem_cache_free(obj_cache, objs[i]);
- if (!raw_spin_trylock_irqsave(&pool_lock, flags))
- return;
+ /*
+ * The objs on the pool list might be allocated before the work is
+ * run, so recheck whether the pool list is full; if not, refill the
+ * pool list from the global free list.
+ */
+ while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ hlist_add_head(&obj->node, &obj_pool);
+ obj_pool_free++;
+ obj_nr_tofree--;
+ }
+
+ /*
+ * Pool list is already full and there are still objs on the free
+ * list. Move remaining free objs to a temporary list to free the
+ * memory outside the pool_lock held region.
+ */
+ if (obj_nr_tofree) {
+ hlist_move_list(&obj_to_free, &tofree);
+ debug_objects_freed += obj_nr_tofree;
+ obj_nr_tofree = 0;
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+ hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
}
-/*
- * Put the object back into the pool and schedule work to free objects
- * if necessary.
- */
-static void free_object(struct debug_obj *obj)
+static bool __free_object(struct debug_obj *obj)
{
unsigned long flags;
- int sched = 0;
+ bool work;
raw_spin_lock_irqsave(&pool_lock, flags);
- /*
- * schedule work when the pool is filled and the cache is
- * initialized:
- */
- if (obj_pool_free > debug_objects_pool_size && obj_cache)
- sched = 1;
- hlist_add_head(&obj->node, &obj_pool);
- obj_pool_free++;
+ work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
obj_pool_used--;
+
+ if (work) {
+ obj_nr_tofree++;
+ hlist_add_head(&obj->node, &obj_to_free);
+ } else {
+ obj_pool_free++;
+ hlist_add_head(&obj->node, &obj_pool);
+ }
raw_spin_unlock_irqrestore(&pool_lock, flags);
- if (sched)
+ return work;
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+ if (__free_object(obj))
schedule_work(&debug_obj_work);
}
@@ -322,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- pr_warn("object is on stack, but not annotated\n");
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
else
- pr_warn("object is not on stack, but annotated\n");
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
WARN_ON(1);
}
@@ -714,13 +755,13 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- struct hlist_node *tmp;
- HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
+ struct hlist_node *tmp;
struct debug_obj *obj;
- int cnt;
+ int cnt, objs_checked = 0;
+ bool work = false;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -751,21 +792,24 @@ repeat:
goto repeat;
default:
hlist_del(&obj->node);
- hlist_add_head(&obj->node, &freelist);
+ work |= __free_object(obj);
break;
}
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- /* Now free them */
- hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
- hlist_del(&obj->node);
- free_object(obj);
- }
-
if (cnt > debug_objects_maxchain)
debug_objects_maxchain = cnt;
+
+ objs_checked += cnt;
}
+
+ if (objs_checked > debug_objects_maxchecked)
+ debug_objects_maxchecked = objs_checked;
+
+ /* Schedule work to actually kmem_cache_free() objects */
+ if (work)
+ schedule_work(&debug_obj_work);
}
void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -780,12 +824,14 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
static int debug_stats_show(struct seq_file *m, void *v)
{
seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
+ seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
seq_printf(m, "warnings :%d\n", debug_objects_warnings);
seq_printf(m, "fixups :%d\n", debug_objects_fixups);
seq_printf(m, "pool_free :%d\n", obj_pool_free);
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
seq_printf(m, "pool_used :%d\n", obj_pool_used);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+ seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
return 0;
@@ -1142,8 +1188,7 @@ void __init debug_objects_mem_init(void)
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
- if (obj_cache)
- kmem_cache_destroy(obj_cache);
+ kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
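The free_obj_work()/__free_object() rework above applies a common pattern: surplus objects are parked on a dedicated to-free list while the spinlock is held, and that list is later detached in one step so the actual kmem_cache_free() calls happen outside the locked region. A minimal pthread-based sketch of the pattern (a generic illustration, not the kernel's hlist/raw_spinlock code):

	#include <pthread.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	static struct node *to_free;	/* objects parked for deferred freeing */
	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Park an object for later freeing; cheap while the lock is held. */
	static void defer_free(struct node *n)
	{
		pthread_mutex_lock(&pool_lock);
		n->next = to_free;
		to_free = n;
		pthread_mutex_unlock(&pool_lock);
	}

	/* Worker: detach the whole list under the lock, free outside it. */
	static void free_deferred(void)
	{
		struct node *batch, *next;

		pthread_mutex_lock(&pool_lock);
		batch = to_free;
		to_free = NULL;
		pthread_mutex_unlock(&pool_lock);

		while (batch) {
			next = batch->next;
			free(batch);	/* expensive work done without the lock */
			batch = next;
		}
	}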
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 347fa7ac2e8a..9555b68bb774 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
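A hedged sketch of how a caller might use the new helper: the IRQ-safe lock is taken only when the decrement drops the count to zero. The structure and names below are hypothetical, shown purely to illustrate the calling convention:

	/* Hypothetical refcounted object living on a list protected by list_lock. */
	struct foo {
		atomic_t refcnt;
		struct list_head node;
	};

	static void foo_put(struct foo *f, spinlock_t *list_lock)
	{
		unsigned long flags;

		/* Fast path: just decrement; the lock is taken only for the final put. */
		if (!_atomic_dec_and_lock_irqsave(&f->refcnt, list_lock, &flags))
			return;

		/* Refcount hit zero with list_lock held and interrupts disabled. */
		list_del(&f->node);
		spin_unlock_irqrestore(list_lock, flags);
		kfree(f);
	}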
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 0234361b24b8..7c4932eed748 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
#endif /* STATIC */
#include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
- c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+ c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
bd->crc32Table[i] = c;
}
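The loop above builds the standard MSB-first (big-endian) CRC32 table, which is then consumed one byte at a time. A hedged generic sketch of that update step (the bunzip2 code interleaves it with its own header and block bookkeeping):

	/* MSB-first, byte-at-a-time CRC32 update over a table built as above. */
	static unsigned int crc32_be_update(unsigned int crc, const unsigned char *p,
					    unsigned long len, const unsigned int table[256])
	{
		unsigned long i;

		for (i = 0; i < len; i++)
			crc = (crc << 8) ^ table[((crc >> 24) ^ p[i]) & 0xff];
		return crc;
	}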
diff --git a/lib/devres.c b/lib/devres.c
index 5f2aedd58bc5..faccf1a037d0 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -4,6 +4,13 @@
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/of_address.h>
+
+enum devm_ioremap_type {
+ DEVM_IOREMAP = 0,
+ DEVM_IOREMAP_NC,
+ DEVM_IOREMAP_WC,
+};
void devm_ioremap_release(struct device *dev, void *res)
{
@@ -15,24 +22,28 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
return *(void **)res == match_data;
}
-/**
- * devm_ioremap - Managed ioremap()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed ioremap(). Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
- resource_size_t size)
+static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size,
+ enum devm_ioremap_type type)
{
- void __iomem **ptr, *addr;
+ void __iomem **ptr, *addr = NULL;
ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
- addr = ioremap(offset, size);
+ switch (type) {
+ case DEVM_IOREMAP:
+ addr = ioremap(offset, size);
+ break;
+ case DEVM_IOREMAP_NC:
+ addr = ioremap_nocache(offset, size);
+ break;
+ case DEVM_IOREMAP_WC:
+ addr = ioremap_wc(offset, size);
+ break;
+ }
+
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
@@ -41,6 +52,20 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
return addr;
}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
+}
EXPORT_SYMBOL(devm_ioremap);
/**
@@ -55,20 +80,7 @@ EXPORT_SYMBOL(devm_ioremap);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size)
{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = ioremap_nocache(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
}
EXPORT_SYMBOL(devm_ioremap_nocache);
@@ -83,20 +95,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size)
{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = ioremap_wc(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);
@@ -164,6 +163,41 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
EXPORT_SYMBOL(devm_ioremap_resource);
+/*
+ * devm_of_iomap - Requests a resource and maps the memory mapped IO
+ * for a given device_node managed by a given device
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach of the device.
+ *
+ * This is to be used when a device requests/maps resources described
+ * by other device tree nodes (children or otherwise).
+ *
+ * @dev: The device "managing" the resource
+ * @node: The device-tree node where the resource resides
+ * @index: index of the MMIO range in the "reg" property
+ * @size: Returns the size of the resource (pass NULL if not needed)
+ * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure. Usage example:
+ *
+ * base = devm_of_iomap(&pdev->dev, node, 0, NULL);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ struct resource res;
+
+ if (of_address_to_resource(node, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+ if (size)
+ *size = resource_size(&res);
+ return devm_ioremap_resource(dev, &res);
+}
+EXPORT_SYMBOL(devm_of_iomap);
+
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
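A hedged sketch of a hypothetical platform driver probe using the devm_of_iomap() helper added above; the driver structure, child node name, and messages are invented for illustration:

	static int foo_probe(struct platform_device *pdev)
	{
		struct device_node *child;
		resource_size_t size;
		void __iomem *base;

		/* Map the MMIO range described by a child node; unmapping is managed. */
		child = of_get_child_by_name(pdev->dev.of_node, "ctrl");
		if (!child)
			return -ENODEV;

		base = devm_of_iomap(&pdev->dev, child, 0, &size);
		of_node_put(child);
		if (IS_ERR(base))
			return PTR_ERR(base);

		dev_info(&pdev->dev, "mapped ctrl registers (%pa bytes)\n", &size);
		return 0;
	}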
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
deleted file mode 100644
index 7f5cdc1e6b29..000000000000
--- a/lib/dma-debug.c
+++ /dev/null
@@ -1,1752 +0,0 @@
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/sched/task_stack.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched/task.h>
-#include <linux/stacktrace.h>
-#include <linux/dma-debug.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/ctype.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/sections.h>
-
-#define HASH_SIZE 1024ULL
-#define HASH_FN_SHIFT 13
-#define HASH_FN_MASK (HASH_SIZE - 1)
-
-enum {
- dma_debug_single,
- dma_debug_page,
- dma_debug_sg,
- dma_debug_coherent,
- dma_debug_resource,
-};
-
-enum map_err_types {
- MAP_ERR_CHECK_NOT_APPLICABLE,
- MAP_ERR_NOT_CHECKED,
- MAP_ERR_CHECKED,
-};
-
-#define DMA_DEBUG_STACKTRACE_ENTRIES 5
-
-/**
- * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
- * @list: node on pre-allocated free_entries list
- * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
- * @type: single, page, sg, coherent
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
- * @size: length of the mapping
- * @direction: enum dma_data_direction
- * @sg_call_ents: 'nents' from dma_map_sg
- * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
- * @map_err_type: track whether dma_mapping_error() was checked
- * @stacktrace: support backtraces when a violation is detected
- */
-struct dma_debug_entry {
- struct list_head list;
- struct device *dev;
- int type;
- unsigned long pfn;
- size_t offset;
- u64 dev_addr;
- u64 size;
- int direction;
- int sg_call_ents;
- int sg_mapped_ents;
- enum map_err_types map_err_type;
-#ifdef CONFIG_STACKTRACE
- struct stack_trace stacktrace;
- unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
-#endif
-};
-
-typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
-
-struct hash_bucket {
- struct list_head list;
- spinlock_t lock;
-} ____cacheline_aligned_in_smp;
-
-/* Hash list to save the allocated dma addresses */
-static struct hash_bucket dma_entry_hash[HASH_SIZE];
-/* List of pre-allocated dma_debug_entry's */
-static LIST_HEAD(free_entries);
-/* Lock for the list above */
-static DEFINE_SPINLOCK(free_entries_lock);
-
-/* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
-
-/* Early initialization disable flag, set at the end of dma_debug_init */
-static bool dma_debug_initialized __read_mostly;
-
-static inline bool dma_debug_disabled(void)
-{
- return global_disable || !dma_debug_initialized;
-}
-
-/* Global error count */
-static u32 error_count;
-
-/* Global error show enable*/
-static u32 show_all_errors __read_mostly;
-/* Number of errors to show */
-static u32 show_num_errors = 1;
-
-static u32 num_free_entries;
-static u32 min_free_entries;
-static u32 nr_total_entries;
-
-/* number of preallocated entries requested by kernel cmdline */
-static u32 req_entries;
-
-/* debugfs dentry's for the stuff above */
-static struct dentry *dma_debug_dent __read_mostly;
-static struct dentry *global_disable_dent __read_mostly;
-static struct dentry *error_count_dent __read_mostly;
-static struct dentry *show_all_errors_dent __read_mostly;
-static struct dentry *show_num_errors_dent __read_mostly;
-static struct dentry *num_free_entries_dent __read_mostly;
-static struct dentry *min_free_entries_dent __read_mostly;
-static struct dentry *filter_dent __read_mostly;
-
-/* per-driver filter related state */
-
-#define NAME_MAX_LEN 64
-
-static char current_driver_name[NAME_MAX_LEN] __read_mostly;
-static struct device_driver *current_driver __read_mostly;
-
-static DEFINE_RWLOCK(driver_name_lock);
-
-static const char *const maperr2str[] = {
- [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
- [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
- [MAP_ERR_CHECKED] = "dma map error checked",
-};
-
-static const char *type2name[5] = { "single", "page",
- "scather-gather", "coherent",
- "resource" };
-
-static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
- "DMA_FROM_DEVICE", "DMA_NONE" };
-
-/*
- * The access to some variables in this macro is racy. We can't use atomic_t
- * here because all these variables are exported to debugfs. Some of them even
- * writeable. This is also the reason why a lock won't help much. But anyway,
- * the races are no big deal. Here is why:
- *
- * error_count: the addition is racy, but the worst thing that can happen is
- * that we don't count some errors
- * show_num_errors: the subtraction is racy. Also no big deal because in
- * worst case this will result in one warning more in the
- * system log than the user configured. This variable is
- * writeable via debugfs.
- */
-static inline void dump_entry_trace(struct dma_debug_entry *entry)
-{
-#ifdef CONFIG_STACKTRACE
- if (entry) {
- pr_warning("Mapped at:\n");
- print_stack_trace(&entry->stacktrace, 0);
- }
-#endif
-}
-
-static bool driver_filter(struct device *dev)
-{
- struct device_driver *drv;
- unsigned long flags;
- bool ret;
-
- /* driver filter off */
- if (likely(!current_driver_name[0]))
- return true;
-
- /* driver filter on and initialized */
- if (current_driver && dev && dev->driver == current_driver)
- return true;
-
- /* driver filter on, but we can't filter on a NULL device... */
- if (!dev)
- return false;
-
- if (current_driver || !current_driver_name[0])
- return false;
-
- /* driver filter on but not yet initialized */
- drv = dev->driver;
- if (!drv)
- return false;
-
- /* lock to protect against change of current_driver_name */
- read_lock_irqsave(&driver_name_lock, flags);
-
- ret = false;
- if (drv->name &&
- strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
- current_driver = drv;
- ret = true;
- }
-
- read_unlock_irqrestore(&driver_name_lock, flags);
-
- return ret;
-}
-
-#define err_printk(dev, entry, format, arg...) do { \
- error_count += 1; \
- if (driver_filter(dev) && \
- (show_all_errors || show_num_errors > 0)) { \
- WARN(1, "%s %s: " format, \
- dev ? dev_driver_string(dev) : "NULL", \
- dev ? dev_name(dev) : "NULL", ## arg); \
- dump_entry_trace(entry); \
- } \
- if (!show_all_errors && show_num_errors > 0) \
- show_num_errors -= 1; \
- } while (0);
-
-/*
- * Hash related functions
- *
- * Every DMA-API request is saved into a struct dma_debug_entry. To
- * have quick access to these structs they are stored into a hash.
- */
-static int hash_fn(struct dma_debug_entry *entry)
-{
- /*
- * Hash function is based on the dma address.
- * We use bits 20-27 here as the index into the hash
- */
- return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
-}
-
-/*
- * Request exclusive access to a hash bucket for a given dma_debug_entry.
- */
-static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
- unsigned long *flags)
- __acquires(&dma_entry_hash[idx].lock)
-{
- int idx = hash_fn(entry);
- unsigned long __flags;
-
- spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
- *flags = __flags;
- return &dma_entry_hash[idx];
-}
-
-/*
- * Give up exclusive access to the hash bucket
- */
-static void put_hash_bucket(struct hash_bucket *bucket,
- unsigned long *flags)
- __releases(&bucket->lock)
-{
- unsigned long __flags = *flags;
-
- spin_unlock_irqrestore(&bucket->lock, __flags);
-}
-
-static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
-{
- return ((a->dev_addr == b->dev_addr) &&
- (a->dev == b->dev)) ? true : false;
-}
-
-static bool containing_match(struct dma_debug_entry *a,
- struct dma_debug_entry *b)
-{
- if (a->dev != b->dev)
- return false;
-
- if ((b->dev_addr <= a->dev_addr) &&
- ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
- return true;
-
- return false;
-}
-
-/*
- * Search a given entry in the hash bucket list
- */
-static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
- struct dma_debug_entry *ref,
- match_fn match)
-{
- struct dma_debug_entry *entry, *ret = NULL;
- int matches = 0, match_lvl, last_lvl = -1;
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!match(ref, entry))
- continue;
-
- /*
- * Some drivers map the same physical address multiple
- * times. Without a hardware IOMMU this results in the
- * same device addresses being put into the dma-debug
- * hash multiple times too. This can result in false
- * positives being reported. Therefore we implement a
- * best-fit algorithm here which returns the entry from
- * the hash which fits best to the reference value
- * instead of the first-fit.
- */
- matches += 1;
- match_lvl = 0;
- entry->size == ref->size ? ++match_lvl : 0;
- entry->type == ref->type ? ++match_lvl : 0;
- entry->direction == ref->direction ? ++match_lvl : 0;
- entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
-
- if (match_lvl == 4) {
- /* perfect-fit - return the result */
- return entry;
- } else if (match_lvl > last_lvl) {
- /*
- * We found an entry that fits better then the
- * previous one or it is the 1st match.
- */
- last_lvl = match_lvl;
- ret = entry;
- }
- }
-
- /*
- * If we have multiple matches but no perfect-fit, just return
- * NULL.
- */
- ret = (matches == 1) ? ret : NULL;
-
- return ret;
-}
-
-static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
- struct dma_debug_entry *ref)
-{
- return __hash_bucket_find(bucket, ref, exact_match);
-}
-
-static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
- struct dma_debug_entry *ref,
- unsigned long *flags)
-{
-
- unsigned int max_range = dma_get_max_seg_size(ref->dev);
- struct dma_debug_entry *entry, index = *ref;
- unsigned int range = 0;
-
- while (range <= max_range) {
- entry = __hash_bucket_find(*bucket, ref, containing_match);
-
- if (entry)
- return entry;
-
- /*
- * Nothing found, go back a hash bucket
- */
- put_hash_bucket(*bucket, flags);
- range += (1 << HASH_FN_SHIFT);
- index.dev_addr -= (1 << HASH_FN_SHIFT);
- *bucket = get_hash_bucket(&index, flags);
- }
-
- return NULL;
-}
-
-/*
- * Add an entry to a hash bucket
- */
-static void hash_bucket_add(struct hash_bucket *bucket,
- struct dma_debug_entry *entry)
-{
- list_add_tail(&entry->list, &bucket->list);
-}
-
-/*
- * Remove entry from a hash bucket list
- */
-static void hash_bucket_del(struct dma_debug_entry *entry)
-{
- list_del(&entry->list);
-}
-
-static unsigned long long phys_addr(struct dma_debug_entry *entry)
-{
- if (entry->type == dma_debug_resource)
- return __pfn_to_phys(entry->pfn) + entry->offset;
-
- return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
-}
-
-/*
- * Dump mapping entries for debugging purposes
- */
-void debug_dma_dump_mappings(struct device *dev)
-{
- int idx;
-
- for (idx = 0; idx < HASH_SIZE; idx++) {
- struct hash_bucket *bucket = &dma_entry_hash[idx];
- struct dma_debug_entry *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&bucket->lock, flags);
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!dev || dev == entry->dev) {
- dev_info(entry->dev,
- "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
- type2name[entry->type], idx,
- phys_addr(entry), entry->pfn,
- entry->dev_addr, entry->size,
- dir2name[entry->direction],
- maperr2str[entry->map_err_type]);
- }
- }
-
- spin_unlock_irqrestore(&bucket->lock, flags);
- }
-}
-EXPORT_SYMBOL(debug_dma_dump_mappings);
-
-/*
- * For each mapping (initial cacheline in the case of
- * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
- * scatterlist, or the cacheline specified in dma_map_single) insert
- * into this tree using the cacheline as the key. At
- * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
- * the entry already exists at insertion time add a tag as a reference
- * count for the overlapping mappings. For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the
- * cacheline idle, but we should also be flagging overlaps as an API
- * violation.
- *
- * Memory usage is mostly constrained by the maximum number of available
- * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree. In the case of dma_map_page and
- * dma_alloc_coherent there is only one dma_debug_entry and one
- * dma_active_cacheline entry to track per event. dma_map_sg(), on the
- * other hand, consumes a single dma_debug_entry, but inserts 'nents'
- * entries into the tree.
- *
- * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if any cachelines in the given page are in the active set.
- */
-static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
-static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
-#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
-#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-
-static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
-{
- return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
- (entry->offset >> L1_CACHE_SHIFT);
-}
-
-static int active_cacheline_read_overlap(phys_addr_t cln)
-{
- int overlap = 0, i;
-
- for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
- if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
- overlap |= 1 << i;
- return overlap;
-}
-
-static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
-{
- int i;
-
- if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
- return overlap;
-
- for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
- if (overlap & 1 << i)
- radix_tree_tag_set(&dma_active_cacheline, cln, i);
- else
- radix_tree_tag_clear(&dma_active_cacheline, cln, i);
-
- return overlap;
-}
-
-static void active_cacheline_inc_overlap(phys_addr_t cln)
-{
- int overlap = active_cacheline_read_overlap(cln);
-
- overlap = active_cacheline_set_overlap(cln, ++overlap);
-
- /* If we overflowed the overlap counter then we're potentially
- * leaking dma-mappings. Otherwise, if maps and unmaps are
- * balanced then this overflow may cause false negatives in
- * debug_dma_assert_idle() as the cacheline may be marked idle
- * prematurely.
- */
- WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
- "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
- ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
-}
-
-static int active_cacheline_dec_overlap(phys_addr_t cln)
-{
- int overlap = active_cacheline_read_overlap(cln);
-
- return active_cacheline_set_overlap(cln, --overlap);
-}
-
-static int active_cacheline_insert(struct dma_debug_entry *entry)
-{
- phys_addr_t cln = to_cacheline_number(entry);
- unsigned long flags;
- int rc;
-
- /* If the device is not writing memory then we don't have any
- * concerns about the cpu consuming stale data. This mitigates
- * legitimate usages of overlapping mappings.
- */
- if (entry->direction == DMA_TO_DEVICE)
- return 0;
-
- spin_lock_irqsave(&radix_lock, flags);
- rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
- if (rc == -EEXIST)
- active_cacheline_inc_overlap(cln);
- spin_unlock_irqrestore(&radix_lock, flags);
-
- return rc;
-}
-
-static void active_cacheline_remove(struct dma_debug_entry *entry)
-{
- phys_addr_t cln = to_cacheline_number(entry);
- unsigned long flags;
-
- /* ...mirror the insert case */
- if (entry->direction == DMA_TO_DEVICE)
- return;
-
- spin_lock_irqsave(&radix_lock, flags);
- /* since we are counting overlaps the final put of the
- * cacheline will occur when the overlap count is 0.
- * active_cacheline_dec_overlap() returns -1 in that case
- */
- if (active_cacheline_dec_overlap(cln) < 0)
- radix_tree_delete(&dma_active_cacheline, cln);
- spin_unlock_irqrestore(&radix_lock, flags);
-}
-
-/**
- * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_cacheline tree
- *
- * Place a call to this routine in cases where the cpu touching the page
- * before the dma completes (page is dma_unmapped) will lead to data
- * corruption.
- */
-void debug_dma_assert_idle(struct page *page)
-{
- static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
- struct dma_debug_entry *entry = NULL;
- void **results = (void **) &ents;
- unsigned int nents, i;
- unsigned long flags;
- phys_addr_t cln;
-
- if (dma_debug_disabled())
- return;
-
- if (!page)
- return;
-
- cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
- spin_lock_irqsave(&radix_lock, flags);
- nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
- CACHELINES_PER_PAGE);
- for (i = 0; i < nents; i++) {
- phys_addr_t ent_cln = to_cacheline_number(ents[i]);
-
- if (ent_cln == cln) {
- entry = ents[i];
- break;
- } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
- break;
- }
- spin_unlock_irqrestore(&radix_lock, flags);
-
- if (!entry)
- return;
-
- cln = to_cacheline_number(entry);
- err_printk(entry->dev, entry,
- "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
- &cln);
-}
-
-/*
- * Wrapper function for adding an entry to the hash.
- * This function takes care of locking itself.
- */
-static void add_dma_entry(struct dma_debug_entry *entry)
-{
- struct hash_bucket *bucket;
- unsigned long flags;
- int rc;
-
- bucket = get_hash_bucket(entry, &flags);
- hash_bucket_add(bucket, entry);
- put_hash_bucket(bucket, &flags);
-
- rc = active_cacheline_insert(entry);
- if (rc == -ENOMEM) {
- pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
- global_disable = true;
- }
-
- /* TODO: report -EEXIST errors here as overlapping mappings are
- * not supported by the DMA API
- */
-}
-
-static struct dma_debug_entry *__dma_entry_alloc(void)
-{
- struct dma_debug_entry *entry;
-
- entry = list_entry(free_entries.next, struct dma_debug_entry, list);
- list_del(&entry->list);
- memset(entry, 0, sizeof(*entry));
-
- num_free_entries -= 1;
- if (num_free_entries < min_free_entries)
- min_free_entries = num_free_entries;
-
- return entry;
-}
-
-/* struct dma_entry allocator
- *
- * The next two functions implement the allocator for
- * struct dma_debug_entries.
- */
-static struct dma_debug_entry *dma_entry_alloc(void)
-{
- struct dma_debug_entry *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- if (list_empty(&free_entries)) {
- global_disable = true;
- spin_unlock_irqrestore(&free_entries_lock, flags);
- pr_err("DMA-API: debugging out of memory - disabling\n");
- return NULL;
- }
-
- entry = __dma_entry_alloc();
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
-#ifdef CONFIG_STACKTRACE
- entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
- entry->stacktrace.entries = entry->st_entries;
- entry->stacktrace.skip = 2;
- save_stack_trace(&entry->stacktrace);
-#endif
-
- return entry;
-}
-
-static void dma_entry_free(struct dma_debug_entry *entry)
-{
- unsigned long flags;
-
- active_cacheline_remove(entry);
-
- /*
- * add to beginning of the list - this way the entries are
- * more likely cache hot when they are reallocated.
- */
- spin_lock_irqsave(&free_entries_lock, flags);
- list_add(&entry->list, &free_entries);
- num_free_entries += 1;
- spin_unlock_irqrestore(&free_entries_lock, flags);
-}
-
-int dma_debug_resize_entries(u32 num_entries)
-{
- int i, delta, ret = 0;
- unsigned long flags;
- struct dma_debug_entry *entry;
- LIST_HEAD(tmp);
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- if (nr_total_entries < num_entries) {
- delta = num_entries - nr_total_entries;
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
- for (i = 0; i < delta; i++) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
-
- list_add_tail(&entry->list, &tmp);
- }
-
- spin_lock_irqsave(&free_entries_lock, flags);
-
- list_splice(&tmp, &free_entries);
- nr_total_entries += i;
- num_free_entries += i;
- } else {
- delta = nr_total_entries - num_entries;
-
- for (i = 0; i < delta && !list_empty(&free_entries); i++) {
- entry = __dma_entry_alloc();
- kfree(entry);
- }
-
- nr_total_entries -= i;
- }
-
- if (nr_total_entries != num_entries)
- ret = 1;
-
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(dma_debug_resize_entries);
-
-/*
- * DMA-API debugging init code
- *
- * The init code does two things:
- * 1. Initialize core data structures
- * 2. Preallocate a given number of dma_debug_entry structs
- */
-
-static int prealloc_memory(u32 num_entries)
-{
- struct dma_debug_entry *entry, *next_entry;
- int i;
-
- for (i = 0; i < num_entries; ++i) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto out_err;
-
- list_add_tail(&entry->list, &free_entries);
- }
-
- num_free_entries = num_entries;
- min_free_entries = num_entries;
-
- pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
-
- return 0;
-
-out_err:
-
- list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- return -ENOMEM;
-}
-
-static ssize_t filter_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char buf[NAME_MAX_LEN + 1];
- unsigned long flags;
- int len;
-
- if (!current_driver_name[0])
- return 0;
-
- /*
- * We can't copy to userspace directly because current_driver_name can
- * only be read under the driver_name_lock with irqs disabled. So
- * create a temporary copy first.
- */
- read_lock_irqsave(&driver_name_lock, flags);
- len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
- read_unlock_irqrestore(&driver_name_lock, flags);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t filter_write(struct file *file, const char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char buf[NAME_MAX_LEN];
- unsigned long flags;
- size_t len;
- int i;
-
- /*
- * We can't copy from userspace directly. Access to
- * current_driver_name is protected with a write_lock with irqs
- * disabled. Since copy_from_user can fault and may sleep we
- * need to copy to temporary buffer first
- */
- len = min(count, (size_t)(NAME_MAX_LEN - 1));
- if (copy_from_user(buf, userbuf, len))
- return -EFAULT;
-
- buf[len] = 0;
-
- write_lock_irqsave(&driver_name_lock, flags);
-
- /*
- * Now handle the string we got from userspace very carefully.
- * The rules are:
- * - only use the first token we got
- * - token delimiter is everything looking like a space
- * character (' ', '\n', '\t' ...)
- *
- */
- if (!isalnum(buf[0])) {
- /*
- * If the first character userspace gave us is not
- * alphanumerical then assume the filter should be
- * switched off.
- */
- if (current_driver_name[0])
- pr_info("DMA-API: switching off dma-debug driver filter\n");
- current_driver_name[0] = 0;
- current_driver = NULL;
- goto out_unlock;
- }
-
- /*
- * Now parse out the first token and use it as the name for the
- * driver to filter for.
- */
- for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
- current_driver_name[i] = buf[i];
- if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
- break;
- }
- current_driver_name[i] = 0;
- current_driver = NULL;
-
- pr_info("DMA-API: enable driver filter for driver [%s]\n",
- current_driver_name);
-
-out_unlock:
- write_unlock_irqrestore(&driver_name_lock, flags);
-
- return count;
-}
-
-static const struct file_operations filter_fops = {
- .read = filter_read,
- .write = filter_write,
- .llseek = default_llseek,
-};
-
-static int dma_debug_fs_init(void)
-{
- dma_debug_dent = debugfs_create_dir("dma-api", NULL);
- if (!dma_debug_dent) {
- pr_err("DMA-API: can not create debugfs directory\n");
- return -ENOMEM;
- }
-
- global_disable_dent = debugfs_create_bool("disabled", 0444,
- dma_debug_dent,
- &global_disable);
- if (!global_disable_dent)
- goto out_err;
-
- error_count_dent = debugfs_create_u32("error_count", 0444,
- dma_debug_dent, &error_count);
- if (!error_count_dent)
- goto out_err;
-
- show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
- dma_debug_dent,
- &show_all_errors);
- if (!show_all_errors_dent)
- goto out_err;
-
- show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
- dma_debug_dent,
- &show_num_errors);
- if (!show_num_errors_dent)
- goto out_err;
-
- num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
- dma_debug_dent,
- &num_free_entries);
- if (!num_free_entries_dent)
- goto out_err;
-
- min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
- dma_debug_dent,
- &min_free_entries);
- if (!min_free_entries_dent)
- goto out_err;
-
- filter_dent = debugfs_create_file("driver_filter", 0644,
- dma_debug_dent, NULL, &filter_fops);
- if (!filter_dent)
- goto out_err;
-
- return 0;
-
-out_err:
- debugfs_remove_recursive(dma_debug_dent);
-
- return -ENOMEM;
-}
-
-static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
-{
- struct dma_debug_entry *entry;
- unsigned long flags;
- int count = 0, i;
-
- for (i = 0; i < HASH_SIZE; ++i) {
- spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
- list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
- if (entry->dev == dev) {
- count += 1;
- *out_entry = entry;
- }
- }
- spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
- }
-
- return count;
-}
-
-static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
-{
- struct device *dev = data;
- struct dma_debug_entry *uninitialized_var(entry);
- int count;
-
- if (dma_debug_disabled())
- return 0;
-
- switch (action) {
- case BUS_NOTIFY_UNBOUND_DRIVER:
- count = device_dma_allocations(dev, &entry);
- if (count == 0)
- break;
- err_printk(dev, entry, "DMA-API: device driver has pending "
- "DMA allocations while released from device "
- "[count=%d]\n"
- "One of leaked entries details: "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [mapped as %s]\n",
- count, entry->dev_addr, entry->size,
- dir2name[entry->direction], type2name[entry->type]);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-void dma_debug_add_bus(struct bus_type *bus)
-{
- struct notifier_block *nb;
-
- if (dma_debug_disabled())
- return;
-
- nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- if (nb == NULL) {
- pr_err("dma_debug_add_bus: out of memory\n");
- return;
- }
-
- nb->notifier_call = dma_debug_device_change;
-
- bus_register_notifier(bus, nb);
-}
-
-/*
- * Let the architectures decide how many entries should be preallocated.
- */
-void dma_debug_init(u32 num_entries)
-{
- int i;
-
- /* Do not use dma_debug_initialized here, since we really want to be
- * called to set dma_debug_initialized
- */
- if (global_disable)
- return;
-
- for (i = 0; i < HASH_SIZE; ++i) {
- INIT_LIST_HEAD(&dma_entry_hash[i].list);
- spin_lock_init(&dma_entry_hash[i].lock);
- }
-
- if (dma_debug_fs_init() != 0) {
- pr_err("DMA-API: error creating debugfs entries - disabling\n");
- global_disable = true;
-
- return;
- }
-
- if (req_entries)
- num_entries = req_entries;
-
- if (prealloc_memory(num_entries) != 0) {
- pr_err("DMA-API: debugging out of memory error - disabled\n");
- global_disable = true;
-
- return;
- }
-
- nr_total_entries = num_free_entries;
-
- dma_debug_initialized = true;
-
- pr_info("DMA-API: debugging enabled by kernel config\n");
-}
-
-static __init int dma_debug_cmdline(char *str)
-{
- if (!str)
- return -EINVAL;
-
- if (strncmp(str, "off", 3) == 0) {
- pr_info("DMA-API: debugging disabled on kernel command line\n");
- global_disable = true;
- }
-
- return 0;
-}
-
-static __init int dma_debug_entries_cmdline(char *str)
-{
- int res;
-
- if (!str)
- return -EINVAL;
-
- res = get_option(&str, &req_entries);
-
- if (!res)
- req_entries = 0;
-
- return 0;
-}
-
-__setup("dma_debug=", dma_debug_cmdline);
-__setup("dma_debug_entries=", dma_debug_entries_cmdline);
-
-static void check_unmap(struct dma_debug_entry *ref)
-{
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
-
- bucket = get_hash_bucket(ref, &flags);
- entry = bucket_find_exact(bucket, ref);
-
- if (!entry) {
- /* must drop lock before calling dma_mapping_error */
- put_hash_bucket(bucket, &flags);
-
- if (dma_mapping_error(ref->dev, ref->dev_addr)) {
- err_printk(ref->dev, NULL,
- "DMA-API: device driver tries to free an "
- "invalid DMA memory address\n");
- } else {
- err_printk(ref->dev, NULL,
- "DMA-API: device driver tries to free DMA "
- "memory it has not allocated [device "
- "address=0x%016llx] [size=%llu bytes]\n",
- ref->dev_addr, ref->size);
- }
- return;
- }
-
- if (ref->size != entry->size) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with different size "
- "[device address=0x%016llx] [map size=%llu bytes] "
- "[unmap size=%llu bytes]\n",
- ref->dev_addr, entry->size, ref->size);
- }
-
- if (ref->type != entry->type) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with wrong function "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped as %s] [unmapped as %s]\n",
- ref->dev_addr, ref->size,
- type2name[entry->type], type2name[ref->type]);
- } else if ((entry->type == dma_debug_coherent) &&
- (phys_addr(ref) != phys_addr(entry))) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with different CPU address "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[cpu alloc address=0x%016llx] "
- "[cpu free address=0x%016llx]",
- ref->dev_addr, ref->size,
- phys_addr(entry),
- phys_addr(ref));
- }
-
- if (ref->sg_call_ents && ref->type == dma_debug_sg &&
- ref->sg_call_ents != entry->sg_call_ents) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA sg list with different entry count "
- "[map count=%d] [unmap count=%d]\n",
- entry->sg_call_ents, ref->sg_call_ents);
- }
-
- /*
- * This may be no bug in reality - but most implementations of the
- * DMA API don't handle this properly, so check for it here
- */
- if (ref->direction != entry->direction) {
- err_printk(ref->dev, entry, "DMA-API: device driver frees "
- "DMA memory with different direction "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [unmapped with %s]\n",
- ref->dev_addr, ref->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
- }
-
- /*
- * Drivers should use dma_mapping_error() to check the returned
- * addresses of dma_map_single() and dma_map_page().
- * If not, print this warning message. See Documentation/DMA-API.txt.
- */
- if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
- err_printk(ref->dev, entry,
- "DMA-API: device driver failed to check map error"
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped as %s]",
- ref->dev_addr, ref->size,
- type2name[entry->type]);
- }
-
- hash_bucket_del(entry);
- dma_entry_free(entry);
-
- put_hash_bucket(bucket, &flags);
-}
-
-static void check_for_stack(struct device *dev,
- struct page *page, size_t offset)
-{
- void *addr;
- struct vm_struct *stack_vm_area = task_stack_vm_area(current);
-
- if (!stack_vm_area) {
- /* Stack is direct-mapped. */
- if (PageHighMem(page))
- return;
- addr = page_address(page) + offset;
- if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
- } else {
- /* Stack is vmalloced. */
- int i;
-
- for (i = 0; i < stack_vm_area->nr_pages; i++) {
- if (page != stack_vm_area->pages[i])
- continue;
-
- addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
- err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
- break;
- }
- }
-}
-
-static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
-{
- unsigned long a1 = (unsigned long)addr;
- unsigned long b1 = a1 + len;
- unsigned long a2 = (unsigned long)start;
- unsigned long b2 = (unsigned long)end;
-
- return !(b1 <= a2 || a1 >= b2);
-}
-
-static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
-{
- if (overlap(addr, len, _stext, _etext) ||
- overlap(addr, len, __start_rodata, __end_rodata))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
-}
-
-static void check_sync(struct device *dev,
- struct dma_debug_entry *ref,
- bool to_cpu)
-{
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
-
- bucket = get_hash_bucket(ref, &flags);
-
- entry = bucket_find_contain(&bucket, ref, &flags);
-
- if (!entry) {
- err_printk(dev, NULL, "DMA-API: device driver tries "
- "to sync DMA memory it has not allocated "
- "[device address=0x%016llx] [size=%llu bytes]\n",
- (unsigned long long)ref->dev_addr, ref->size);
- goto out;
- }
-
- if (ref->size > entry->size) {
- err_printk(dev, entry, "DMA-API: device driver syncs"
- " DMA memory outside allocated range "
- "[device address=0x%016llx] "
- "[allocation size=%llu bytes] "
- "[sync offset+size=%llu]\n",
- entry->dev_addr, entry->size,
- ref->size);
- }
-
- if (entry->direction == DMA_BIDIRECTIONAL)
- goto out;
-
- if (ref->direction != entry->direction) {
- err_printk(dev, entry, "DMA-API: device driver syncs "
- "DMA memory with different direction "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [synced with %s]\n",
- (unsigned long long)ref->dev_addr, entry->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
- }
-
- if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
- !(ref->direction == DMA_TO_DEVICE))
- err_printk(dev, entry, "DMA-API: device driver syncs "
- "device read-only DMA memory for cpu "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [synced with %s]\n",
- (unsigned long long)ref->dev_addr, entry->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
-
- if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
- !(ref->direction == DMA_FROM_DEVICE))
- err_printk(dev, entry, "DMA-API: device driver syncs "
- "device write-only DMA memory to device "
- "[device address=0x%016llx] [size=%llu bytes] "
- "[mapped with %s] [synced with %s]\n",
- (unsigned long long)ref->dev_addr, entry->size,
- dir2name[entry->direction],
- dir2name[ref->direction]);
-
- if (ref->sg_call_ents && ref->type == dma_debug_sg &&
- ref->sg_call_ents != entry->sg_call_ents) {
- err_printk(ref->dev, entry, "DMA-API: device driver syncs "
- "DMA sg list with different entry count "
- "[map count=%d] [sync count=%d]\n",
- entry->sg_call_ents, ref->sg_call_ents);
- }
-
-out:
- put_hash_bucket(bucket, &flags);
-}
-
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
- size_t size, int direction, dma_addr_t dma_addr,
- bool map_single)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- if (dma_mapping_error(dev, dma_addr))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->dev = dev;
- entry->type = dma_debug_page;
- entry->pfn = page_to_pfn(page);
- entry->offset = offset,
- entry->dev_addr = dma_addr;
- entry->size = size;
- entry->direction = direction;
- entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
- if (map_single)
- entry->type = dma_debug_single;
-
- check_for_stack(dev, page, offset);
-
- if (!PageHighMem(page)) {
- void *addr = page_address(page) + offset;
-
- check_for_illegal_area(dev, addr, size);
- }
-
- add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_page);
-
-void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- struct dma_debug_entry ref;
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.dev = dev;
- ref.dev_addr = dma_addr;
- bucket = get_hash_bucket(&ref, &flags);
-
- list_for_each_entry(entry, &bucket->list, list) {
- if (!exact_match(&ref, entry))
- continue;
-
- /*
- * The same physical address can be mapped multiple
- * times. Without a hardware IOMMU this results in the
- * same device addresses being put into the dma-debug
- * hash multiple times too. This can result in false
- * positives being reported. Therefore we implement a
- * best-fit algorithm here which updates the first entry
- * from the hash which fits the reference value and is
- * not currently listed as being checked.
- */
- if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
- entry->map_err_type = MAP_ERR_CHECKED;
- break;
- }
- }
-
- put_hash_bucket(bucket, &flags);
-}
-EXPORT_SYMBOL(debug_dma_mapping_error);
-
-void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction, bool map_single)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_page,
- .dev = dev,
- .dev_addr = addr,
- .size = size,
- .direction = direction,
- };
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- if (map_single)
- ref.type = dma_debug_single;
-
- check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_page);
-
-void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
-{
- struct dma_debug_entry *entry;
- struct scatterlist *s;
- int i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sg, s, mapped_ents, i) {
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_sg;
- entry->dev = dev;
- entry->pfn = page_to_pfn(sg_page(s));
- entry->offset = s->offset,
- entry->size = sg_dma_len(s);
- entry->dev_addr = sg_dma_address(s);
- entry->direction = direction;
- entry->sg_call_ents = nents;
- entry->sg_mapped_ents = mapped_ents;
-
- check_for_stack(dev, sg_page(s), s->offset);
-
- if (!PageHighMem(sg_page(s))) {
- check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
- }
-
- add_dma_entry(entry);
- }
-}
-EXPORT_SYMBOL(debug_dma_map_sg);
-
-static int get_nr_mapped_entries(struct device *dev,
- struct dma_debug_entry *ref)
-{
- struct dma_debug_entry *entry;
- struct hash_bucket *bucket;
- unsigned long flags;
- int mapped_ents;
-
- bucket = get_hash_bucket(ref, &flags);
- entry = bucket_find_exact(bucket, ref);
- mapped_ents = 0;
-
- if (entry)
- mapped_ents = entry->sg_mapped_ents;
- put_hash_bucket(bucket, &flags);
-
- return mapped_ents;
-}
-
-void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir)
-{
- struct scatterlist *s;
- int mapped_ents = 0, i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sglist, s, nelems, i) {
-
- struct dma_debug_entry ref = {
- .type = dma_debug_sg,
- .dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
- .dev_addr = sg_dma_address(s),
- .size = sg_dma_len(s),
- .direction = dir,
- .sg_call_ents = nelems,
- };
-
- if (mapped_ents && i >= mapped_ents)
- break;
-
- if (!i)
- mapped_ents = get_nr_mapped_entries(dev, &ref);
-
- check_unmap(&ref);
- }
-}
-EXPORT_SYMBOL(debug_dma_unmap_sg);
-
-void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- if (unlikely(virt == NULL))
- return;
-
- /* handle vmalloc and linear addresses */
- if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_coherent;
- entry->dev = dev;
- entry->offset = offset_in_page(virt);
- entry->size = size;
- entry->dev_addr = dma_addr;
- entry->direction = DMA_BIDIRECTIONAL;
-
- if (is_vmalloc_addr(virt))
- entry->pfn = vmalloc_to_pfn(virt);
- else
- entry->pfn = page_to_pfn(virt_to_page(virt));
-
- add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
-
-void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_coherent,
- .dev = dev,
- .offset = offset_in_page(virt),
- .dev_addr = addr,
- .size = size,
- .direction = DMA_BIDIRECTIONAL,
- };
-
- /* handle vmalloc and linear addresses */
- if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
- return;
-
- if (is_vmalloc_addr(virt))
- ref.pfn = vmalloc_to_pfn(virt);
- else
- ref.pfn = page_to_pfn(virt_to_page(virt));
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_free_coherent);
-
-void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
- int direction, dma_addr_t dma_addr)
-{
- struct dma_debug_entry *entry;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- entry = dma_entry_alloc();
- if (!entry)
- return;
-
- entry->type = dma_debug_resource;
- entry->dev = dev;
- entry->pfn = PHYS_PFN(addr);
- entry->offset = offset_in_page(addr);
- entry->size = size;
- entry->dev_addr = dma_addr;
- entry->direction = direction;
- entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
- add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_resource);
-
-void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- struct dma_debug_entry ref = {
- .type = dma_debug_resource,
- .dev = dev,
- .dev_addr = dma_addr,
- .size = size,
- .direction = direction,
- };
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_resource);
-
-void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
-
-void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_device);
-
-void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = offset + size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
-
-void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
-{
- struct dma_debug_entry ref;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- ref.type = dma_debug_single;
- ref.dev = dev;
- ref.dev_addr = dma_handle;
- ref.size = offset + size;
- ref.direction = direction;
- ref.sg_call_ents = 0;
-
- check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
-
-void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
-{
- struct scatterlist *s;
- int mapped_ents = 0, i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sg, s, nelems, i) {
-
- struct dma_debug_entry ref = {
- .type = dma_debug_sg,
- .dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
- .dev_addr = sg_dma_address(s),
- .size = sg_dma_len(s),
- .direction = direction,
- .sg_call_ents = nelems,
- };
-
- if (!i)
- mapped_ents = get_nr_mapped_entries(dev, &ref);
-
- if (i >= mapped_ents)
- break;
-
- check_sync(dev, &ref, true);
- }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
-
-void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
-{
- struct scatterlist *s;
- int mapped_ents = 0, i;
-
- if (unlikely(dma_debug_disabled()))
- return;
-
- for_each_sg(sg, s, nelems, i) {
-
- struct dma_debug_entry ref = {
- .type = dma_debug_sg,
- .dev = dev,
- .pfn = page_to_pfn(sg_page(s)),
- .offset = s->offset,
- .dev_addr = sg_dma_address(s),
- .size = sg_dma_len(s),
- .direction = direction,
- .sg_call_ents = nelems,
- };
- if (!i)
- mapped_ents = get_nr_mapped_entries(dev, &ref);
-
- if (i >= mapped_ents)
- break;
-
- check_sync(dev, &ref, false);
- }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
-
-static int __init dma_debug_driver_setup(char *str)
-{
- int i;
-
- for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
- current_driver_name[i] = *str;
- if (*str == 0)
- break;
- }
-
- if (current_driver_name[0])
- pr_info("DMA-API: enable driver filter for driver [%s]\n",
- current_driver_name);
-
-
- return 1;
-}
-__setup("dma_debug_driver=", dma_debug_driver_setup);
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
deleted file mode 100644
index c9e8e21cb334..000000000000
--- a/lib/dma-direct.c
+++ /dev/null
@@ -1,161 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-contiguous.h>
-#include <linux/pfn.h>
-
-#define DIRECT_MAPPING_ERROR 0
-
-/*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
- */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
-
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
- const char *caller)
-{
- if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
- if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
- dev_err(dev,
- "%s: overflow %pad+%zu of device mask %llx\n",
- caller, &dma_addr, size, *dev->dma_mask);
- }
- return false;
- }
- return true;
-}
-
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
-{
- return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
-}
-
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- int page_order = get_order(size);
- struct page *page = NULL;
-
- /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
- gfp |= GFP_DMA;
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
- gfp |= GFP_DMA32;
-
-again:
- /* CMA can be used only in the context which permits sleeping */
- if (gfpflags_allow_blocking(gfp)) {
- page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
- }
- if (!page)
- page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- __free_pages(page, page_order);
- page = NULL;
-
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
- !(gfp & GFP_DMA)) {
- gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
- goto again;
- }
- }
-
- if (!page)
- return NULL;
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- memset(page_address(page), 0, size);
- return page_address(page);
-}
-
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_addr, unsigned long attrs)
-{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
-
- if (!check_addr(dev, dma_addr, size, __func__))
- return DIRECT_MAPPING_ERROR;
- return dma_addr;
-}
-
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
- if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
- return 0;
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-int dma_direct_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_ZONE_DMA
- if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
- return 0;
-#else
- /*
- * Because 32-bit DMA masks are so common we expect every architecture
- * to be able to satisfy them - either by not supporting more physical
- * memory, or by providing a ZONE_DMA32. If neither is the case, the
- * architecture needs to use an IOMMU instead of the direct mapping.
- */
- if (mask < DMA_BIT_MASK(32))
- return 0;
-#endif
- return 1;
-}
-
-static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc,
- .free = dma_direct_free,
- .map_page = dma_direct_map_page,
- .map_sg = dma_direct_map_sg,
- .dma_supported = dma_direct_supported,
- .mapping_error = dma_direct_mapping_error,
- .is_phys = 1,
-};
-EXPORT_SYMBOL(dma_direct_ops);
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
deleted file mode 100644
index 8e61a02ef9ca..000000000000
--- a/lib/dma-virt.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * lib/dma-virt.c
- *
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (ret)
- *dma_handle = (uintptr_t)ret;
- return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- BUG_ON(!sg_page(sg));
- sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
- .alloc = dma_virt_alloc,
- .free = dma_virt_free,
- .map_page = dma_virt_map_page,
- .map_sg = dma_virt_map_sg,
-};
-EXPORT_SYMBOL(dma_virt_ops);
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c5edbedd364d..5cff72f18c4a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -10,6 +10,66 @@
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/atomic.h>
+#include <linux/kexec.h>
+#include <linux/utsname.h>
+
+static char dump_stack_arch_desc_str[128];
+
+/**
+ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
+ * @fmt: printf-style format string
+ * @...: arguments for the format string
+ *
+ * The configured string will be printed right after utsname during task
+ * dumps. Usually used to add arch-specific system identifiers. If an
+ * arch wants to make use of such an ID string, it should initialize this
+ * as soon as possible during boot.
+ */
+void __init dump_stack_set_arch_desc(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
+ fmt, args);
+ va_end(args);
+}
+
+/**
+ * dump_stack_print_info - print generic debug info for dump_stack()
+ * @log_lvl: log level
+ *
+ * Arch-specific dump_stack() implementations can use this function to
+ * print out the same debug information as the generic dump_stack().
+ */
+void dump_stack_print_info(const char *log_lvl)
+{
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+ log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+ kexec_crash_loaded() ? "Kdump: loaded " : "",
+ print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+
+ if (dump_stack_arch_desc_str[0] != '\0')
+ printk("%sHardware name: %s\n",
+ log_lvl, dump_stack_arch_desc_str);
+
+ print_worker_info(log_lvl, current);
+}
+
+/**
+ * show_regs_print_info - print generic debug info for show_regs()
+ * @log_lvl: log level
+ *
+ * show_regs() implementations can use this function to print out generic
+ * debug information.
+ */
+void show_regs_print_info(const char *log_lvl)
+{
+ dump_stack_print_info(log_lvl);
+}
static void __dump_stack(void)
{
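
The hunk above adds dump_stack_set_arch_desc(), a printf-style setter that formats a hardware description into a fixed static buffer, later printed as the "Hardware name:" line. A hedged, self-contained userspace sketch of that vararg-into-static-buffer pattern (the names and buffer size below are illustrative only, not the kernel API):

/*
 * Minimal model of the pattern used by dump_stack_set_arch_desc() above:
 * format a description into a static buffer once, print it later only if
 * it was ever set.  Illustrative names, not kernel code.
 */
#include <stdarg.h>
#include <stdio.h>

static char arch_desc[128];

static void set_arch_desc(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(arch_desc, sizeof(arch_desc), fmt, args);
	va_end(args);
}

int main(void)
{
	set_arch_desc("%s rev %d", "ExampleBoard", 3);
	if (arch_desc[0] != '\0')
		printf("Hardware name: %s\n", arch_desc);
	return 0;
}
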
diff --git a/lib/errseq.c b/lib/errseq.c
index df782418b333..81f9e33aa7e7 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
* errseq_sample() - Grab current errseq_t value.
* @eseq: Pointer to errseq_t to be sampled.
*
- * This function allows callers to sample an errseq_t value, marking it as
- * "seen" if required.
+ * This function allows callers to initialise their errseq_t variable.
+ * If the error has been "seen", new callers will not see an old error.
+ * If there is an unseen error in @eseq, the caller of this function will
+ * see it the next time it checks for an error.
*
+ * Context: Any context.
* Return: The current errseq value.
*/
errseq_t errseq_sample(errseq_t *eseq)
{
errseq_t old = READ_ONCE(*eseq);
- errseq_t new = old;
- /*
- * For the common case of no errors ever having been set, we can skip
- * marking the SEEN bit. Once an error has been set, the value will
- * never go back to zero.
- */
- if (old != 0) {
- new |= ERRSEQ_SEEN;
- if (old != new)
- cmpxchg(eseq, old, new);
- }
- return new;
+ /* If nobody has seen this error yet, then we can be the first. */
+ if (!(old & ERRSEQ_SEEN))
+ old = 0;
+ return old;
}
EXPORT_SYMBOL(errseq_sample);
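
With the errseq_sample() change above, an unseen error yields a zero cursor, so a caller that samples now will still observe that error on its first check. A hedged, self-contained userspace model of that behaviour (the bit layout and helper names are simplified assumptions, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t errseq_t;

#define MODEL_SEEN	0x1u	/* models ERRSEQ_SEEN */
#define MODEL_STEP	0x2u	/* counter advances above the SEEN bit */

static errseq_t eseq;		/* the shared error sequence */

static void model_set(void)	/* record an error, mark it unseen */
{
	eseq = (eseq + MODEL_STEP) & ~MODEL_SEEN;
}

static errseq_t model_sample(void)	/* new errseq_sample() semantics */
{
	errseq_t old = eseq;

	if (!(old & MODEL_SEEN))
		old = 0;	/* unseen error: a new caller must still see it */
	return old;
}

static int model_check(errseq_t since)	/* "has anything changed since?" */
{
	return eseq != since;
}

int main(void)
{
	errseq_t cursor;

	model_set();			/* an error occurs, nobody saw it */
	cursor = model_sample();	/* returns 0, not the raw value */
	printf("pending after sample: %d\n", model_check(cursor));	/* 1 */

	eseq |= MODEL_SEEN;		/* a checker reports it, marks it seen */
	cursor = model_sample();	/* now returns the current value */
	printf("pending after seen:   %d\n", model_check(cursor));	/* 0 */
	return 0;
}
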
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5985a25e6cbc..5367ffa5c18f 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -132,7 +132,12 @@ static int __init find_bit_test(void)
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
- test_find_first_bit(bitmap, BITMAP_LEN);
+
+ /*
+ * test_find_first_bit() may take some time, so
+ * traverse only part of bitmap to avoid soft lockup.
+ */
+ test_find_first_bit(bitmap, BITMAP_LEN / 10);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
index 9ae5b62c8a0d..89752d0b23e8 100644
--- a/lib/fonts/font_7x14.c
+++ b/lib/fonts/font_7x14.c
@@ -2058,7 +2058,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2074,7 +2074,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x70, /* 0111000 */
0x00, /* 0000000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2090,7 +2090,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 0000110 */
0x18, /* 0001100 */
0x30, /* 0011000 */
@@ -2106,7 +2106,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2122,7 +2122,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2138,7 +2138,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x60, /* 0110000 */
0x30, /* 0011000 */
0x18, /* 0001100 */
@@ -2154,7 +2154,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x38, /* 0011100 */
@@ -2170,7 +2170,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2186,7 +2186,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0xe0, /* 1110000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2202,7 +2202,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2218,7 +2218,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2234,7 +2234,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x00, /* 0000000 */
0x6c, /* 0110110 */
0x00, /* 0000000 */
@@ -2250,7 +2250,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x30, /* 0011000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -2266,7 +2266,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2282,7 +2282,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2298,7 +2298,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x30, /* 0011000 */
0x48, /* 0100100 */
0x48, /* 0100100 */
@@ -2314,7 +2314,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0xfc, /* 1111110 */
@@ -2330,7 +2330,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2346,7 +2346,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x00, /* 0000000 */
0x3e, /* 0011111 */
0x6c, /* 0110110 */
@@ -2362,7 +2362,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x10, /* 0001000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -2378,7 +2378,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2394,7 +2394,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0xc0, /* 1100000 */
0x60, /* 0110000 */
0x30, /* 0011000 */
@@ -2410,7 +2410,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x30, /* 0011000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -2426,7 +2426,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 0110000 */
0x30, /* 0011000 */
0x18, /* 0001100 */
@@ -2442,7 +2442,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0x00, /* 0000000 */
@@ -2458,7 +2458,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x18, /* 0001100 */
0x70, /* 0111000 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xcc, /* 1100110 */
0x00, /* 0000000 */
0x78, /* 0111100 */
@@ -2474,7 +2474,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xcc, /* 1100110 */
0x00, /* 0000000 */
0xcc, /* 1100110 */
@@ -2490,7 +2490,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x7c, /* 0111110 */
@@ -2506,7 +2506,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x64, /* 0110010 */
@@ -2522,7 +2522,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x00, /* 0000000 */
0xcc, /* 1100110 */
0xcc, /* 1100110 */
@@ -2538,7 +2538,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 1111100 */
0xcc, /* 1100110 */
0xcc, /* 1100110 */
@@ -2554,7 +2554,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x1c, /* 0001110 */
0x36, /* 0011011 */
0x30, /* 0011000 */
@@ -2570,7 +2570,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2586,7 +2586,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2602,7 +2602,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2618,7 +2618,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 0001100 */
0x30, /* 0011000 */
0x60, /* 0110000 */
@@ -2634,7 +2634,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x00, /* 0000000 */
0x76, /* 0111011 */
0xdc, /* 1101110 */
@@ -2650,7 +2650,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 0111011 */
0xdc, /* 1101110 */
0x00, /* 0000000 */
@@ -2666,7 +2666,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x00, /* 0000000 */
0x78, /* 0111100 */
0xd8, /* 1101100 */
@@ -2682,7 +2682,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x00, /* 0000000 */
0x70, /* 0111000 */
0xd8, /* 1101100 */
@@ -2698,7 +2698,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x00, /* 0000000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2714,7 +2714,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2730,7 +2730,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2746,7 +2746,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x60, /* 0110000 */
0xe0, /* 1110000 */
0x62, /* 0110001 */
@@ -2762,7 +2762,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x7c, /* 0111110 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x60, /* 0110000 */
0xe0, /* 1110000 */
0x62, /* 0110001 */
@@ -2778,7 +2778,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x0c, /* 0000110 */
0x00, /* 0000000 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x00, /* 0000000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2794,7 +2794,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2810,7 +2810,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2826,7 +2826,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x88, /* 1000100 */
0x22, /* 0010001 */
0x88, /* 1000100 */
@@ -2842,7 +2842,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x88, /* 1000100 */
0x22, /* 0010001 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x54, /* 0101010 */
0xaa, /* 1010101 */
0x54, /* 0101010 */
@@ -2858,7 +2858,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x54, /* 0101010 */
0xaa, /* 1010101 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0xee, /* 1110111 */
0xba, /* 1011101 */
0xee, /* 1110111 */
@@ -2874,7 +2874,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xee, /* 1110111 */
0xba, /* 1011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2890,7 +2890,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2906,7 +2906,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -2922,7 +2922,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -2938,7 +2938,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2954,7 +2954,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -2970,7 +2970,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -2986,7 +2986,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3002,7 +3002,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3018,7 +3018,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3034,7 +3034,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3050,7 +3050,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3066,7 +3066,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3082,7 +3082,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3098,7 +3098,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3114,7 +3114,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3130,7 +3130,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3146,7 +3146,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3162,7 +3162,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3178,7 +3178,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3194,7 +3194,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3210,7 +3210,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3226,7 +3226,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3242,7 +3242,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3258,7 +3258,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3274,7 +3274,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3290,7 +3290,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3306,7 +3306,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3322,7 +3322,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3338,7 +3338,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3354,7 +3354,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3370,7 +3370,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3386,7 +3386,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3402,7 +3402,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3418,7 +3418,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3434,7 +3434,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3450,7 +3450,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3466,7 +3466,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x6c, /* 0110110 */
0x6c, /* 0110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3482,7 +3482,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x30, /* 0011000 */
0x30, /* 0011000 */
0x30, /* 0011000 */
@@ -3498,7 +3498,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3514,7 +3514,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
@@ -3530,7 +3530,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xfe, /* 1111111 */
0xfe, /* 1111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3546,7 +3546,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xfe, /* 1111111 */
0xfe, /* 1111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xe0, /* 1110000 */
0xe0, /* 1110000 */
0xe0, /* 1110000 */
@@ -3562,7 +3562,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xe0, /* 1110000 */
0xe0, /* 1110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x1e, /* 0001111 */
0x1e, /* 0001111 */
0x1e, /* 0001111 */
@@ -3578,7 +3578,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x1e, /* 0001111 */
0x1e, /* 0001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
0xfe, /* 1111111 */
@@ -3594,7 +3594,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3610,7 +3610,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x00, /* 0000000 */
0x78, /* 0111100 */
0xcc, /* 1100110 */
@@ -3626,7 +3626,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0xcc, /* 1100110 */
@@ -3642,7 +3642,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0xfe, /* 1111111 */
@@ -3658,7 +3658,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0xcc, /* 1100110 */
@@ -3674,7 +3674,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3690,7 +3690,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3706,7 +3706,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0xc0, /* 1100000 */
0x80, /* 1000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3722,7 +3722,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x00, /* 0000000 */
0xfc, /* 1111110 */
0x30, /* 0011000 */
@@ -3738,7 +3738,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -3754,7 +3754,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x00, /* 0000000 */
0x38, /* 0011100 */
0x6c, /* 0110110 */
@@ -3770,7 +3770,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x00, /* 0000000 */
0x3c, /* 0011110 */
0x60, /* 0110000 */
@@ -3786,7 +3786,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3802,7 +3802,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x06, /* 0000011 */
@@ -3818,7 +3818,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x00, /* 0000000 */
0x1c, /* 0001110 */
0x30, /* 0011000 */
@@ -3834,7 +3834,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x78, /* 0111100 */
@@ -3850,7 +3850,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3866,7 +3866,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3882,7 +3882,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x60, /* 0110000 */
@@ -3898,7 +3898,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x18, /* 0001100 */
@@ -3914,7 +3914,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x00, /* 0000000 */
0x1c, /* 0001110 */
0x36, /* 0011011 */
@@ -3930,7 +3930,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x30, /* 0011000 */
0x30, /* 0011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 0001100 */
0x18, /* 0001100 */
0x18, /* 0001100 */
@@ -3946,7 +3946,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3962,7 +3962,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -3978,7 +3978,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 0011100 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -3994,7 +3994,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4010,7 +4010,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4026,7 +4026,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x1e, /* 0001111 */
0x18, /* 0001100 */
0x18, /* 0001100 */
@@ -4042,7 +4042,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0xd8, /* 1101100 */
0x6c, /* 0110110 */
0x6c, /* 0110110 */
@@ -4058,7 +4058,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 0111100 */
0xcc, /* 1100110 */
0x18, /* 0001100 */
@@ -4074,7 +4074,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
@@ -4090,7 +4090,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = {
0x00, /* 0000000 */
0x00, /* 0000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 0000000 */
0x00, /* 0000000 */
0x00, /* 0000000 */
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
index 34292cdfaa23..b7ab1f5fbdb8 100644
--- a/lib/fonts/font_8x16.c
+++ b/lib/fonts/font_8x16.c
@@ -2316,7 +2316,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2334,7 +2334,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -2352,7 +2352,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x00, /* 00000000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2370,7 +2370,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2388,7 +2388,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -2406,7 +2406,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2424,7 +2424,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -2442,7 +2442,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2460,7 +2460,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2478,7 +2478,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2496,7 +2496,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2514,7 +2514,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2532,7 +2532,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2550,7 +2550,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2568,7 +2568,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2586,7 +2586,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x38, /* 00111000 */
@@ -2604,7 +2604,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2622,7 +2622,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2640,7 +2640,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3e, /* 00111110 */
@@ -2658,7 +2658,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x00, /* 00000000 */
0x10, /* 00010000 */
0x38, /* 00111000 */
@@ -2676,7 +2676,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2694,7 +2694,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2712,7 +2712,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x00, /* 00000000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -2730,7 +2730,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0x30, /* 00110000 */
@@ -2748,7 +2748,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -2766,7 +2766,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x78, /* 01111000 */
0x00, /* 00000000 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2784,7 +2784,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0x00, /* 00000000 */
0xc6, /* 11000110 */
0x00, /* 00000000 */
@@ -2802,7 +2802,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2820,7 +2820,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -2838,7 +2838,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2856,7 +2856,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0x00, /* 00000000 */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
@@ -2874,7 +2874,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x00, /* 00000000 */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
@@ -2892,7 +2892,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2910,7 +2910,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x00, /* 00000000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2928,7 +2928,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2946,7 +2946,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2964,7 +2964,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2982,7 +2982,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -3000,7 +3000,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -3018,7 +3018,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -3036,7 +3036,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x30, /* 00110000 */
@@ -3054,7 +3054,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3072,7 +3072,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3090,7 +3090,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0xe0, /* 11100000 */
@@ -3108,7 +3108,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x00, /* 00000000 */
0x60, /* 01100000 */
0xe0, /* 11100000 */
@@ -3126,7 +3126,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -3144,7 +3144,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3162,7 +3162,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3180,7 +3180,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x11, /* 00010001 */
0x44, /* 01000100 */
0x11, /* 00010001 */
@@ -3198,7 +3198,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x11, /* 00010001 */
0x44, /* 01000100 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -3216,7 +3216,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0xdd, /* 11011101 */
0x77, /* 01110111 */
0xdd, /* 11011101 */
@@ -3234,7 +3234,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xdd, /* 11011101 */
0x77, /* 01110111 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3252,7 +3252,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3270,7 +3270,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3288,7 +3288,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3306,7 +3306,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3324,7 +3324,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3342,7 +3342,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3360,7 +3360,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3378,7 +3378,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3396,7 +3396,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3414,7 +3414,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3432,7 +3432,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3450,7 +3450,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3468,7 +3468,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3486,7 +3486,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3504,7 +3504,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3522,7 +3522,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3540,7 +3540,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3558,7 +3558,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3576,7 +3576,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3594,7 +3594,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3612,7 +3612,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3630,7 +3630,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3648,7 +3648,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3666,7 +3666,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3684,7 +3684,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3702,7 +3702,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3720,7 +3720,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3738,7 +3738,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3756,7 +3756,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3774,7 +3774,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3792,7 +3792,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3810,7 +3810,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3828,7 +3828,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3846,7 +3846,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3864,7 +3864,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3882,7 +3882,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -3900,7 +3900,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3918,7 +3918,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -3936,7 +3936,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3954,7 +3954,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -3972,7 +3972,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -3990,7 +3990,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -4008,7 +4008,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -4026,7 +4026,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -4044,7 +4044,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4062,7 +4062,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -4080,7 +4080,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -4098,7 +4098,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4116,7 +4116,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -4134,7 +4134,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4152,7 +4152,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4170,7 +4170,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4188,7 +4188,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -4206,7 +4206,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -4224,7 +4224,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -4242,7 +4242,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1e, /* 00011110 */
@@ -4260,7 +4260,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4278,7 +4278,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4296,7 +4296,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1c, /* 00011100 */
@@ -4314,7 +4314,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4332,7 +4332,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4350,7 +4350,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4368,7 +4368,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4386,7 +4386,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4404,7 +4404,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x0e, /* 00001110 */
@@ -4422,7 +4422,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -4440,7 +4440,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4458,7 +4458,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4476,7 +4476,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x00, /* 00000000 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -4494,7 +4494,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4512,7 +4512,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4530,7 +4530,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x00, /* 00000000 */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
@@ -4548,7 +4548,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x00, /* 00000000 */
0x6c, /* 01101100 */
0x36, /* 00110110 */
@@ -4566,7 +4566,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x00, /* 00000000 */
0x3c, /* 00111100 */
0x66, /* 01100110 */
@@ -4584,7 +4584,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -4602,7 +4602,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
index 751becf3c521..2328ebc8bab5 100644
--- a/lib/fonts/font_8x8.c
+++ b/lib/fonts/font_8x8.c
@@ -1291,7 +1291,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -1301,7 +1301,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x78, /* 01111000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0xcc, /* 11001100 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -1311,7 +1311,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1321,7 +1321,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x78, /* 01111000 */
@@ -1331,7 +1331,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -1341,7 +1341,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x78, /* 01111000 */
@@ -1351,7 +1351,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x30, /* 00110000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1361,7 +1361,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1371,7 +1371,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x38, /* 00111000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1381,7 +1381,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1391,7 +1391,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1401,7 +1401,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x66, /* 01100110 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -1411,7 +1411,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x38, /* 00111000 */
@@ -1421,7 +1421,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1431,7 +1431,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1441,7 +1441,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x7c, /* 01111100 */
@@ -1451,7 +1451,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xfe, /* 11111110 */
@@ -1461,7 +1461,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1471,7 +1471,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x3e, /* 00111110 */
0x6c, /* 01101100 */
0xcc, /* 11001100 */
@@ -1481,7 +1481,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1491,7 +1491,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1501,7 +1501,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1511,7 +1511,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x78, /* 01111000 */
0x84, /* 10000100 */
0x00, /* 00000000 */
@@ -1521,7 +1521,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 01100000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1531,7 +1531,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1541,7 +1541,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x06, /* 00000110 */
0xfc, /* 11111100 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1551,7 +1551,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1561,7 +1561,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -1571,7 +1571,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x64, /* 01100100 */
@@ -1581,7 +1581,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfc, /* 11111100 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x66, /* 01100110 */
0x66, /* 01100110 */
0x3c, /* 00111100 */
@@ -1591,7 +1591,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -1601,7 +1601,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0xc7, /* 11000111 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x18, /* 00011000 */
@@ -1611,7 +1611,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1621,7 +1621,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1631,7 +1631,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1641,7 +1641,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1651,7 +1651,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1661,7 +1661,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x66, /* 01100110 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1671,7 +1671,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x3c, /* 00111100 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1681,7 +1681,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1691,7 +1691,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1701,7 +1701,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3e, /* 00111110 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1711,7 +1711,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1721,7 +1721,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1731,7 +1731,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x0f, /* 00001111 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1741,7 +1741,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xdf, /* 11011111 */
0x06, /* 00000110 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1751,7 +1751,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x33, /* 00110011 */
0x66, /* 01100110 */
@@ -1761,7 +1761,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0xcc, /* 11001100 */
0x66, /* 01100110 */
@@ -1771,7 +1771,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x22, /* 00100010 */
0x88, /* 10001000 */
0x22, /* 00100010 */
@@ -1781,7 +1781,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x22, /* 00100010 */
0x88, /* 10001000 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -1791,7 +1791,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0x77, /* 01110111 */
0xdd, /* 11011101 */
0x77, /* 01110111 */
@@ -1801,7 +1801,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x77, /* 01110111 */
0xdd, /* 11011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1811,7 +1811,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1821,7 +1821,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1831,7 +1831,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1841,7 +1841,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1851,7 +1851,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xf8, /* 11111000 */
@@ -1861,7 +1861,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1871,7 +1871,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1881,7 +1881,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -1891,7 +1891,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1901,7 +1901,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1911,7 +1911,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1921,7 +1921,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1931,7 +1931,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1941,7 +1941,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1951,7 +1951,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1961,7 +1961,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1971,7 +1971,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1981,7 +1981,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1991,7 +1991,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2001,7 +2001,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2011,7 +2011,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2021,7 +2021,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3f, /* 00111111 */
@@ -2031,7 +2031,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2041,7 +2041,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2051,7 +2051,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2061,7 +2061,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2071,7 +2071,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2081,7 +2081,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2091,7 +2091,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2101,7 +2101,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2111,7 +2111,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2121,7 +2121,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2131,7 +2131,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2141,7 +2141,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1f, /* 00011111 */
@@ -2151,7 +2151,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2161,7 +2161,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2171,7 +2171,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2181,7 +2181,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2191,7 +2191,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2201,7 +2201,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2211,7 +2211,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2221,7 +2221,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -2231,7 +2231,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -2241,7 +2241,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2251,7 +2251,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2261,7 +2261,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x78, /* 01111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -2271,7 +2271,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -2281,7 +2281,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -2291,7 +2291,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x6c, /* 01101100 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0x60, /* 01100000 */
@@ -2301,7 +2301,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2311,7 +2311,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2321,7 +2321,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0xc0, /* 11000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2331,7 +2331,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x7e, /* 01111110 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2341,7 +2341,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x7e, /* 01111110 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2351,7 +2351,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2361,7 +2361,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xee, /* 11101110 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x0e, /* 00001110 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2371,7 +2371,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2381,7 +2381,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x06, /* 00000110 */
0x0c, /* 00001100 */
0x7e, /* 01111110 */
@@ -2391,7 +2391,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x60, /* 01100000 */
0xc0, /* 11000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x1e, /* 00011110 */
0x30, /* 00110000 */
0x60, /* 01100000 */
@@ -2401,7 +2401,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x1e, /* 00011110 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
@@ -2411,7 +2411,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0xfe, /* 11111110 */
0x00, /* 00000000 */
@@ -2421,7 +2421,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -2431,7 +2431,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2441,7 +2441,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2451,7 +2451,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x1b, /* 00011011 */
@@ -2461,7 +2461,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2471,7 +2471,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0xd8, /* 11011000 */
0x70, /* 01110000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2481,7 +2481,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2491,7 +2491,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -2501,7 +2501,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2511,7 +2511,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2521,7 +2521,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
0x0c, /* 00001100 */
@@ -2531,7 +2531,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x1c, /* 00011100 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x6c, /* 01101100 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2541,7 +2541,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 01111000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2551,7 +2551,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2561,7 +2561,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b0514c0a7445..b15d3c342c5b 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -1296,7 +1296,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 128 0x80 '€' */
+ /* 128 0x80 'Ç' */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -1306,7 +1306,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x78, /* 01111000 */
- /* 129 0x81 '' */
+ /* 129 0x81 'ü' */
0xcc, /* 11001100 */
0x00, /* 00000000 */
0xcc, /* 11001100 */
@@ -1316,7 +1316,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 130 0x82 '‚' */
+ /* 130 0x82 'é' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1326,7 +1326,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 131 0x83 'ƒ' */
+ /* 131 0x83 'â' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x78, /* 01111000 */
@@ -1336,7 +1336,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 132 0x84 '„' */
+ /* 132 0x84 'ä' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x78, /* 01111000 */
@@ -1346,7 +1346,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 133 0x85 '…' */
+ /* 133 0x85 'à' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x78, /* 01111000 */
@@ -1356,7 +1356,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 134 0x86 '†' */
+ /* 134 0x86 'å' */
0x30, /* 00110000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1366,7 +1366,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 135 0x87 '‡' */
+ /* 135 0x87 'ç' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1376,7 +1376,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0c, /* 00001100 */
0x38, /* 00111000 */
- /* 136 0x88 'ˆ' */
+ /* 136 0x88 'ê' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1386,7 +1386,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 137 0x89 '‰' */
+ /* 137 0x89 'ë' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1396,7 +1396,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 138 0x8a 'Š' */
+ /* 138 0x8a 'è' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1406,7 +1406,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 139 0x8b '‹' */
+ /* 139 0x8b 'ï' */
0x66, /* 01100110 */
0x00, /* 00000000 */
0x38, /* 00111000 */
@@ -1416,7 +1416,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 140 0x8c 'Œ' */
+ /* 140 0x8c 'î' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x38, /* 00111000 */
@@ -1426,7 +1426,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 141 0x8d '' */
+ /* 141 0x8d 'ì' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1436,7 +1436,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 142 0x8e 'Ž' */
+ /* 142 0x8e 'Ä' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1446,7 +1446,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 143 0x8f '' */
+ /* 143 0x8f 'Å' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x7c, /* 01111100 */
@@ -1456,7 +1456,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 144 0x90 '' */
+ /* 144 0x90 'É' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xfe, /* 11111110 */
@@ -1466,7 +1466,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 145 0x91 '‘' */
+ /* 145 0x91 'æ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -1476,7 +1476,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 146 0x92 '’' */
+ /* 146 0x92 'Æ' */
0x3e, /* 00111110 */
0x6c, /* 01101100 */
0xcc, /* 11001100 */
@@ -1486,7 +1486,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 147 0x93 '“' */
+ /* 147 0x93 'ô' */
0x7c, /* 01111100 */
0x82, /* 10000010 */
0x7c, /* 01111100 */
@@ -1496,7 +1496,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 148 0x94 '”' */
+ /* 148 0x94 'ö' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0x7c, /* 01111100 */
@@ -1506,7 +1506,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 149 0x95 '•' */
+ /* 149 0x95 'ò' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1516,7 +1516,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 150 0x96 '–' */
+ /* 150 0x96 'û' */
0x78, /* 01111000 */
0x84, /* 10000100 */
0x00, /* 00000000 */
@@ -1526,7 +1526,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 151 0x97 '—' */
+ /* 151 0x97 'ù' */
0x60, /* 01100000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1536,7 +1536,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 152 0x98 '˜' */
+ /* 152 0x98 'ÿ' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1546,7 +1546,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x06, /* 00000110 */
0xfc, /* 11111100 */
- /* 153 0x99 '™' */
+ /* 153 0x99 'Ö' */
0xc6, /* 11000110 */
0x38, /* 00111000 */
0x6c, /* 01101100 */
@@ -1556,7 +1556,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 154 0x9a 'š' */
+ /* 154 0x9a 'Ü' */
0xc6, /* 11000110 */
0x00, /* 00000000 */
0xc6, /* 11000110 */
@@ -1566,7 +1566,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 155 0x9b '›' */
+ /* 155 0x9b '¢' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -1576,7 +1576,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 156 0x9c 'œ' */
+ /* 156 0x9c '£' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x64, /* 01100100 */
@@ -1586,7 +1586,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfc, /* 11111100 */
0x00, /* 00000000 */
- /* 157 0x9d '' */
+ /* 157 0x9d '¥' */
0x66, /* 01100110 */
0x66, /* 01100110 */
0x3c, /* 00111100 */
@@ -1596,7 +1596,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 158 0x9e 'ž' */
+ /* 158 0x9e '₧' */
0xf8, /* 11111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -1606,7 +1606,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0xc7, /* 11000111 */
- /* 159 0x9f 'Ÿ' */
+ /* 159 0x9f 'ƒ' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x18, /* 00011000 */
@@ -1616,7 +1616,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 160 0xa0 ' ' */
+ /* 160 0xa0 'á' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0x78, /* 01111000 */
@@ -1626,7 +1626,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 161 0xa1 '¡' */
+ /* 161 0xa1 'í' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -1636,7 +1636,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 162 0xa2 '¢' */
+ /* 162 0xa2 'ó' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x7c, /* 01111100 */
@@ -1646,7 +1646,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0x00, /* 00000000 */
- /* 163 0xa3 '£' */
+ /* 163 0xa3 'ú' */
0x18, /* 00011000 */
0x30, /* 00110000 */
0xcc, /* 11001100 */
@@ -1656,7 +1656,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 164 0xa4 '¤' */
+ /* 164 0xa4 'ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1666,7 +1666,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x66, /* 01100110 */
0x00, /* 00000000 */
- /* 165 0xa5 '¥' */
+ /* 165 0xa5 'Ñ' */
0x76, /* 01110110 */
0xdc, /* 11011100 */
0x00, /* 00000000 */
@@ -1676,7 +1676,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xce, /* 11001110 */
0x00, /* 00000000 */
- /* 166 0xa6 '¦' */
+ /* 166 0xa6 'ª' */
0x3c, /* 00111100 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1686,7 +1686,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 167 0xa7 '§' */
+ /* 167 0xa7 'º' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -1696,7 +1696,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 168 0xa8 '¨' */
+ /* 168 0xa8 '¿' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1706,7 +1706,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3e, /* 00111110 */
0x00, /* 00000000 */
- /* 169 0xa9 '©' */
+ /* 169 0xa9 '⌐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1716,7 +1716,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 170 0xaa 'ª' */
+ /* 170 0xaa '¬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1726,7 +1726,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 171 0xab '«' */
+ /* 171 0xab '½' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1736,7 +1736,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x0f, /* 00001111 */
- /* 172 0xac '¬' */
+ /* 172 0xac '¼' */
0x63, /* 01100011 */
0xe6, /* 11100110 */
0x6c, /* 01101100 */
@@ -1746,7 +1746,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xdf, /* 11011111 */
0x06, /* 00000110 */
- /* 173 0xad '­' */
+ /* 173 0xad '¡' */
0x18, /* 00011000 */
0x00, /* 00000000 */
0x18, /* 00011000 */
@@ -1756,7 +1756,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 174 0xae '®' */
+ /* 174 0xae '«' */
0x00, /* 00000000 */
0x33, /* 00110011 */
0x66, /* 01100110 */
@@ -1766,7 +1766,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 175 0xaf '¯' */
+ /* 175 0xaf '»' */
0x00, /* 00000000 */
0xcc, /* 11001100 */
0x66, /* 01100110 */
@@ -1776,7 +1776,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 176 0xb0 '°' */
+ /* 176 0xb0 '░' */
0x22, /* 00100010 */
0x88, /* 10001000 */
0x22, /* 00100010 */
@@ -1786,7 +1786,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x22, /* 00100010 */
0x88, /* 10001000 */
- /* 177 0xb1 '±' */
+ /* 177 0xb1 '▒' */
0x55, /* 01010101 */
0xaa, /* 10101010 */
0x55, /* 01010101 */
@@ -1796,7 +1796,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x55, /* 01010101 */
0xaa, /* 10101010 */
- /* 178 0xb2 '²' */
+ /* 178 0xb2 '▓' */
0x77, /* 01110111 */
0xdd, /* 11011101 */
0x77, /* 01110111 */
@@ -1806,7 +1806,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x77, /* 01110111 */
0xdd, /* 11011101 */
- /* 179 0xb3 '³' */
+ /* 179 0xb3 '│' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1816,7 +1816,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 180 0xb4 '´' */
+ /* 180 0xb4 '┤' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1826,7 +1826,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 181 0xb5 'µ' */
+ /* 181 0xb5 '╡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1836,7 +1836,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 182 0xb6 '¶' */
+ /* 182 0xb6 '╢' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1846,7 +1846,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 183 0xb7 '·' */
+ /* 183 0xb7 '╖' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1856,7 +1856,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 184 0xb8 '¸' */
+ /* 184 0xb8 '╕' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xf8, /* 11111000 */
@@ -1866,7 +1866,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 185 0xb9 '¹' */
+ /* 185 0xb9 '╣' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1876,7 +1876,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 186 0xba 'º' */
+ /* 186 0xba '║' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1886,7 +1886,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 187 0xbb '»' */
+ /* 187 0xbb '╗' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -1896,7 +1896,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 188 0xbc '¼' */
+ /* 188 0xbc '╝' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf6, /* 11110110 */
@@ -1906,7 +1906,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 189 0xbd '½' */
+ /* 189 0xbd '╜' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -1916,7 +1916,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 190 0xbe '¾' */
+ /* 190 0xbe '╛' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xf8, /* 11111000 */
@@ -1926,7 +1926,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 191 0xbf '¿' */
+ /* 191 0xbf '┐' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1936,7 +1936,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 192 0xc0 'À' */
+ /* 192 0xc0 '└' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1946,7 +1946,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 193 0xc1 'Á' */
+ /* 193 0xc1 '┴' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1956,7 +1956,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 194 0xc2 'Â' */
+ /* 194 0xc2 '┬' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1966,7 +1966,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 195 0xc3 'Ã' */
+ /* 195 0xc3 '├' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1976,7 +1976,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 196 0xc4 'Ä' */
+ /* 196 0xc4 '─' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -1986,7 +1986,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 197 0xc5 'Å' */
+ /* 197 0xc5 '┼' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -1996,7 +1996,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 198 0xc6 'Æ' */
+ /* 198 0xc6 '╞' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2006,7 +2006,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 199 0xc7 'Ç' */
+ /* 199 0xc7 '╟' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2016,7 +2016,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 200 0xc8 'È' */
+ /* 200 0xc8 '╚' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2026,7 +2026,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 201 0xc9 'É' */
+ /* 201 0xc9 '╔' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3f, /* 00111111 */
@@ -2036,7 +2036,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 202 0xca 'Ê' */
+ /* 202 0xca '╩' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2046,7 +2046,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 203 0xcb 'Ë' */
+ /* 203 0xcb '╦' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2056,7 +2056,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 204 0xcc 'Ì' */
+ /* 204 0xcc '╠' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x37, /* 00110111 */
@@ -2066,7 +2066,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 205 0xcd 'Í' */
+ /* 205 0xcd '═' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2076,7 +2076,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 206 0xce 'Î' */
+ /* 206 0xce '╬' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0xf7, /* 11110111 */
@@ -2086,7 +2086,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 207 0xcf 'Ï' */
+ /* 207 0xcf '╧' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2096,7 +2096,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 208 0xd0 'Ð' */
+ /* 208 0xd0 '╨' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2106,7 +2106,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 209 0xd1 'Ñ' */
+ /* 209 0xd1 '╤' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xff, /* 11111111 */
@@ -2116,7 +2116,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 210 0xd2 'Ò' */
+ /* 210 0xd2 '╥' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2126,7 +2126,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 211 0xd3 'Ó' */
+ /* 211 0xd3 '╙' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2136,7 +2136,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 212 0xd4 'Ô' */
+ /* 212 0xd4 '╘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x1f, /* 00011111 */
@@ -2146,7 +2146,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 213 0xd5 'Õ' */
+ /* 213 0xd5 '╒' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x1f, /* 00011111 */
@@ -2156,7 +2156,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 214 0xd6 'Ö' */
+ /* 214 0xd6 '╓' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2166,7 +2166,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 215 0xd7 '×' */
+ /* 215 0xd7 '╫' */
0x36, /* 00110110 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2176,7 +2176,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x36, /* 00110110 */
0x36, /* 00110110 */
- /* 216 0xd8 'Ø' */
+ /* 216 0xd8 '╪' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0xff, /* 11111111 */
@@ -2186,7 +2186,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 217 0xd9 'Ù' */
+ /* 217 0xd9 '┘' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2196,7 +2196,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 218 0xda 'Ú' */
+ /* 218 0xda '┌' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2206,7 +2206,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 219 0xdb 'Û' */
+ /* 219 0xdb '█' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2216,7 +2216,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 220 0xdc 'Ü' */
+ /* 220 0xdc '▄' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2226,7 +2226,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xff, /* 11111111 */
0xff, /* 11111111 */
- /* 221 0xdd 'Ý' */
+ /* 221 0xdd '▌' */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
0xf0, /* 11110000 */
@@ -2236,7 +2236,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xf0, /* 11110000 */
0xf0, /* 11110000 */
- /* 222 0xde 'Þ' */
+ /* 222 0xde '▐' */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
0x0f, /* 00001111 */
@@ -2246,7 +2246,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x0f, /* 00001111 */
0x0f, /* 00001111 */
- /* 223 0xdf 'ß' */
+ /* 223 0xdf '▀' */
0xff, /* 11111111 */
0xff, /* 11111111 */
0xff, /* 11111111 */
@@ -2256,7 +2256,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 224 0xe0 'à' */
+ /* 224 0xe0 'α' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x76, /* 01110110 */
@@ -2266,7 +2266,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x76, /* 01110110 */
0x00, /* 00000000 */
- /* 225 0xe1 'á' */
+ /* 225 0xe1 'ß' */
0x78, /* 01111000 */
0xcc, /* 11001100 */
0xcc, /* 11001100 */
@@ -2276,7 +2276,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xcc, /* 11001100 */
0x00, /* 00000000 */
- /* 226 0xe2 'â' */
+ /* 226 0xe2 'Γ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0xc0, /* 11000000 */
@@ -2286,7 +2286,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc0, /* 11000000 */
0x00, /* 00000000 */
- /* 227 0xe3 'ã' */
+ /* 227 0xe3 'π' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0xfe, /* 11111110 */
@@ -2296,7 +2296,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x6c, /* 01101100 */
0x00, /* 00000000 */
- /* 228 0xe4 'ä' */
+ /* 228 0xe4 'Σ' */
0xfe, /* 11111110 */
0xc6, /* 11000110 */
0x60, /* 01100000 */
@@ -2306,7 +2306,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xfe, /* 11111110 */
0x00, /* 00000000 */
- /* 229 0xe5 'å' */
+ /* 229 0xe5 'σ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2316,7 +2316,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x70, /* 01110000 */
0x00, /* 00000000 */
- /* 230 0xe6 'æ' */
+ /* 230 0xe6 'µ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x66, /* 01100110 */
@@ -2326,7 +2326,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7c, /* 01111100 */
0xc0, /* 11000000 */
- /* 231 0xe7 'ç' */
+ /* 231 0xe7 'τ' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2336,7 +2336,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x00, /* 00000000 */
- /* 232 0xe8 'è' */
+ /* 232 0xe8 'Φ' */
0x7e, /* 01111110 */
0x18, /* 00011000 */
0x3c, /* 00111100 */
@@ -2346,7 +2346,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x7e, /* 01111110 */
- /* 233 0xe9 'é' */
+ /* 233 0xe9 'Θ' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2356,7 +2356,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x38, /* 00111000 */
0x00, /* 00000000 */
- /* 234 0xea 'ê' */
+ /* 234 0xea 'Ω' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0xc6, /* 11000110 */
@@ -2366,7 +2366,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xee, /* 11101110 */
0x00, /* 00000000 */
- /* 235 0xeb 'ë' */
+ /* 235 0xeb 'δ' */
0x0e, /* 00001110 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2376,7 +2376,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x00, /* 00000000 */
- /* 236 0xec 'ì' */
+ /* 236 0xec '∞' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x7e, /* 01111110 */
@@ -2386,7 +2386,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 237 0xed 'í' */
+ /* 237 0xed 'φ' */
0x06, /* 00000110 */
0x0c, /* 00001100 */
0x7e, /* 01111110 */
@@ -2396,7 +2396,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x60, /* 01100000 */
0xc0, /* 11000000 */
- /* 238 0xee 'î' */
+ /* 238 0xee 'ε' */
0x1e, /* 00011110 */
0x30, /* 00110000 */
0x60, /* 01100000 */
@@ -2406,7 +2406,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x1e, /* 00011110 */
0x00, /* 00000000 */
- /* 239 0xef 'ï' */
+ /* 239 0xef '∩' */
0x00, /* 00000000 */
0x7c, /* 01111100 */
0xc6, /* 11000110 */
@@ -2416,7 +2416,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xc6, /* 11000110 */
0x00, /* 00000000 */
- /* 240 0xf0 'ð' */
+ /* 240 0xf0 '≡' */
0x00, /* 00000000 */
0xfe, /* 11111110 */
0x00, /* 00000000 */
@@ -2426,7 +2426,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 241 0xf1 'ñ' */
+ /* 241 0xf1 '±' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x7e, /* 01111110 */
@@ -2436,7 +2436,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 242 0xf2 'ò' */
+ /* 242 0xf2 '≥' */
0x30, /* 00110000 */
0x18, /* 00011000 */
0x0c, /* 00001100 */
@@ -2446,7 +2446,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 243 0xf3 'ó' */
+ /* 243 0xf3 '≤' */
0x0c, /* 00001100 */
0x18, /* 00011000 */
0x30, /* 00110000 */
@@ -2456,7 +2456,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x7e, /* 01111110 */
0x00, /* 00000000 */
- /* 244 0xf4 'ô' */
+ /* 244 0xf4 '⌠' */
0x0e, /* 00001110 */
0x1b, /* 00011011 */
0x1b, /* 00011011 */
@@ -2466,7 +2466,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x18, /* 00011000 */
0x18, /* 00011000 */
- /* 245 0xf5 'õ' */
+ /* 245 0xf5 '⌡' */
0x18, /* 00011000 */
0x18, /* 00011000 */
0x18, /* 00011000 */
@@ -2476,7 +2476,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0xd8, /* 11011000 */
0x70, /* 01110000 */
- /* 246 0xf6 'ö' */
+ /* 246 0xf6 '÷' */
0x00, /* 00000000 */
0x18, /* 00011000 */
0x00, /* 00000000 */
@@ -2486,7 +2486,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 247 0xf7 '÷' */
+ /* 247 0xf7 '≈' */
0x00, /* 00000000 */
0x76, /* 01110110 */
0xdc, /* 11011100 */
@@ -2496,7 +2496,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 248 0xf8 'ø' */
+ /* 248 0xf8 '°' */
0x38, /* 00111000 */
0x6c, /* 01101100 */
0x6c, /* 01101100 */
@@ -2506,7 +2506,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 249 0xf9 'ù' */
+ /* 249 0xf9 '·' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2516,7 +2516,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 250 0xfa 'ú' */
+ /* 250 0xfa '•' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
@@ -2526,7 +2526,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 251 0xfb 'û' */
+ /* 251 0xfb '√' */
0x0f, /* 00001111 */
0x0c, /* 00001100 */
0x0c, /* 00001100 */
@@ -2536,7 +2536,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x3c, /* 00111100 */
0x1c, /* 00011100 */
- /* 252 0xfc 'ü' */
+ /* 252 0xfc 'ⁿ' */
0x6c, /* 01101100 */
0x36, /* 00110110 */
0x36, /* 00110110 */
@@ -2546,7 +2546,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 253 0xfd 'ý' */
+ /* 253 0xfd '²' */
0x78, /* 01111000 */
0x0c, /* 00001100 */
0x18, /* 00011000 */
@@ -2556,7 +2556,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 254 0xfe 'þ' */
+ /* 254 0xfe '■' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x3c, /* 00111100 */
@@ -2566,7 +2566,7 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
0x00, /* 00000000 */
0x00, /* 00000000 */
- /* 255 0xff 'ÿ' */
+ /* 255 0xff ' ' */
0x00, /* 00000000 */
0x00, /* 00000000 */
0x00, /* 00000000 */
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f26660ea10a..f755b997b967 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
+#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
@@ -57,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
static void crc32init_le(void)
{
- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
+ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
@@ -76,7 +77,7 @@ static void crc32init_be(void)
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
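
The CRCPOLY_* constants referenced above now come from the shared include/linux/crc32poly.h header instead of crc32defs.h. A minimal sketch of what that header is assumed to provide (the standard CRC-32 and CRC-32C polynomials; the values are the well-known ones, the exact header text is not quoted here):

/* Sketch of include/linux/crc32poly.h; assumed contents, not verbatim. */
#ifndef _LINUX_CRC32_POLY_H
#define _LINUX_CRC32_POLY_H

#define CRC32_POLY_LE	0xedb88320	/* CRC-32, reflected (little-endian) form */
#define CRC32_POLY_BE	0x04c11db7	/* CRC-32, normal (big-endian) form */
#define CRC32C_POLY_LE	0x82F63B78	/* CRC-32C (Castagnoli), reflected form */

#endif /* _LINUX_CRC32_POLY_H */
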
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
new file mode 100644
index 000000000000..9011926e4162
--- /dev/null
+++ b/lib/gen_crc64table.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generate lookup table for the table-driven CRC64 calculation.
+ *
+ * gen_crc64table is executed at kernel build time and generates
+ * lib/crc64table.h. This header is included by lib/crc64.c for
+ * the table-driven CRC64 calculation.
+ *
+ * See lib/crc64.c for more information about the specification
+ * and polynomial arithmetic that gen_crc64table.c follows to
+ * generate the lookup table.
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <linux/swab.h>
+
+#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+
+static uint64_t crc64_table[256] = {0};
+
+static void generate_crc64_table(void)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0;
+ c = i << 56;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ c) & 0x8000000000000000ULL)
+ crc = (crc << 1) ^ CRC64_ECMA182_POLY;
+ else
+ crc <<= 1;
+ c <<= 1;
+ }
+
+ crc64_table[i] = crc;
+ }
+}
+
+static void print_crc64_table(void)
+{
+ int i;
+
+ printf("/* this file is generated - do not edit */\n\n");
+ printf("#include <linux/types.h>\n");
+ printf("#include <linux/cache.h>\n\n");
+ printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t0x%016" PRIx64 "ULL", crc64_table[i]);
+ if (i & 0x1)
+ printf(",\n");
+ else
+ printf(", ");
+ }
+ printf("};\n");
+}
+
+int main(int argc, char *argv[])
+{
+ generate_crc64_table();
+ print_crc64_table();
+ return 0;
+}
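
To show how the generated table is meant to be consumed, here is a rough sketch of the byte-at-a-time, most-significant-bit-first update loop that lib/crc64.c builds around crc64table[]; treat it as an illustration of the ECMA-182 table-driven algorithm under the assumptions above, not as the exact kernel source:

/* Sketch: table-driven CRC64-ECMA182 update, assuming the crc64table[]
 * generated into lib/crc64table.h above.  Illustrative only.
 */
static u64 example_crc64_be(u64 crc, const void *p, size_t len)
{
        const unsigned char *d = p;
        size_t i;

        for (i = 0; i < len; i++)
                /* index the table by the top byte of the running CRC */
                crc = (crc << 8) ^ crc64table[(crc >> 56) ^ d[i]];

        return crc;
}
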
diff --git a/lib/idr.c b/lib/idr.c
index 823b813f08f8..cb1db9b8d3f6 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -4,9 +4,7 @@
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-
-DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-static DEFINE_SPINLOCK(simple_ida_lock);
+#include <linux/xarray.h>
/**
* idr_alloc_u32() - Allocate an ID.
@@ -39,10 +37,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
unsigned int base = idr->idr_base;
unsigned int id = *nextid;
- if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
- return -EINVAL;
- if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
- idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
+ if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
+ idr->idr_rt.xa_flags |= IDR_RT_MARKER;
id = (id < base) ? 0 : id - base;
radix_tree_iter_init(&iter, id);
@@ -295,15 +291,13 @@ void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
void __rcu **slot = NULL;
void *entry;
- if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
- return ERR_PTR(-EINVAL);
id -= idr->idr_base;
entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
- __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);
+ __radix_tree_replace(&idr->idr_rt, node, slot, ptr);
return entry;
}
@@ -317,18 +311,15 @@ EXPORT_SYMBOL(idr_replace);
* bit per ID, and so is more space efficient than an IDR. To use an IDA,
* define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call
- * ida_simple_get(). To free an ID, call ida_simple_remove().
+ * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
+ * To free an ID, call ida_free().
*
- * If you have more complex locking requirements, use a loop around
- * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
- * ida_remove() to free an ID. You must make sure that ida_get_new() and
- * ida_remove() cannot be called at the same time as each other for the
- * same IDA.
+ * ida_destroy() can be used to dispose of an IDA without needing to
+ * free the individual IDs in it. You can use ida_is_empty() to find
+ * out whether the IDA has any IDs currently allocated.
*
- * You can also use ida_get_new_above() if you need an ID to be allocated
- * above a particular number. ida_destroy() can be used to dispose of an
- * IDA without needing to free the individual IDs in it. You can use
- * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
+ * The IDA handles its own locking. It is safe to call any of the IDA
+ * functions without synchronisation in your code.
*
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum.
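
As a brief usage sketch of the interface described above (ida_alloc() and ida_free() are assumed to be the thin wrappers around ida_alloc_range() that the text refers to; the IDA name below is hypothetical):

/* Illustrative only: the simplified IDA API with internal locking. */
static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
        /* returns an ID in [0, INT_MAX], or -ENOMEM / -ENOSPC */
        return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_id(int id)
{
        ida_free(&example_ida, id);
}
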
@@ -337,286 +328,265 @@ EXPORT_SYMBOL(idr_replace);
/*
* Developer's notes:
*
- * The IDA uses the functionality provided by the IDR & radix tree to store
- * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
- * free, unlike the IDR where it means at least one entry is free.
+ * The IDA uses the functionality provided by the XArray to store bitmaps in
+ * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
+ * have been set.
*
- * I considered telling the radix tree that each slot is an order-10 node
- * and storing the bit numbers in the radix tree, but the radix tree can't
- * allow a single multiorder entry at index 0, which would significantly
- * increase memory consumption for the IDA. So instead we divide the index
- * by the number of bits in the leaf bitmap before doing a radix tree lookup.
+ * I considered telling the XArray that each slot is an order-10 node
+ * and indexing by bit number, but the XArray can't allow a single multi-index
+ * entry in the head, which would significantly increase memory consumption
+ * for the IDA. So instead we divide the index by the number of bits in the
+ * leaf bitmap before doing a radix tree lookup.
*
* As an optimisation, if there are only a few low bits set in any given
- * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
- * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
- * directly in the entry. By being really tricksy, we could store
- * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
- * for 0-3 allocated IDs.
- *
- * We allow the radix tree 'exceptional' count to get out of date. Nothing
- * in the IDA nor the radix tree code checks it. If it becomes important
- * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
- * calls to radix_tree_iter_replace() which will correct the exceptional
- * count.
- *
- * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
+ * leaf, instead of allocating a 128-byte bitmap, we store the bits
+ * as a value entry. Value entries never have the XA_FREE_MARK cleared
+ * because we can always convert them into a bitmap entry.
+ *
+ * It would be possible to optimise further; once we've run out of a
+ * single 128-byte bitmap, we currently switch to a 576-byte node, put
+ * the 128-byte bitmap in the first entry and then start allocating extra
+ * 128-byte entries. We could instead use the 512 bytes of the node's
+ * data as a bitmap before moving to that scheme. I do not believe this
+ * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
+ * users of the IDA and almost none of them use more than 1024 entries.
+ * Those that do need more than 1024 also need more than the 8192 IDs
+ * that the 512 bytes would provide.
+ *
+ * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
* equivalent, it will still need locking. Going to RCU lookup would require
* using RCU to free bitmaps, and that's not trivial without embedding an
* RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
* bitmap, which is excessive.
*/
-#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
-
/**
- * ida_get_new_above - allocate new ID above or equal to a start id
- * @ida: ida handle
- * @start: id to start search at
- * @id: pointer to the allocated handle
- *
- * Allocate new ID above or equal to @start. It should be called
- * with any required locks to ensure that concurrent calls to
- * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
- * Consider using ida_simple_get() if you do not have complex locking
- * requirements.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the ida_pre_get() call. If the ida is full, it will
- * return %-ENOSPC. On success, it will return 0.
- *
- * @id returns a value in the range @start ... %0x7fffffff.
+ * ida_alloc_range() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and @max, inclusive. The allocated ID will
+ * not exceed %INT_MAX, even if @max is larger.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
*/
-int ida_get_new_above(struct ida *ida, int start, int *id)
+int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
+ gfp_t gfp)
{
- struct radix_tree_root *root = &ida->ida_rt;
- void __rcu **slot;
- struct radix_tree_iter iter;
- struct ida_bitmap *bitmap;
- unsigned long index;
- unsigned bit, ebit;
- int new;
-
- index = start / IDA_BITMAP_BITS;
- bit = start % IDA_BITMAP_BITS;
- ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
-
- slot = radix_tree_iter_init(&iter, index);
- for (;;) {
- if (slot)
- slot = radix_tree_next_slot(slot, &iter,
- RADIX_TREE_ITER_TAGGED);
- if (!slot) {
- slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
- if (IS_ERR(slot)) {
- if (slot == ERR_PTR(-ENOMEM))
- return -EAGAIN;
- return PTR_ERR(slot);
+ XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
+ unsigned bit = min % IDA_BITMAP_BITS;
+ unsigned long flags;
+ struct ida_bitmap *bitmap, *alloc = NULL;
+
+ if ((int)min < 0)
+ return -ENOSPC;
+
+ if ((int)max < 0)
+ max = INT_MAX;
+
+retry:
+ xas_lock_irqsave(&xas, flags);
+next:
+ bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
+ if (xas.xa_index > min / IDA_BITMAP_BITS)
+ bit = 0;
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+
+ if (xa_is_value(bitmap)) {
+ unsigned long tmp = xa_to_value(bitmap);
+
+ if (bit < BITS_PER_XA_VALUE) {
+ bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+ if (bit < BITS_PER_XA_VALUE) {
+ tmp |= 1UL << bit;
+ xas_store(&xas, xa_mk_value(tmp));
+ goto out;
}
}
- if (iter.index > index) {
- bit = 0;
- ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
- }
- new = iter.index * IDA_BITMAP_BITS;
- bitmap = rcu_dereference_raw(*slot);
- if (radix_tree_exception(bitmap)) {
- unsigned long tmp = (unsigned long)bitmap;
- ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
- if (ebit < BITS_PER_LONG) {
- tmp |= 1UL << ebit;
- rcu_assign_pointer(*slot, (void *)tmp);
- *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
- return 0;
- }
- bitmap = this_cpu_xchg(ida_bitmap, NULL);
- if (!bitmap)
- return -EAGAIN;
- bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
- rcu_assign_pointer(*slot, bitmap);
+ bitmap = alloc;
+ if (!bitmap)
+ bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+ if (!bitmap)
+ goto alloc;
+ bitmap->bitmap[0] = tmp;
+ xas_store(&xas, bitmap);
+ if (xas_error(&xas)) {
+ bitmap->bitmap[0] = 0;
+ goto out;
}
+ }
- if (bitmap) {
- bit = find_next_zero_bit(bitmap->bitmap,
- IDA_BITMAP_BITS, bit);
- new += bit;
- if (new < 0)
- return -ENOSPC;
- if (bit == IDA_BITMAP_BITS)
- continue;
+ if (bitmap) {
+ bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+ if (bit == IDA_BITMAP_BITS)
+ goto next;
- __set_bit(bit, bitmap->bitmap);
- if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
- radix_tree_iter_tag_clear(root, &iter,
- IDR_FREE);
+ __set_bit(bit, bitmap->bitmap);
+ if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ if (bit < BITS_PER_XA_VALUE) {
+ bitmap = xa_mk_value(1UL << bit);
} else {
- new += bit;
- if (new < 0)
- return -ENOSPC;
- if (ebit < BITS_PER_LONG) {
- bitmap = (void *)((1UL << ebit) |
- RADIX_TREE_EXCEPTIONAL_ENTRY);
- radix_tree_iter_replace(root, &iter, slot,
- bitmap);
- *id = new;
- return 0;
- }
- bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ bitmap = alloc;
+ if (!bitmap)
+ bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
if (!bitmap)
- return -EAGAIN;
+ goto alloc;
__set_bit(bit, bitmap->bitmap);
- radix_tree_iter_replace(root, &iter, slot, bitmap);
}
-
- *id = new;
- return 0;
+ xas_store(&xas, bitmap);
+ }
+out:
+ xas_unlock_irqrestore(&xas, flags);
+ if (xas_nomem(&xas, gfp)) {
+ xas.xa_index = min / IDA_BITMAP_BITS;
+ bit = min % IDA_BITMAP_BITS;
+ goto retry;
}
+ if (bitmap != alloc)
+ kfree(alloc);
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ return xas.xa_index * IDA_BITMAP_BITS + bit;
+alloc:
+ xas_unlock_irqrestore(&xas, flags);
+ alloc = kzalloc(sizeof(*bitmap), gfp);
+ if (!alloc)
+ return -ENOMEM;
+ xas_set(&xas, min / IDA_BITMAP_BITS);
+ bit = min % IDA_BITMAP_BITS;
+ goto retry;
+nospc:
+ xas_unlock_irqrestore(&xas, flags);
+ return -ENOSPC;
}
-EXPORT_SYMBOL(ida_get_new_above);
+EXPORT_SYMBOL(ida_alloc_range);
/**
- * ida_remove - Free the given ID
- * @ida: ida handle
- * @id: ID to free
+ * ida_free() - Release an allocated ID.
+ * @ida: IDA handle.
+ * @id: Previously allocated ID.
*
- * This function should not be called at the same time as ida_get_new_above().
+ * Context: Any context.
*/
-void ida_remove(struct ida *ida, int id)
+void ida_free(struct ida *ida, unsigned int id)
{
- unsigned long index = id / IDA_BITMAP_BITS;
- unsigned offset = id % IDA_BITMAP_BITS;
+ XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
+ unsigned bit = id % IDA_BITMAP_BITS;
struct ida_bitmap *bitmap;
- unsigned long *btmp;
- struct radix_tree_iter iter;
- void __rcu **slot;
+ unsigned long flags;
- slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
- if (!slot)
- goto err;
+ BUG_ON((int)id < 0);
- bitmap = rcu_dereference_raw(*slot);
- if (radix_tree_exception(bitmap)) {
- btmp = (unsigned long *)slot;
- offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
- if (offset >= BITS_PER_LONG)
+ xas_lock_irqsave(&xas, flags);
+ bitmap = xas_load(&xas);
+
+ if (xa_is_value(bitmap)) {
+ unsigned long v = xa_to_value(bitmap);
+ if (bit >= BITS_PER_XA_VALUE)
+ goto err;
+ if (!(v & (1UL << bit)))
goto err;
+ v &= ~(1UL << bit);
+ if (!v)
+ goto delete;
+ xas_store(&xas, xa_mk_value(v));
} else {
- btmp = bitmap->bitmap;
- }
- if (!test_bit(offset, btmp))
- goto err;
-
- __clear_bit(offset, btmp);
- radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
- if (radix_tree_exception(bitmap)) {
- if (rcu_dereference_raw(*slot) ==
- (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
- radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
- } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
- kfree(bitmap);
- radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ if (!test_bit(bit, bitmap->bitmap))
+ goto err;
+ __clear_bit(bit, bitmap->bitmap);
+ xas_set_mark(&xas, XA_FREE_MARK);
+ if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
+ kfree(bitmap);
+delete:
+ xas_store(&xas, NULL);
+ }
}
+ xas_unlock_irqrestore(&xas, flags);
return;
err:
- WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
+ xas_unlock_irqrestore(&xas, flags);
+ WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
-EXPORT_SYMBOL(ida_remove);
+EXPORT_SYMBOL(ida_free);
/**
- * ida_destroy - Free the contents of an ida
- * @ida: ida handle
+ * ida_destroy() - Free all IDs.
+ * @ida: IDA handle.
+ *
+ * Calling this function frees all IDs and releases all resources used
+ * by an IDA. When this call returns, the IDA is empty and can be reused
+ * or freed. If the IDA is already empty, there is no need to call this
+ * function.
*
- * Calling this function releases all resources associated with an IDA. When
- * this call returns, the IDA is empty and can be reused or freed. The caller
- * should not allow ida_remove() or ida_get_new_above() to be called at the
- * same time.
+ * Context: Any context.
*/
void ida_destroy(struct ida *ida)
{
- struct radix_tree_iter iter;
- void __rcu **slot;
+ XA_STATE(xas, &ida->xa, 0);
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
- radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
- struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
- if (!radix_tree_exception(bitmap))
+ xas_lock_irqsave(&xas, flags);
+ xas_for_each(&xas, bitmap, ULONG_MAX) {
+ if (!xa_is_value(bitmap))
kfree(bitmap);
- radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ xas_store(&xas, NULL);
}
+ xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
-/**
- * ida_simple_get - get a new id.
- * @ida: the (initialized) ida.
- * @start: the minimum id (inclusive, < 0x8000000)
- * @end: the maximum id (exclusive, < 0x8000000 or 0)
- * @gfp_mask: memory allocation flags
- *
- * Allocates an id in the range start <= id < end, or returns -ENOSPC.
- * On memory allocation failure, returns -ENOMEM.
- *
- * Compared to ida_get_new_above() this function does its own locking, and
- * should be used unless there are special requirements.
- *
- * Use ida_simple_remove() to get rid of an id.
- */
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
- gfp_t gfp_mask)
-{
- int ret, id;
- unsigned int max;
- unsigned long flags;
-
- BUG_ON((int)start < 0);
- BUG_ON((int)end < 0);
-
- if (end == 0)
- max = 0x80000000;
- else {
- BUG_ON(end < start);
- max = end - 1;
- }
+#ifndef __KERNEL__
+extern void xa_dump_index(unsigned long index, unsigned int shift);
+#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS)
-again:
- if (!ida_pre_get(ida, gfp_mask))
- return -ENOMEM;
+static void ida_dump_entry(void *entry, unsigned long index)
+{
+ unsigned long i;
+
+ if (!entry)
+ return;
+
+ if (xa_is_node(entry)) {
+ struct xa_node *node = xa_to_node(entry);
+ unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
+ XA_CHUNK_SHIFT;
+
+ xa_dump_index(index * IDA_BITMAP_BITS, shift);
+ xa_dump_node(node);
+ for (i = 0; i < XA_CHUNK_SIZE; i++)
+ ida_dump_entry(node->slots[i],
+ index | (i << node->shift));
+ } else if (xa_is_value(entry)) {
+ xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
+ pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
+ } else {
+ struct ida_bitmap *bitmap = entry;
- spin_lock_irqsave(&simple_ida_lock, flags);
- ret = ida_get_new_above(ida, start, &id);
- if (!ret) {
- if (id > max) {
- ida_remove(ida, id);
- ret = -ENOSPC;
- } else {
- ret = id;
- }
+ xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
+ pr_cont("bitmap: %p data", bitmap);
+ for (i = 0; i < IDA_BITMAP_LONGS; i++)
+ pr_cont(" %lx", bitmap->bitmap[i]);
+ pr_cont("\n");
}
- spin_unlock_irqrestore(&simple_ida_lock, flags);
-
- if (unlikely(ret == -EAGAIN))
- goto again;
-
- return ret;
}
-EXPORT_SYMBOL(ida_simple_get);
-/**
- * ida_simple_remove - remove an allocated id.
- * @ida: the (initialized) ida.
- * @id: the id returned by ida_simple_get.
- *
- * Use to release an id allocated with ida_simple_get().
- *
- * Compared to ida_remove() this function does its own locking, and should be
- * used unless there are special requirements.
- */
-void ida_simple_remove(struct ida *ida, unsigned int id)
+static void ida_dump(struct ida *ida)
{
- unsigned long flags;
-
- BUG_ON((int)id < 0);
- spin_lock_irqsave(&simple_ida_lock, flags);
- ida_remove(ida, id);
- spin_unlock_irqrestore(&simple_ida_lock, flags);
+ struct xarray *xa = &ida->xa;
+ pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
+ xa->xa_flags >> ROOT_TAG_SHIFT);
+ ida_dump_entry(xa->xa_head, 0);
}
-EXPORT_SYMBOL(ida_simple_remove);
+#endif
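
The developer's notes earlier in this file describe keeping the first few IDs of a leaf directly in an XArray value entry instead of allocating a 128-byte bitmap. A small sketch of that packing, assuming the xa_mk_value()/xa_to_value() helpers and BITS_PER_XA_VALUE from the XArray API; it mirrors the logic in ida_alloc_range() rather than adding anything new:

/* Illustrative: set one low bit in a value entry, or report that the
 * caller must fall back to a full struct ida_bitmap.
 */
static void *example_pack_bit(void *entry, unsigned int bit)
{
        unsigned long bits = xa_is_value(entry) ? xa_to_value(entry) : 0;

        if (bit >= BITS_PER_XA_VALUE)
                return NULL;            /* too high for a value entry */

        bits |= 1UL << bit;
        /* value entries always keep XA_FREE_MARK set */
        return xa_mk_value(bits);
}
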
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index e2d329099bf7..14436f4ca6bd 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -38,3 +38,33 @@ unsigned long int_sqrt(unsigned long x)
return y;
}
EXPORT_SYMBOL(int_sqrt);
+
+#if BITS_PER_LONG < 64
+/**
+ * int_sqrt64 - strongly typed int_sqrt function for use when a 64-bit input
+ * is expected.
+ * @x: 64-bit integer of which to calculate the sqrt
+ */
+u32 int_sqrt64(u64 x)
+{
+ u64 b, m, y = 0;
+
+ if (x <= ULONG_MAX)
+ return int_sqrt((unsigned long) x);
+
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
+ while (m != 0) {
+ b = y + m;
+ y >>= 1;
+
+ if (x >= b) {
+ x -= b;
+ y += m;
+ }
+ m >>= 2;
+ }
+
+ return y;
+}
+EXPORT_SYMBOL(int_sqrt64);
+#endif
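
A couple of worked values for the new helper, assuming a 32-bit build where this 64-bit path is actually taken; the loop is the classic digit-by-digit binary square root, so the result is the floor of the true root:

/* Illustrative expectations only; names are hypothetical. */
static void example_int_sqrt64_checks(void)
{
        WARN_ON(int_sqrt64(1ULL << 32) != 65536);       /* sqrt(2^32)  = 2^16 */
        WARN_ON(int_sqrt64(10000000000ULL) != 100000);  /* sqrt(10^10) = 10^5 */
        WARN_ON(int_sqrt64(10000000001ULL) != 100000);  /* floor, not rounding */
}
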
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 835242e74aaa..75509a1511a3 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -64,11 +64,12 @@ static int interval_tree_test_init(void)
unsigned long results;
cycles_t time1, time2, time;
- nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
if (!nodes)
return -ENOMEM;
- queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+ queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
if (!queries) {
kfree(nodes);
return -ENOMEM;
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
deleted file mode 100644
index 55b00de106b5..000000000000
--- a/lib/iommu-common.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IOMMU mmap management and range allocation functions.
- * Based almost entirely upon the powerpc iommu allocator.
- */
-
-#include <linux/export.h>
-#include <linux/bitmap.h>
-#include <linux/bug.h>
-#include <linux/iommu-helper.h>
-#include <linux/iommu-common.h>
-#include <linux/dma-mapping.h>
-#include <linux/hash.h>
-
-static unsigned long iommu_large_alloc = 15;
-
-static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
-
-static inline bool need_flush(struct iommu_map_table *iommu)
-{
- return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
-}
-
-static inline void set_flush(struct iommu_map_table *iommu)
-{
- iommu->flags |= IOMMU_NEED_FLUSH;
-}
-
-static inline void clear_flush(struct iommu_map_table *iommu)
-{
- iommu->flags &= ~IOMMU_NEED_FLUSH;
-}
-
-static void setup_iommu_pool_hash(void)
-{
- unsigned int i;
- static bool do_once;
-
- if (do_once)
- return;
- do_once = true;
- for_each_possible_cpu(i)
- per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
-/*
- * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
- * is the number of table entries. If `large_pool' is set to true,
- * the top 1/4 of the table will be set aside for pool allocations
- * of more than iommu_large_alloc pages.
- */
-void iommu_tbl_pool_init(struct iommu_map_table *iommu,
- unsigned long num_entries,
- u32 table_shift,
- void (*lazy_flush)(struct iommu_map_table *),
- bool large_pool, u32 npools,
- bool skip_span_boundary_check)
-{
- unsigned int start, i;
- struct iommu_pool *p = &(iommu->large_pool);
-
- setup_iommu_pool_hash();
- if (npools == 0)
- iommu->nr_pools = IOMMU_NR_POOLS;
- else
- iommu->nr_pools = npools;
- BUG_ON(npools > IOMMU_NR_POOLS);
-
- iommu->table_shift = table_shift;
- iommu->lazy_flush = lazy_flush;
- start = 0;
- if (skip_span_boundary_check)
- iommu->flags |= IOMMU_NO_SPAN_BOUND;
- if (large_pool)
- iommu->flags |= IOMMU_HAS_LARGE_POOL;
-
- if (!large_pool)
- iommu->poolsize = num_entries/iommu->nr_pools;
- else
- iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
- for (i = 0; i < iommu->nr_pools; i++) {
- spin_lock_init(&(iommu->pools[i].lock));
- iommu->pools[i].start = start;
- iommu->pools[i].hint = start;
- start += iommu->poolsize; /* start for next pool */
- iommu->pools[i].end = start - 1;
- }
- if (!large_pool)
- return;
- /* initialize large_pool */
- spin_lock_init(&(p->lock));
- p->start = start;
- p->hint = p->start;
- p->end = num_entries;
-}
-EXPORT_SYMBOL(iommu_tbl_pool_init);
-
-unsigned long iommu_tbl_range_alloc(struct device *dev,
- struct iommu_map_table *iommu,
- unsigned long npages,
- unsigned long *handle,
- unsigned long mask,
- unsigned int align_order)
-{
- unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
- unsigned long n, end, start, limit, boundary_size;
- struct iommu_pool *pool;
- int pass = 0;
- unsigned int pool_nr;
- unsigned int npools = iommu->nr_pools;
- unsigned long flags;
- bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
- bool largealloc = (large_pool && npages > iommu_large_alloc);
- unsigned long shift;
- unsigned long align_mask = 0;
-
- if (align_order > 0)
- align_mask = ~0ul >> (BITS_PER_LONG - align_order);
-
- /* Sanity check */
- if (unlikely(npages == 0)) {
- WARN_ON_ONCE(1);
- return IOMMU_ERROR_CODE;
- }
-
- if (largealloc) {
- pool = &(iommu->large_pool);
- pool_nr = 0; /* to keep compiler happy */
- } else {
- /* pick out pool_nr */
- pool_nr = pool_hash & (npools - 1);
- pool = &(iommu->pools[pool_nr]);
- }
- spin_lock_irqsave(&pool->lock, flags);
-
- again:
- if (pass == 0 && handle && *handle &&
- (*handle >= pool->start) && (*handle < pool->end))
- start = *handle;
- else
- start = pool->hint;
-
- limit = pool->end;
-
- /* The case below can happen if we have a small segment appended
- * to a large, or when the previous alloc was at the very end of
- * the available space. If so, go back to the beginning. If a
- * flush is needed, it will get done based on the return value
- * from iommu_area_alloc() below.
- */
- if (start >= limit)
- start = pool->start;
- shift = iommu->table_map_base >> iommu->table_shift;
- if (limit + shift > mask) {
- limit = mask - shift + 1;
- /* If we're constrained on address range, first try
- * at the masked hint to avoid O(n) search complexity,
- * but on second pass, start at 0 in pool 0.
- */
- if ((start & mask) >= limit || pass > 0) {
- spin_unlock(&(pool->lock));
- pool = &(iommu->pools[0]);
- spin_lock(&(pool->lock));
- start = pool->start;
- } else {
- start &= mask;
- }
- }
-
- if (dev)
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << iommu->table_shift);
- else
- boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
-
- boundary_size = boundary_size >> iommu->table_shift;
- /*
- * if the skip_span_boundary_check had been set during init, we set
- * things up so that iommu_is_span_boundary() merely checks if the
- * (index + npages) < num_tsb_entries
- */
- if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
- shift = 0;
- boundary_size = iommu->poolsize * iommu->nr_pools;
- }
- n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
- boundary_size, align_mask);
- if (n == -1) {
- if (likely(pass == 0)) {
- /* First failure, rescan from the beginning. */
- pool->hint = pool->start;
- set_flush(iommu);
- pass++;
- goto again;
- } else if (!largealloc && pass <= iommu->nr_pools) {
- spin_unlock(&(pool->lock));
- pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
- pool = &(iommu->pools[pool_nr]);
- spin_lock(&(pool->lock));
- pool->hint = pool->start;
- set_flush(iommu);
- pass++;
- goto again;
- } else {
- /* give up */
- n = IOMMU_ERROR_CODE;
- goto bail;
- }
- }
- if (iommu->lazy_flush &&
- (n < pool->hint || need_flush(iommu))) {
- clear_flush(iommu);
- iommu->lazy_flush(iommu);
- }
-
- end = n + npages;
- pool->hint = end;
-
- /* Update handle for SG allocations */
- if (handle)
- *handle = end;
-bail:
- spin_unlock_irqrestore(&(pool->lock), flags);
-
- return n;
-}
-EXPORT_SYMBOL(iommu_tbl_range_alloc);
-
-static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
- unsigned long entry)
-{
- struct iommu_pool *p;
- unsigned long largepool_start = tbl->large_pool.start;
- bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
-
- /* The large pool is the last pool at the top of the table */
- if (large_pool && entry >= largepool_start) {
- p = &tbl->large_pool;
- } else {
- unsigned int pool_nr = entry / tbl->poolsize;
-
- BUG_ON(pool_nr >= tbl->nr_pools);
- p = &tbl->pools[pool_nr];
- }
- return p;
-}
-
-/* Caller supplies the index of the entry into the iommu map table
- * itself when the mapping from dma_addr to the entry is not the
- * default addr->entry mapping below.
- */
-void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
- unsigned long npages, unsigned long entry)
-{
- struct iommu_pool *pool;
- unsigned long flags;
- unsigned long shift = iommu->table_shift;
-
- if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
- entry = (dma_addr - iommu->table_map_base) >> shift;
- pool = get_pool(iommu, entry);
-
- spin_lock_irqsave(&(pool->lock), flags);
- bitmap_clear(iommu->map, entry, npages);
- spin_unlock_irqrestore(&(pool->lock), flags);
-}
-EXPORT_SYMBOL(iommu_tbl_range_free);
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 23633c0fda4a..92a9f243c0e2 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,19 +3,8 @@
* IOMMU helper functions for the free area management
*/
-#include <linux/export.h>
#include <linux/bitmap.h>
-#include <linux/bug.h>
-
-int iommu_is_span_boundary(unsigned int index, unsigned int nr,
- unsigned long shift,
- unsigned long boundary_size)
-{
- BUG_ON(!is_power_of_2(boundary_size));
-
- shift = (shift + index) & (boundary_size - 1);
- return shift + nr > boundary_size;
-}
+#include <linux/iommu-helper.h>
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
@@ -38,4 +27,3 @@ again:
}
return -1;
}
-EXPORT_SYMBOL(iommu_area_alloc);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bbaa3200..517f5853ffed 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 970212670b6a..7ebccb5c1637 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -83,6 +83,7 @@
const struct kvec *kvec; \
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
+ } else if (unlikely(i->type & ITER_DISCARD)) { \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -114,6 +115,8 @@
} \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
+ } else if (unlikely(i->type & ITER_DISCARD)) { \
+ skip += n; \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -428,17 +431,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
-void iov_iter_init(struct iov_iter *i, int direction,
+void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
+ WARN_ON(direction & ~(READ | WRITE));
+ direction &= READ | WRITE;
+
/* It will get better. Eventually... */
if (uaccess_kernel()) {
- direction |= ITER_KVEC;
- i->type = direction;
+ i->type = ITER_KVEC | direction;
i->kvec = (struct kvec *)iov;
} else {
- i->type = direction;
+ i->type = ITER_IOVEC | direction;
i->iov = iov;
}
i->nr_segs = nr_segs;
@@ -558,7 +563,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
@@ -573,10 +578,126 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
}
EXPORT_SYMBOL(_copy_to_iter);
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
+ n = copy_to_user_mcsafe((__force void *) to, from, n);
+ }
+ return n;
+}
+
+static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
+{
+ unsigned long ret;
+ char *to;
+
+ to = kmap_atomic(page);
+ ret = memcpy_mcsafe(to + offset, from, len);
+ kunmap_atomic(to);
+
+ return ret;
+}
+
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off, xfer = 0;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ unsigned long rem;
+
+ rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+ chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk - rem;
+ xfer += chunk - rem;
+ if (rem)
+ break;
+ n -= chunk;
+ addr += chunk;
+ }
+ i->count -= xfer;
+ return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @i: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and the typical _copy_to_iter() are:
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ * byte-by-byte until the fault happens again. Re-triggering machine
+ * checks is potentially fatal so the implementation uses source
+ * alignment and poison alignment assumptions to avoid re-triggering
+ * hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ * a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ const char *from = addr;
+ unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+
+ if (unlikely(iov_iter_is_pipe(i)))
+ return copy_pipe_to_iter_mcsafe(addr, bytes, i);
+ if (iter_is_iovec(i))
+ might_fault();
+ iterate_and_advance(i, bytes, v,
+ copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+ ({
+ rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len);
+ if (rem) {
+ curr_addr = (unsigned long) from;
+ bytes = curr_addr - s_addr - rem;
+ return bytes;
+ }
+ }),
+ ({
+ rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+ v.iov_len);
+ if (rem) {
+ curr_addr = (unsigned long) from;
+ bytes = curr_addr - s_addr - rem;
+ return bytes;
+ }
+ })
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
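
Since _copy_to_iter_mcsafe() can return a short count for every iterator type, a caller is expected to check the return value rather than assume the full length was transferred. A hedged sketch of such a caller (names are hypothetical):

/* Illustrative only: tolerate the short copies documented above. */
static size_t example_copy_out(const void *src, size_t len, struct iov_iter *i)
{
        size_t copied = _copy_to_iter_mcsafe(src, len, i);

        if (copied != len)
                pr_warn("short copy after memory error: %zu of %zu bytes\n",
                        copied, len);
        return copied;
}
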
@@ -596,7 +717,7 @@ EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return false;
}
@@ -623,7 +744,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -640,10 +761,24 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @i: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that guarantees all data is flushed for
+ * all iterator types. The _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -664,7 +799,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return false;
}
@@ -706,7 +841,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
- } else if (likely(!(i->type & ITER_PIPE)))
+ } else if (unlikely(iov_iter_is_discard(i)))
+ return bytes;
+ else if (likely(!iov_iter_is_pipe(i)))
return copy_page_to_iter_iovec(page, offset, bytes, i);
else
return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -718,7 +855,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
@@ -758,7 +895,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
clear_user(v.iov_base, v.iov_len),
@@ -778,7 +915,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
kunmap_atomic(kaddr);
return 0;
}
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
@@ -842,10 +979,14 @@ static void pipe_advance(struct iov_iter *i, size_t size)
void iov_iter_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
pipe_advance(i, size);
return;
}
+ if (unlikely(iov_iter_is_discard(i))) {
+ i->count -= size;
+ return;
+ }
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -857,7 +998,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
if (WARN_ON(unroll > MAX_RW_COUNT))
return;
i->count += unroll;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
int idx = i->idx;
size_t off = i->iov_offset;
@@ -881,12 +1022,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
pipe_truncate(i);
return;
}
+ if (unlikely(iov_iter_is_discard(i)))
+ return;
if (unroll <= i->iov_offset) {
i->iov_offset -= unroll;
return;
}
unroll -= i->iov_offset;
- if (i->type & ITER_BVEC) {
+ if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
@@ -919,23 +1062,25 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
- else if (i->type & ITER_BVEC)
+ if (unlikely(iov_iter_is_discard(i)))
+ return i->count;
+ else if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
else
return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
-void iov_iter_kvec(struct iov_iter *i, int direction,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
const struct kvec *kvec, unsigned long nr_segs,
size_t count)
{
- BUG_ON(!(direction & ITER_KVEC));
- i->type = direction;
+ WARN_ON(direction & ~(READ | WRITE));
+ i->type = ITER_KVEC | (direction & (READ | WRITE));
i->kvec = kvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
@@ -943,12 +1088,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_kvec);
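
A minimal sketch of the new calling convention, not part of the patch: the
kvec/bvec constructors now take a plain data direction (READ or WRITE) rather
than an ITER_* flag ORed with it. buf and len below are placeholders.

	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	/* old convention: the iterator type was carried in "direction" */
	/* iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len); */

	/* new convention: just READ or WRITE, the type is implied by the helper */
	iov_iter_kvec(&iter, READ, &kv, 1, len);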
-void iov_iter_bvec(struct iov_iter *i, int direction,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
const struct bio_vec *bvec, unsigned long nr_segs,
size_t count)
{
- BUG_ON(!(direction & ITER_BVEC));
- i->type = direction;
+ WARN_ON(direction & ~(READ | WRITE));
+ i->type = ITER_BVEC | (direction & (READ | WRITE));
i->bvec = bvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
@@ -956,13 +1101,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_bvec);
-void iov_iter_pipe(struct iov_iter *i, int direction,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
struct pipe_inode_info *pipe,
size_t count)
{
- BUG_ON(direction != ITER_PIPE);
+ BUG_ON(direction != READ);
WARN_ON(pipe->nrbufs == pipe->buffers);
- i->type = direction;
+ i->type = ITER_PIPE | READ;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
@@ -971,12 +1116,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_pipe);
+/**
+ * iov_iter_discard - Initialise an I/O iterator that discards data
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator that just discards everything that's written to it.
+ * It's only available as a READ iterator.
+ */
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
+{
+ BUG_ON(direction != READ);
+ i->type = ITER_DISCARD | READ;
+ i->count = count;
+ i->iov_offset = 0;
+}
+EXPORT_SYMBOL(iov_iter_discard);
+
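
A hedged usage sketch of the new discard iterator, not taken from this patch:
draining len bytes without storing them, assuming the copy helpers treat
ITER_DISCARD as "accept and drop" as the rest of the series intends; buf and
len are placeholders.

	struct iov_iter iter;
	size_t copied;

	/* READ is the only direction a discard iterator accepts */
	iov_iter_discard(&iter, READ, len);

	/* data written to the iterator is dropped, but the count still advances */
	copied = copy_to_iter(buf, len, &iter);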
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
return size | i->iov_offset;
return size;
@@ -995,7 +1158,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
unsigned long res = 0;
size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return ~0U;
}
@@ -1012,7 +1175,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
-static inline size_t __pipe_get_pages(struct iov_iter *i,
+static inline ssize_t __pipe_get_pages(struct iov_iter *i,
size_t maxsize,
struct page **pages,
int idx,
@@ -1063,8 +1226,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (maxsize > i->count)
maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (unlikely(iov_iter_is_discard(i)))
+ return -EFAULT;
+
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1075,7 +1241,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
len = maxpages * PAGE_SIZE;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+ res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1102,7 +1268,7 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
- size_t n;
+ ssize_t n;
int idx;
int npages;
@@ -1140,8 +1306,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (maxsize > i->count)
maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (unlikely(iov_iter_is_discard(i)))
+ return -EFAULT;
+
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1153,7 +1322,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
p = get_pages_array(n);
if (!p)
return -ENOMEM;
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+ res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
@@ -1183,7 +1352,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
@@ -1225,7 +1394,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return false;
}
@@ -1270,7 +1439,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
@@ -1312,8 +1481,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
if (!size)
return 0;
+ if (unlikely(iov_iter_is_discard(i)))
+ return 0;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
size_t off;
int idx;
@@ -1351,11 +1522,13 @@ EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
- if (unlikely(new->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(new))) {
WARN_ON(1);
return NULL;
}
- if (new->type & ITER_BVEC)
+ if (unlikely(iov_iter_is_discard(new)))
+ return NULL;
+ if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 90ba1eb1df06..015656aa8182 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -39,7 +39,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
size_t esize, gfp_t gfp_mask)
{
/*
- * round down to the next power of 2, since our 'let the indices
+ * round up to the next power of 2, since our 'let the indices
* wrap' technique works only in this case.
*/
size = roundup_pow_of_two(size);
@@ -54,7 +54,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
return -EINVAL;
}
- fifo->data = kmalloc(size * esize, gfp_mask);
+ fifo->data = kmalloc_array(esize, size, gfp_mask);
if (!fifo->data) {
fifo->mask = 0;
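
The corrected comment matches what the code actually does: the requested
element count is rounded up, never down. A small illustration, not part of
the patch:

	struct kfifo fifo;
	int ret;

	/* a request for 100 elements is rounded up to the next power of two, 128 */
	ret = kfifo_alloc(&fifo, 100, GFP_KERNEL);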
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/kobject.c b/lib/kobject.c
index afd5a3fc6123..97d86dc17c42 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -35,6 +35,25 @@ const void *kobject_namespace(struct kobject *kobj)
return kobj->ktype->namespace(kobj);
}
+/**
+ * kobject_get_ownership - get sysfs ownership data for @kobj
+ * @kobj: kobject in question
+ * @uid: kernel user ID for sysfs objects
+ * @gid: kernel group ID for sysfs objects
+ *
+ * Returns the initial uid/gid pair that should be used when creating the
+ * sysfs representation of the given kobject. Normally used to adjust
+ * ownership of objects in a container.
+ */
+void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+
+ if (kobj->ktype->get_ownership)
+ kobj->ktype->get_ownership(kobj, uid, gid);
+}
+
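
A sketch of how a ktype might hook the new callback; foo_dev, foo_release and
the owner_uid/owner_gid fields are hypothetical, the callback signature is the
one introduced above.

	static void foo_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
	{
		struct foo_dev *foo = container_of(kobj, struct foo_dev, kobj);

		/* expose the object as owned by whoever owns the containing device */
		*uid = foo->owner_uid;
		*gid = foo->owner_gid;
	}

	static struct kobj_type foo_ktype = {
		.release	= foo_release,
		.sysfs_ops	= &kobj_sysfs_ops,
		.get_ownership	= foo_get_ownership,
	};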
/*
* populate_dir - populate directory with attributes.
* @kobj: object we're working on.
@@ -125,7 +144,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
- strncpy(path + length, kobject_name(parent), cur);
+ memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
@@ -204,8 +223,9 @@ static int kobject_add_internal(struct kobject *kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
- WARN(1, "kobject: (%p): attempted to be registered with empty "
- "name!\n", kobj);
+ WARN(1,
+ "kobject: (%p): attempted to be registered with empty name!\n",
+ kobj);
return -EINVAL;
}
@@ -232,14 +252,12 @@ static int kobject_add_internal(struct kobject *kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
@@ -334,8 +352,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
}
if (kobj->state_initialized) {
/* do not error out as sometimes we can recover */
- printk(KERN_ERR "kobject (%p): tried to init an initialized "
- "object, something is seriously wrong.\n", kobj);
+ pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
+ kobj);
dump_stack();
}
@@ -344,7 +362,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
return;
error:
- printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
+ pr_err("kobject (%p): %s\n", kobj, err_str);
dump_stack();
}
EXPORT_SYMBOL(kobject_init);
@@ -357,7 +375,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
- printk(KERN_ERR "kobject: can not set name properly!\n");
+ pr_err("kobject: can not set name properly!\n");
return retval;
}
kobj->parent = parent;
@@ -399,8 +417,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
return -EINVAL;
if (!kobj->state_initialized) {
- printk(KERN_ERR "kobject '%s' (%p): tried to add an "
- "uninitialized object, something is seriously wrong.\n",
+ pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
kobject_name(kobj), kobj);
dump_stack();
return -EINVAL;
@@ -590,9 +607,9 @@ struct kobject *kobject_get(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
- WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
- "initialized, yet kobject_get() is being "
- "called.\n", kobject_name(kobj), kobj);
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
+ kobject_name(kobj), kobj);
kref_get(&kobj->kref);
}
return kobj;
@@ -622,8 +639,7 @@ static void kobject_cleanup(struct kobject *kobj)
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
- pr_debug("kobject: '%s' (%p): does not have a release() "
- "function, it is broken and must be fixed.\n",
+ pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
kobject_name(kobj), kobj);
/* send "remove" if the caller did not do it but sent "add" */
@@ -686,9 +702,9 @@ void kobject_put(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
- WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
- "initialized, yet kobject_put() is being "
- "called.\n", kobject_name(kobj), kobj);
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
+ kobject_name(kobj), kobj);
kref_put(&kobj->kref, kobject_release);
}
}
@@ -752,8 +768,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
- printk(KERN_WARNING "%s: kobject_add error: %d\n",
- __func__, retval);
+ pr_warn("%s: kobject_add error: %d\n", __func__, retval);
kobject_put(kobj);
kobj = NULL;
}
@@ -872,9 +887,16 @@ static void kset_release(struct kobject *kobj)
kfree(kset);
}
+void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ if (kobj->parent)
+ kobject_get_ownership(kobj->parent, uid, gid);
+}
+
static struct kobj_type kset_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = kset_release,
+ .release = kset_release,
+ .get_ownership = kset_get_ownership,
};
/**
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9fe6ec8fda28..63d0816ab23b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -22,9 +22,11 @@
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
+#include <linux/uidgid.h>
#include <linux/uuid.h>
#include <linux/ctype.h>
#include <net/sock.h>
+#include <net/netlink.h>
#include <net/net_namespace.h>
@@ -32,11 +34,13 @@ u64 uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
-#ifdef CONFIG_NET
+
struct uevent_sock {
struct list_head list;
struct sock *sk;
};
+
+#ifdef CONFIG_NET
static LIST_HEAD(uevent_sock_list);
#endif
@@ -228,30 +232,6 @@ out:
return r;
}
-#ifdef CONFIG_NET
-static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
-{
- struct kobject *kobj = data, *ksobj;
- const struct kobj_ns_type_operations *ops;
-
- ops = kobj_ns_ops(kobj);
- if (!ops && kobj->kset) {
- ksobj = &kobj->kset->kobj;
- if (ksobj->parent != NULL)
- ops = kobj_ns_ops(ksobj->parent);
- }
-
- if (ops && ops->netlink_ns && kobj->ktype->namespace) {
- const void *sock_ns, *ns;
- ns = kobj->ktype->namespace(kobj);
- sock_ns = ops->netlink_ns(dsk);
- return sock_ns != ns;
- }
-
- return 0;
-}
-#endif
-
#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
@@ -293,15 +273,44 @@ static void cleanup_uevent_env(struct subprocess_info *info)
}
#endif
-static int kobject_uevent_net_broadcast(struct kobject *kobj,
- struct kobj_uevent_env *env,
+#ifdef CONFIG_NET
+static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
const char *action_string,
const char *devpath)
{
- int retval = 0;
-#if defined(CONFIG_NET)
+ struct netlink_skb_parms *parms;
+ struct sk_buff *skb = NULL;
+ char *scratch;
+ size_t len;
+
+ /* allocate message with maximum possible size */
+ len = strlen(action_string) + strlen(devpath) + 2;
+ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ /* add header */
+ scratch = skb_put(skb, len);
+ sprintf(scratch, "%s@%s", action_string, devpath);
+
+ skb_put_data(skb, env->buf, env->buflen);
+
+ parms = &NETLINK_CB(skb);
+ parms->creds.uid = GLOBAL_ROOT_UID;
+ parms->creds.gid = GLOBAL_ROOT_GID;
+ parms->dst_group = 1;
+ parms->portid = 0;
+
+ return skb;
+}
+
+static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
struct sk_buff *skb = NULL;
struct uevent_sock *ue_sk;
+ int retval = 0;
/* send netlink message */
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
@@ -311,37 +320,99 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
continue;
if (!skb) {
- /* allocate message with the maximum possible size */
- size_t len = strlen(action_string) + strlen(devpath) + 2;
- char *scratch;
-
retval = -ENOMEM;
- skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+ skb = alloc_uevent_skb(env, action_string, devpath);
if (!skb)
continue;
-
- /* add header */
- scratch = skb_put(skb, len);
- sprintf(scratch, "%s@%s", action_string, devpath);
-
- skb_put_data(skb, env->buf, env->buflen);
-
- NETLINK_CB(skb).dst_group = 1;
}
- retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb),
- 0, 1, GFP_KERNEL,
- kobj_bcast_filter,
- kobj);
+ retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
+ GFP_KERNEL);
/* ENOBUFS should be handled in userspace */
if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
}
consume_skb(skb);
-#endif
+
return retval;
}
+static int uevent_net_broadcast_tagged(struct sock *usk,
+ struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ skb = alloc_uevent_skb(env, action_string, devpath);
+ if (!skb)
+ return -ENOMEM;
+
+ /* fix credentials */
+ if (owning_user_ns != &init_user_ns) {
+ struct netlink_skb_parms *parms = &NETLINK_CB(skb);
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+ /* fix uid */
+ root_uid = make_kuid(owning_user_ns, 0);
+ if (uid_valid(root_uid))
+ parms->creds.uid = root_uid;
+
+ /* fix gid */
+ root_gid = make_kgid(owning_user_ns, 0);
+ if (gid_valid(root_gid))
+ parms->creds.gid = root_gid;
+ }
+
+ ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (ret == -ENOBUFS || ret == -ESRCH)
+ ret = 0;
+
+ return ret;
+}
+#endif
+
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+ struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ int ret = 0;
+
+#ifdef CONFIG_NET
+ const struct kobj_ns_type_operations *ops;
+ const struct net *net = NULL;
+
+ ops = kobj_ns_ops(kobj);
+ if (!ops && kobj->kset) {
+ struct kobject *ksobj = &kobj->kset->kobj;
+ if (ksobj->parent != NULL)
+ ops = kobj_ns_ops(ksobj->parent);
+ }
+
+ /* kobjects currently only carry network namespace tags and those
+ * are the only tags relevant here, since we want to decide which
+ * network namespaces to broadcast the uevent into.
+ */
+ if (ops && ops->netlink_ns && kobj->ktype->namespace)
+ if (ops->type == KOBJ_NS_TYPE_NET)
+ net = kobj->ktype->namespace(kobj);
+
+ if (!net)
+ ret = uevent_net_broadcast_untagged(env, action_string,
+ devpath);
+ else
+ ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
+ action_string, devpath);
+#endif
+
+ return ret;
+}
+
static void zap_modalias_env(struct kobj_uevent_env *env)
{
static const char modalias_prefix[] = "MODALIAS=";
@@ -602,12 +673,88 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
EXPORT_SYMBOL_GPL(add_uevent_var);
#if defined(CONFIG_NET)
+static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
+ struct netlink_ext_ack *extack)
+{
+ /* u64 to chars: 2^64 - 1 = 21 chars */
+ char buf[sizeof("SEQNUM=") + 21];
+ struct sk_buff *skbc;
+ int ret;
+
+ /* bump and prepare sequence number */
+ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+ if (ret < 0 || (size_t)ret >= sizeof(buf))
+ return -ENOMEM;
+ ret++;
+
+ /* verify message does not overflow */
+ if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
+ NL_SET_ERR_MSG(extack, "uevent message too big");
+ return -EINVAL;
+ }
+
+ /* copy skb and extend to accommodate sequence number */
+ skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
+ if (!skbc)
+ return -ENOMEM;
+
+ /* append sequence number */
+ skb_put_data(skbc, buf, ret);
+
+ /* remove msg header */
+ skb_pull(skbc, NLMSG_HDRLEN);
+
+ /* set portid 0 to inform userspace that the message comes from the kernel */
+ NETLINK_CB(skbc).portid = 0;
+ NETLINK_CB(skbc).dst_group = 1;
+
+ ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (ret == -ENOBUFS || ret == -ESRCH)
+ ret = 0;
+
+ return ret;
+}
+
+static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net;
+ int ret;
+
+ if (!nlmsg_data(nlh))
+ return -EINVAL;
+
+ /*
+ * Verify that we are allowed to send messages to the target
+ * network namespace. The caller must have CAP_SYS_ADMIN in the
+ * owning user namespace of the target network namespace.
+ */
+ net = sock_net(NETLINK_CB(skb).sk);
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
+ NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability");
+ return -EPERM;
+ }
+
+ mutex_lock(&uevent_sock_mutex);
+ ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
+ mutex_unlock(&uevent_sock_mutex);
+
+ return ret;
+}
+
+static void uevent_net_rcv(struct sk_buff *skb)
+{
+ netlink_rcv_skb(skb, &uevent_net_rcv_skb);
+}
+
static int uevent_net_init(struct net *net)
{
struct uevent_sock *ue_sk;
struct netlink_kernel_cfg cfg = {
.groups = 1,
- .flags = NL_CFG_F_NONROOT_RECV,
+ .input = uevent_net_rcv,
+ .flags = NL_CFG_F_NONROOT_RECV
};
ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
@@ -621,27 +768,28 @@ static int uevent_net_init(struct net *net)
kfree(ue_sk);
return -ENODEV;
}
- mutex_lock(&uevent_sock_mutex);
- list_add_tail(&ue_sk->list, &uevent_sock_list);
- mutex_unlock(&uevent_sock_mutex);
+
+ net->uevent_sock = ue_sk;
+
+ /* Restrict uevents to initial user namespace. */
+ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+ mutex_lock(&uevent_sock_mutex);
+ list_add_tail(&ue_sk->list, &uevent_sock_list);
+ mutex_unlock(&uevent_sock_mutex);
+ }
+
return 0;
}
static void uevent_net_exit(struct net *net)
{
- struct uevent_sock *ue_sk;
+ struct uevent_sock *ue_sk = net->uevent_sock;
- mutex_lock(&uevent_sock_mutex);
- list_for_each_entry(ue_sk, &uevent_sock_list, list) {
- if (sock_net(ue_sk->sk) == net)
- goto found;
+ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+ mutex_lock(&uevent_sock_mutex);
+ list_del(&ue_sk->list);
+ mutex_unlock(&uevent_sock_mutex);
}
- mutex_unlock(&uevent_sock_mutex);
- return;
-
-found:
- list_del(&ue_sk->list);
- mutex_unlock(&uevent_sock_mutex);
netlink_kernel_release(ue_sk->sk);
kfree(ue_sk);
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 661a1e807bd1..1006bf70bf74 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned long)tmp)
+ if (tmp != (unsigned long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(long)tmp)
+ if (tmp != (long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned int)tmp)
+ if (tmp != (unsigned int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -253,7 +253,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(int)tmp)
+ if (tmp != (int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u16)tmp)
+ if (tmp != (u16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s16)tmp)
+ if (tmp != (s16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u8)tmp)
+ if (tmp != (u8)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s8)tmp)
+ if (tmp != (s8)tmp)
return -ERANGE;
*res = tmp;
return 0;
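
The simplified checks behave exactly as before: the string is parsed into a
wide integer and rejected when the value does not survive the narrowing cast.
A quick illustration, not part of the patch:

	u8 v;
	int ret;

	ret = kstrtou8("250", 10, &v);	/* 0, v == 250 */
	ret = kstrtou8("300", 10, &v);	/* -ERANGE, since 300 != (u8)300 */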
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 9f79547d1b97..f0a2934605bf 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -71,6 +71,12 @@ static void __exit libcrc32c_mod_fini(void)
crypto_free_shash(tfm);
}
+const char *crc32c_impl(void)
+{
+ return crypto_shash_driver_name(tfm);
+}
+EXPORT_SYMBOL(crc32c_impl);
+
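
A hedged example of the new helper, not part of the patch: a caller reporting
which crc32c backend the crypto layer selected.

	/* e.g. in a filesystem mount path, purely illustrative */
	pr_info("checksum algorithm: crc32c (%s)\n", crc32c_impl());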
module_init(libcrc32c_mod_init);
module_exit(libcrc32c_mod_fini);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index a34db8d27667..5d5424b51b74 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -21,13 +21,13 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
if (CHECK_DATA_CORRUPTION(next->prev != prev,
- "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+ "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
- "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+ "list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
next, prev->next, prev) ||
CHECK_DATA_CORRUPTION(new == prev || new == next,
- "list_add double add: new=%p, prev=%p, next=%p.\n",
+ "list_add double add: new=%px, prev=%px, next=%px.\n",
new, prev, next))
return false;
@@ -43,16 +43,16 @@ bool __list_del_entry_valid(struct list_head *entry)
next = entry->next;
if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+ "list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+ "list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
CHECK_DATA_CORRUPTION(prev->next != entry,
- "list_del corruption. prev->next should be %p, but was %p\n",
+ "list_del corruption. prev->next should be %px, but was %px\n",
entry, prev->next) ||
CHECK_DATA_CORRUPTION(next->prev != entry,
- "list_del corruption. next->prev should be %p, but was %p\n",
+ "list_del corruption. next->prev should be %px, but was %px\n",
entry, next->prev))
return false;
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index b5c1293ce147..1e1bbf171eca 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -29,7 +29,7 @@
*/
static unsigned int debug_locks_verbose;
-static DEFINE_WW_CLASS(ww_lockdep);
+static DEFINE_WD_CLASS(ww_lockdep);
static int __init setup_debug_locks_verbose(char *str)
{
diff --git a/lib/lockref.c b/lib/lockref.c
index 47169ed7e964..3d468b53d4c9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -81,6 +81,34 @@ int lockref_get_not_zero(struct lockref *lockref)
EXPORT_SYMBOL(lockref_get_not_zero);
/**
+ * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count would become zero
+ */
+int lockref_put_not_zero(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count > 1) {
+ lockref->count--;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_put_not_zero);
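
A usage sketch with a hypothetical object and free_obj() teardown: drop a
reference on the lockless fast path unless it is the last one, and only then
fall back to the locked slow path.

	bool dead;

	if (lockref_put_not_zero(&obj->ref))
		return;				/* count was > 1, lock never taken */

	/* the count would have hit zero: recheck and tear down under the lock */
	spin_lock(&obj->ref.lock);
	dead = (--obj->ref.count == 0);
	spin_unlock(&obj->ref.lock);
	if (dead)
		free_obj(obj);			/* hypothetical teardown */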
+
+/**
* lockref_get_or_lock - Increments count unless the count is 0 or dead
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if count was zero
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
new file mode 100644
index 000000000000..feea48fd1a0d
--- /dev/null
+++ b/lib/logic_pio.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ */
+
+#define pr_fmt(fmt) "LOGIC PIO: " fmt
+
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/logic_pio.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+/* The unique hardware address list */
+static LIST_HEAD(io_range_list);
+static DEFINE_MUTEX(io_range_mutex);
+
+/* Consider a kernel general helper for this */
+#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
+
+/**
+ * logic_pio_register_range - register logical PIO range for a host
+ * @new_range: pointer to the IO range to be registered.
+ *
+ * Returns 0 on success, the error code in case of failure.
+ *
+ * Register a new IO range node in the IO range list.
+ */
+int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+{
+ struct logic_pio_hwaddr *range;
+ resource_size_t start;
+ resource_size_t end;
+ resource_size_t mmio_sz = 0;
+ resource_size_t iio_sz = MMIO_UPPER_LIMIT;
+ int ret = 0;
+
+ if (!new_range || !new_range->fwnode || !new_range->size)
+ return -EINVAL;
+
+ start = new_range->hw_start;
+ end = new_range->hw_start + new_range->size;
+
+ mutex_lock(&io_range_mutex);
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->fwnode == new_range->fwnode) {
+ /* range already there */
+ goto end_register;
+ }
+ if (range->flags == LOGIC_PIO_CPU_MMIO &&
+ new_range->flags == LOGIC_PIO_CPU_MMIO) {
+ /* for MMIO ranges we need to check for overlap */
+ if (start >= range->hw_start + range->size ||
+ end < range->hw_start) {
+ mmio_sz += range->size;
+ } else {
+ ret = -EFAULT;
+ goto end_register;
+ }
+ } else if (range->flags == LOGIC_PIO_INDIRECT &&
+ new_range->flags == LOGIC_PIO_INDIRECT) {
+ iio_sz += range->size;
+ }
+ }
+
+ /* range not registered yet, check for available space */
+ if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
+ if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+ /* if it's too big check if 64K space can be reserved */
+ if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->size = SZ_64K;
+ pr_warn("Requested IO range too big, new size set to 64K\n");
+ }
+ new_range->io_start = mmio_sz;
+ } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
+ if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->io_start = iio_sz;
+ } else {
+ /* invalid flag */
+ ret = -EINVAL;
+ goto end_register;
+ }
+
+ list_add_tail_rcu(&new_range->list, &io_range_list);
+
+end_register:
+ mutex_unlock(&io_range_mutex);
+ return ret;
+}
+
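
A hedged sketch of a host driver registering an indirect range; dev, res, host
and my_pio_ops are hypothetical, the structure fields are the ones this file
consumes.

	static struct logic_pio_hwaddr range;

	range.fwnode   = dev_fwnode(dev);
	range.hw_start = res->start;
	range.size     = resource_size(res);
	range.flags    = LOGIC_PIO_INDIRECT;
	range.ops      = &my_pio_ops;		/* in/out/ins/outs accessors */
	range.hostdata = host;

	ret = logic_pio_register_range(&range);
	if (ret)
		return ret;

	/* logical port numbers for this host now start at range.io_start */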
+/**
+ * find_io_range_by_fwnode - find logical PIO range for given FW node
+ * @fwnode: FW node handle associated with logical PIO range
+ *
+ * Returns pointer to node on success, NULL otherwise.
+ *
+ * Traverse the io_range_list to find the registered node for @fwnode.
+ */
+struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct logic_pio_hwaddr *range;
+
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->fwnode == fwnode)
+ return range;
+ }
+ return NULL;
+}
+
+/* Return a registered range given an input PIO token */
+static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
+{
+ struct logic_pio_hwaddr *range;
+
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (in_range(pio, range->io_start, range->size))
+ return range;
+ }
+ pr_err("PIO entry token %lx invalid\n", pio);
+ return NULL;
+}
+
+/**
+ * logic_pio_to_hwaddr - translate logical PIO to HW address
+ * @pio: logical PIO value
+ *
+ * Returns HW address if valid, ~0 otherwise.
+ *
+ * Translate the input logical PIO to the corresponding hardware address.
+ * The input PIO should be unique in the whole logical PIO space.
+ */
+resource_size_t logic_pio_to_hwaddr(unsigned long pio)
+{
+ struct logic_pio_hwaddr *range;
+
+ range = find_io_range(pio);
+ if (range)
+ return range->hw_start + pio - range->io_start;
+
+ return (resource_size_t)~0;
+}
+
+/**
+ * logic_pio_trans_hwaddr - translate HW address to logical PIO
+ * @fwnode: FW node reference for the host
+ * @addr: Host-relative HW address
+ * @size: size to translate
+ *
+ * Returns Logical PIO value if successful, ~0UL otherwise
+ */
+unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t addr, resource_size_t size)
+{
+ struct logic_pio_hwaddr *range;
+
+ range = find_io_range_by_fwnode(fwnode);
+ if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
+ pr_err("IO range not found or invalid\n");
+ return ~0UL;
+ }
+ if (range->size < size) {
+ pr_err("resource size %pa cannot fit in IO range size %pa\n",
+ &size, &range->size);
+ return ~0UL;
+ }
+ return addr - range->hw_start + range->io_start;
+}
+
+unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
+{
+ struct logic_pio_hwaddr *range;
+
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->flags != LOGIC_PIO_CPU_MMIO)
+ continue;
+ if (in_range(addr, range->hw_start, range->size))
+ return addr - range->hw_start + range->io_start;
+ }
+ pr_err("addr %llx not registered in io_range_list\n",
+ (unsigned long long) addr);
+ return ~0UL;
+}
+
+#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
+#define BUILD_LOGIC_IO(bw, type) \
+type logic_in##bw(unsigned long addr) \
+{ \
+ type ret = (type)~0; \
+ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ ret = read##bw(PCI_IOBASE + addr); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ ret = entry->ops->in(entry->hostdata, \
+ addr, sizeof(type)); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+ return ret; \
+} \
+ \
+void logic_out##bw(type value, unsigned long addr) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ write##bw(value, PCI_IOBASE + addr); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ entry->ops->out(entry->hostdata, \
+ addr, value, sizeof(type)); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+} \
+ \
+void logic_ins##bw(unsigned long addr, void *buffer, \
+ unsigned int count) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ reads##bw(PCI_IOBASE + addr, buffer, count); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ entry->ops->ins(entry->hostdata, \
+ addr, buffer, sizeof(type), count); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+ \
+} \
+ \
+void logic_outs##bw(unsigned long addr, const void *buffer, \
+ unsigned int count) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ writes##bw(PCI_IOBASE + addr, buffer, count); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry && entry->ops) \
+ entry->ops->outs(entry->hostdata, \
+ addr, buffer, sizeof(type), count); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+}
+
+BUILD_LOGIC_IO(b, u8)
+EXPORT_SYMBOL(logic_inb);
+EXPORT_SYMBOL(logic_insb);
+EXPORT_SYMBOL(logic_outb);
+EXPORT_SYMBOL(logic_outsb);
+
+BUILD_LOGIC_IO(w, u16)
+EXPORT_SYMBOL(logic_inw);
+EXPORT_SYMBOL(logic_insw);
+EXPORT_SYMBOL(logic_outw);
+EXPORT_SYMBOL(logic_outsw);
+
+BUILD_LOGIC_IO(l, u32)
+EXPORT_SYMBOL(logic_inl);
+EXPORT_SYMBOL(logic_insl);
+EXPORT_SYMBOL(logic_outl);
+EXPORT_SYMBOL(logic_outsl);
+
+#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 28ba40b99337..2b10a4024c35 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -119,7 +119,7 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
if (!slot)
goto out_fail;
- element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
+ element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
if (!element)
goto out_fail;
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 141734d255e4..0c9d3ad17e0f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -43,30 +43,36 @@
/*-*****************************
* Decompression functions
*******************************/
-/* LZ4_decompress_generic() :
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is important this generic function is really inlined,
+
+#define DEBUGLOG(l, ...) {} /* disabled */
+
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+
+/*
+ * LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
* in order to remove useless branches during compilation optimization.
*/
static FORCE_INLINE int LZ4_decompress_generic(
- const char * const source,
- char * const dest,
- int inputSize,
+ const char * const src,
+ char * const dst,
+ int srcSize,
/*
* If endOnInput == endOnInputSize,
- * this value is the max size of Output Buffer.
+ * this value is `dstCapacity`
*/
int outputSize,
/* endOnOutputSize, endOnInputSize */
- int endOnInput,
+ endCondition_directive endOnInput,
/* full, partial */
- int partialDecoding,
- /* only used if partialDecoding == partial */
- int targetOutputSize,
+ earlyEnd_directive partialDecoding,
/* noDict, withPrefix64k, usingExtDict */
- int dict,
- /* == dest when no prefix */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
const BYTE * const lowPrefix,
/* only if dict == usingExtDict */
const BYTE * const dictStart,
@@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
const size_t dictSize
)
{
- /* Local Variables */
- const BYTE *ip = (const BYTE *) source;
- const BYTE * const iend = ip + inputSize;
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE * const iend = ip + srcSize;
- BYTE *op = (BYTE *) dest;
+ BYTE *op = (BYTE *) dst;
BYTE * const oend = op + outputSize;
BYTE *cpy;
- BYTE *oexit = op + targetOutputSize;
- const BYTE * const lowLimit = lowPrefix - dictSize;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
- static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+ static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
const int safeDecode = (endOnInput == endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend = iend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend = oend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
+ srcSize, outputSize);
+
/* Special cases */
- /* targetOutputSize too high => decode everything */
- if ((partialDecoding) && (oexit > oend - MFLIMIT))
- oexit = oend - MFLIMIT;
+ assert(lowPrefix <= op);
+ assert(src != NULL);
/* Empty output buffer */
if ((endOnInput) && (unlikely(outputSize == 0)))
- return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
if ((!endOnInput) && (unlikely(outputSize == 0)))
return (*ip == 0 ? 1 : -1);
+ if ((endOnInput) && unlikely(srcSize == 0))
+ return -1;
+
/* Main Loop : decode sequences */
while (1) {
size_t length;
@@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* get literal length */
unsigned int const token = *ip++;
-
length = token>>ML_BITS;
+ /* ip < iend before the increment */
+ assert(!endOnInput || ip <= iend);
+
+ /*
+ * A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough
+ * space, enter the shortcut and copy 16 bytes on behalf
+ * of the literals (in the fast mode, only 8 bytes can be
+ * safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes
+ * in a similar manner; but we ensure that there's enough
+ * space in the output for those 18 bytes earlier, upon
+ * entering the shortcut (in other words, there is a
+ * combined check for both stages).
+ */
+ if ((endOnInput ? length != RUN_MASK : length <= 8)
+ /*
+ * strictly "less than" on input, to re-enter
+ * the loop with at least one byte
+ */
+ && likely((endOnInput ? ip < shortiend : 1) &
+ (op <= shortoend))) {
+ /* Copy the literals */
+ memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /*
+ * The second stage:
+ * prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted.
+ */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) &&
+ (offset >= 8) &&
+ (dict == withPrefix64k || match >= lowPrefix)) {
+ /* Copy the match. */
+ memcpy(op + 0, match + 0, 8);
+ memcpy(op + 8, match + 8, 8);
+ memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /*
+ * The second stage didn't work out, but the info
+ * is ready. Propel it right to the point of match
+ * copying.
+ */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
if (length == RUN_MASK) {
unsigned int s;
+ if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
+ /* overflow detection */
+ goto _output_error;
+ }
do {
s = *ip++;
length += s;
@@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic(
: 1) & (s == 255));
if ((safeDecode)
- && unlikely(
- (size_t)(op + length) < (size_t)(op))) {
+ && unlikely((uptrval)(op) +
+ length < (uptrval)(op))) {
/* overflow detection */
goto _output_error;
}
if ((safeDecode)
- && unlikely(
- (size_t)(ip + length) < (size_t)(ip))) {
+ && unlikely((uptrval)(ip) +
+ length < (uptrval)(ip))) {
/* overflow detection */
goto _output_error;
}
@@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* copy literals */
cpy = op + length;
- if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+
+ if (((endOnInput) && ((cpy > oend - MFLIMIT)
|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
if (partialDecoding) {
if (cpy > oend) {
/*
- * Error :
- * write attempt beyond end of output buffer
+ * Partial decoding :
+ * stop in the middle of literal segment
*/
- goto _output_error;
+ cpy = oend;
+ length = oend - op;
}
if ((endOnInput)
&& (ip + length > iend)) {
@@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, ip, length);
ip += length;
op += length;
+
/* Necessarily EOF, due to parsing restrictions */
- break;
+ if (!partialDecoding || (cpy == oend))
+ break;
+ } else {
+ /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
+ op = cpy;
}
- LZ4_wildCopy(op, ip, cpy);
- ip += length;
- op = cpy;
-
/* get offset */
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
- if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* get matchlength */
+ length = token & ML_MASK;
+
+_copy_match:
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
/* Error : offset outside buffers */
goto _output_error;
}
/* costs ~1%; silence an msan warning when offset == 0 */
- LZ4_write32(op, (U32)offset);
+ /*
+ * note : when partialDecoding, there is no guarantee that
+ * at least 4 bytes remain available in output buffer
+ */
+ if (!partialDecoding) {
+ assert(oend > op);
+ assert(oend - op >= 4);
+
+ LZ4_write32(op, (U32)offset);
+ }
- /* get matchlength */
- length = token & ML_MASK;
if (length == ML_MASK) {
unsigned int s;
@@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
if ((safeDecode)
&& unlikely(
- (size_t)(op + length) < (size_t)op)) {
+ (uptrval)(op) + length < (uptrval)op)) {
/* overflow detection */
goto _output_error;
}
@@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic(
length += MINMATCH;
- /* check external dictionary */
+ /* match starting within external dictionary */
if ((dict == usingExtDict) && (match < lowPrefix)) {
if (unlikely(op + length > oend - LASTLITERALS)) {
/* doesn't respect parsing restriction */
- goto _output_error;
+ if (!partialDecoding)
+ goto _output_error;
+ length = min(length, (size_t)(oend - op));
}
if (length <= (size_t)(lowPrefix - match)) {
/*
- * match can be copied as a single segment
- * from external dictionary
+ * match fits entirely within external
+ * dictionary : just copy
*/
memmove(op, dictEnd - (lowPrefix - match),
length);
op += length;
} else {
/*
- * match encompass external
+ * match stretches into both external
* dictionary and current block
*/
size_t const copySize = (size_t)(lowPrefix - match);
@@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
-
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
BYTE * const endOfMatch = op + restSize;
@@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += restSize;
}
}
-
continue;
}
/* copy match within block */
cpy = op + length;
- if (unlikely(offset < 8)) {
- const int dec64 = dec64table[offset];
+ /*
+ * partialDecoding :
+ * may not respect endBlock parsing restrictions
+ */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = min(length, (size_t)(oend - op));
+ const BYTE * const matchEnd = match + mlen;
+ BYTE * const copyEnd = op + mlen;
+
+ if (matchEnd > op) {
+ /* overlap copy */
+ while (op < copyEnd)
+ *op++ = *match++;
+ } else {
+ memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend)
+ break;
+ continue;
+ }
+ if (unlikely(offset < 8)) {
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
- match += dec32table[offset];
+ match += inc32table[offset];
memcpy(op + 4, match, 4);
- match -= dec64;
+ match -= dec64table[offset];
} else {
LZ4_copy8(op, match);
match += 8;
@@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += 8;
- if (unlikely(cpy > oend - 12)) {
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
if (cpy > oend - LASTLITERALS) {
@@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic(
match += oCopyLimit - op;
op = oCopyLimit;
}
-
while (op < cpy)
*op++ = *match++;
} else {
LZ4_copy8(op, match);
-
if (length > 16)
LZ4_wildCopy(op + 8, match + 8, cpy);
}
-
- op = cpy; /* correction */
+ op = cpy; /* wildcopy correction */
}
/* end of decoding */
if (endOnInput) {
/* Nb of output bytes decoded */
- return (int) (((char *)op) - dest);
+ return (int) (((char *)op) - dst);
} else {
/* Nb of input bytes read */
- return (int) (((const char *)ip) - source);
+ return (int) (((const char *)ip) - src);
}
/* Overflow error detected */
_output_error:
- return -1;
+ return (int) (-(((const char *)ip) - src)) - 1;
}
int LZ4_decompress_safe(const char *source, char *dest,
int compressedSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block,
+ noDict, (BYTE *)dest, NULL, 0);
}
-int LZ4_decompress_safe_partial(const char *source, char *dest,
- int compressedSize, int targetOutputSize, int maxDecompressedSize)
+int LZ4_decompress_safe_partial(const char *src, char *dst,
+ int compressedSize, int targetOutputSize, int dstCapacity)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, partial,
- targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+ dstCapacity = min(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE *)dst, NULL, 0);
}
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0, withPrefix64k,
- (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+ endOnOutputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+/* ===== Instantiate a few more decoding cases, used more than once. ===== */
+
+int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
+ int compressedSize,
+ int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
}
+static int LZ4_decompress_fast_extDict(const char *source, char *dest,
+ int originalSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/*
+ * The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+static FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+static FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
+ int originalSize, size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/* ===== streaming decompression functions ===== */
+
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize)
{
- LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
lz4sd->prefixSize = (size_t) dictSize;
lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int compressedSize, int maxOutputSize)
{
- LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest,
- compressedSize,
- maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict,
- lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 * KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source,
+ dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest,
+ compressedSize, maxOutputSize,
+ lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += result;
- lz4sd->prefixEnd += result;
+ lz4sd->prefixEnd += result;
} else {
+ /*
+ * The buffer wraps around, or they're
+ * switching to another buffer.
+ */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest,
+ result = LZ4_decompress_safe_forceExtDict(source, dest,
compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE *)dest,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
}
return result;
@@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict,
- lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ if (lz4sd->prefixSize >= 64 * KB - 1 ||
+ lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest,
+ originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest,
+ originalSize, lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += originalSize;
- lz4sd->prefixEnd += originalSize;
+ lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE *)dest,
- lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest,
+ originalSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = originalSize;
- lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
-
return result;
}
-/*
- * Advanced decoding functions :
- * *_usingDict() :
- * These decoding functions work the same as "_continue" ones,
- * the dictionary must be explicitly provided within parameters
- */
-static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
- char *dest, int compressedSize, int maxOutputSize, int safe,
- const char *dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
{
if (dictSize == 0)
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
- if (dictStart + dictSize == dest) {
- if (dictSize >= (int)(64 * KB - 1))
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, noDict,
- (BYTE *)dest - dictSize, NULL, 0);
+ return LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 * KB - 1)
+ return LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest,
+ compressedSize, maxOutputSize, dictSize);
}
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, usingExtDict,
- (BYTE *)dest, (const BYTE *)dictStart, dictSize);
-}
-
-int LZ4_decompress_safe_usingDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const char *dictStart, int dictSize)
-{
- return LZ4_decompress_usingDict_generic(source, dest,
- compressedSize, maxOutputSize, 1, dictStart, dictSize);
+ return LZ4_decompress_safe_forceExtDict(source, dest,
+ compressedSize, maxOutputSize, dictStart, dictSize);
}
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize, const char *dictStart, int dictSize)
+ int originalSize,
+ const char *dictStart, int dictSize)
{
- return LZ4_decompress_usingDict_generic(source, dest, 0,
- originalSize, 0, dictStart, dictSize);
+ if (dictSize == 0 || dictStart + dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+
+ return LZ4_decompress_fast_extDict(source, dest, originalSize,
+ dictStart, dictSize);
}
#ifndef STATIC
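
The continuation paths above pick a decoder based on where @dest sits relative to previously decoded data: extending the current prefix, starting fresh, or moving to a different buffer (external dictionary). A minimal streaming sketch, not part of this patch, using the kernel LZ4 API from <linux/lz4.h>; the block array, sizes and count are hypothetical inputs:

#include <linux/lz4.h>

/*
 * Sketch only: decode @nblocks compressed blocks back to back into @dst,
 * so every call after the first takes the "rolling the current segment"
 * branch of LZ4_decompress_safe_continue() above.
 */
static int example_decode_stream(const char **blocks, const int *sizes,
				 int nblocks, char *dst, int dst_len)
{
	LZ4_streamDecode_t stream;
	char *out = dst;
	int i, n;

	LZ4_setStreamDecode(&stream, NULL, 0);	/* empty prefix, no dictionary */
	for (i = 0; i < nblocks; i++) {
		n = LZ4_decompress_safe_continue(&stream, blocks[i], out,
						 sizes[i], dst + dst_len - out);
		if (n <= 0)
			return -EINVAL;		/* corrupt or truncated block */
		out += n;			/* keep the prefix contiguous */
	}
	return out - dst;
}
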
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 00a0b58a0871..1a7fa9d9170f 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -75,6 +75,11 @@ typedef uintptr_t uptrval;
#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+/*
+ * ensure it's possible to write 2 x wildcopyLength
+ * without overflowing output buffer
+ */
+#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
/* Increase this value ==> compression run slower on incompressible data */
#define LZ4_SKIPTRIGGER 6
@@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
#endif
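
For reference, the new constant is a fixed 12-byte margin, assuming MINMATCH is 4 as defined earlier in lz4defs.h (outside this hunk); a compile-time sketch, not part of the patch:

/*
 * Sketch only: (2 * WILDCOPYLENGTH) - MINMATCH = (2 * 8) - 4 = 12, so a
 * match may be copied with the 8-byte wild copy only while the write
 * cursor stays at least 12 bytes clear of the end of the output buffer.
 * LZ4_STATIC_ASSERT() wraps BUILD_BUG_ON() and so must sit in a function.
 */
static void example_check_safeguard(void)
{
	LZ4_STATIC_ASSERT(MATCH_SAFEGUARD_DISTANCE == 12);
}
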
diff --git a/lib/memcat_p.c b/lib/memcat_p.c
new file mode 100644
index 000000000000..b810fbc66962
--- /dev/null
+++ b/lib/memcat_p.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+/*
+ * Merge two NULL-terminated pointer arrays into a newly allocated
+ * array, which is also NULL-terminated. Nomenclature is inspired by
+ * memset_p() and memcat() found elsewhere in the kernel source tree.
+ */
+void **__memcat_p(void **a, void **b)
+{
+ void **p = a, **new;
+ int nr;
+
+ /* count the elements in both arrays */
+ for (nr = 0, p = a; *p; nr++, p++)
+ ;
+ for (p = b; *p; nr++, p++)
+ ;
+ /* one for the NULL-terminator */
+ nr++;
+
+ new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ /* nr -> last index; p points to NULL in b[] */
+ for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
+ new[nr] = *p;
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(__memcat_p);
+
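
A minimal usage sketch, not part of the patch; the string arrays are hypothetical and the declaration of __memcat_p() is assumed to live in <linux/string.h>:

#include <linux/slab.h>
#include <linux/string.h>	/* assumed home of the __memcat_p() declaration */

/*
 * Sketch only: merge two NULL-terminated arrays of strings. The result is
 * freshly allocated and NULL-terminated; the caller owns it and must
 * kfree() it when done.
 */
static char **example_merge(char **a, char **b)
{
	char **all = (char **)__memcat_p((void **)a, (void **)b);

	if (!all)
		return NULL;		/* allocation failure */
	return all;			/* kfree() when no longer needed */
}
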
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 7eceeddb3fb8..c2d6f4efcfbc 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -65,13 +65,6 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
-static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
-{
- if (a->alloced < b)
- return mpi_resize(a, b);
- return 0;
-}
-
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
do { \
@@ -80,13 +73,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
(d)[_i] = (s)[_i]; \
} while (0)
-#define MPN_COPY_INCR(d, s, n) \
- do { \
- mpi_size_t _i; \
- for (_i = 0; _i < (n); _i++) \
- (d)[_i] = (s)[_i]; \
- } while (0)
-
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@@ -111,15 +97,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
} \
} while (0)
-#define MPN_NORMALIZE_NOT_ZERO(d, n) \
- do { \
- for (;;) { \
- if ((d)[(n)-1]) \
- break; \
- (n)--; \
- } \
- } while (0)
-
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
do { \
if ((size) < KARATSUBA_THRESHOLD) \
@@ -128,46 +105,11 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
mul_n(prodp, up, vp, size, tspace); \
} while (0);
-/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
- * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
- * If this would yield overflow, DI should be the largest possible number
- * (i.e., only ones). For correct operation, the most significant bit of D
- * has to be set. Put the quotient in Q and the remainder in R.
- */
-#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
- do { \
- mpi_limb_t _q, _ql, _r; \
- mpi_limb_t _xh, _xl; \
- umul_ppmm(_q, _ql, (nh), (di)); \
- _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
- umul_ppmm(_xh, _xl, _q, (d)); \
- sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
- if (_xh) { \
- sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
- _q++; \
- if (_xh) { \
- sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
- _q++; \
- } \
- } \
- if (_r >= (d)) { \
- _r -= (d); \
- _q++; \
- } \
- (r) = _r; \
- (q) = _q; \
- } while (0)
-
/*-- mpiutil.c --*/
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
void mpi_free_limb_space(mpi_ptr_t a);
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs);
-/*-- mpi-bit.c --*/
-void mpi_rshift_limbs(MPI a, unsigned int count);
-int mpi_lshift_limbs(MPI a, unsigned int count);
-
-/*-- mpihelp-add.c --*/
static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -175,7 +117,6 @@ mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_ptr_t s2_ptr, mpi_size_t s2_size);
-/*-- mpihelp-sub.c --*/
static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
@@ -183,10 +124,10 @@ mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_ptr_t s2_ptr, mpi_size_t s2_size);
-/*-- mpihelp-cmp.c --*/
+/*-- mpih-cmp.c --*/
int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size);
-/*-- mpihelp-mul.c --*/
+/*-- mpih-mul.c --*/
struct karatsuba_ctx {
struct karatsuba_ctx *next;
@@ -202,7 +143,6 @@ mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
-int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result);
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
@@ -214,21 +154,16 @@ int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t vp, mpi_size_t vsize,
struct karatsuba_ctx *ctx);
-/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/
+/*-- generic_mpih-mul1.c --*/
mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb);
-/*-- mpihelp-div.c --*/
-mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb);
+/*-- mpih-div.c --*/
mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
mpi_ptr_t np, mpi_size_t nsize,
mpi_ptr_t dp, mpi_size_t dsize);
-mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
- mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb);
-/*-- mpihelp-shift.c --*/
+/*-- generic_mpih-[lr]shift.c --*/
mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
unsigned cnt);
mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 468fb7cd1221..a5c921e6d667 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -41,7 +41,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
- int esign, msign, bsign, rsign;
+ int msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
@@ -53,7 +53,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
- esign = exp->sign;
msign = mod->sign;
rp = res->d;
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 314f4dfa603e..20ed0f766787 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -91,14 +91,14 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0; /* no need to do it */
if (a->d) {
- p = kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
+ p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
kzfree(a->d);
a->d = p;
} else {
- a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
+ a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!a->d)
return -ENOMEM;
}
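
The allocation changes here are purely about integer-overflow safety; a minimal sketch, not part of the patch, with an unsigned long standing in for mpi_limb_t:

#include <linux/slab.h>

/*
 * Sketch only: kmalloc_array()/kcalloc() return NULL when nlimbs *
 * sizeof(...) would overflow, instead of silently allocating a short
 * buffer the way an open-coded kmalloc(nlimbs * sizeof(...)) can.
 */
static unsigned long *example_alloc_limbs(unsigned int nlimbs, bool zeroed)
{
	if (zeroed)
		return kcalloc(nlimbs, sizeof(unsigned long), GFP_KERNEL);
	return kmalloc_array(nlimbs, sizeof(unsigned long), GFP_KERNEL);
}
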
diff --git a/lib/nlattr.c b/lib/nlattr.c
index dfa55c873c13..d26de6156b97 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -45,12 +45,11 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
};
static int validate_nla_bitfield32(const struct nlattr *nla,
- u32 *valid_flags_allowed)
+ const u32 *valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
- u32 *valid_flags_mask = valid_flags_allowed;
- if (!valid_flags_allowed)
+ if (!valid_flags_mask)
return -EINVAL;
/*disallow invalid bit selector */
@@ -68,11 +67,99 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
return 0;
}
+static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ const struct nlattr *entry;
+ int rem;
+
+ nla_for_each_attr(entry, head, len, rem) {
+ int ret;
+
+ if (nla_len(entry) == 0)
+ continue;
+
+ if (nla_len(entry) < NLA_HDRLEN) {
+ NL_SET_ERR_MSG_ATTR(extack, entry,
+ "Array element too short");
+ return -ERANGE;
+ }
+
+ ret = nla_validate(nla_data(entry), nla_len(entry),
+ maxtype, policy, extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nla_validate_int_range(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ bool validate_min, validate_max;
+ s64 value;
+
+ validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_MIN;
+ validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_MAX;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_S8:
+ value = nla_get_s8(nla);
+ break;
+ case NLA_S16:
+ value = nla_get_s16(nla);
+ break;
+ case NLA_S32:
+ value = nla_get_s32(nla);
+ break;
+ case NLA_S64:
+ value = nla_get_s64(nla);
+ break;
+ case NLA_U64:
+ /* treat this one specially, since it may not fit into s64 */
+ if ((validate_min && nla_get_u64(nla) < pt->min) ||
+ (validate_max && nla_get_u64(nla) > pt->max)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if ((validate_min && value < pt->min) ||
+ (validate_max && value > pt->max)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
static int validate_nla(const struct nlattr *nla, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+ int err = -ERANGE;
if (type <= 0 || type > maxtype)
return 0;
@@ -81,22 +168,40 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
- if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+ if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
+ (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
}
switch (pt->type) {
+ case NLA_EXACT_LEN:
+ if (attrlen != pt->len)
+ goto out_err;
+ break;
+
+ case NLA_REJECT:
+ if (extack && pt->validation_data) {
+ NL_SET_BAD_ATTR(extack, nla);
+ extack->_msg = pt->validation_data;
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ goto out_err;
+
case NLA_FLAG:
if (attrlen > 0)
- return -ERANGE;
+ goto out_err;
break;
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
- return -ERANGE;
+ goto out_err;
- return validate_nla_bitfield32(nla, pt->validation_data);
+ err = validate_nla_bitfield32(nla, pt->validation_data);
+ if (err)
+ goto out_err;
+ break;
case NLA_NUL_STRING:
if (pt->len)
@@ -104,13 +209,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
else
minlen = attrlen;
- if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
- return -EINVAL;
+ if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
+ err = -EINVAL;
+ goto out_err;
+ }
/* fall through */
case NLA_STRING:
if (attrlen < 1)
- return -ERANGE;
+ goto out_err;
if (pt->len) {
char *buf = nla_data(nla);
@@ -119,32 +226,58 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
attrlen--;
if (attrlen > pt->len)
- return -ERANGE;
+ goto out_err;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
- return -ERANGE;
+ goto out_err;
break;
- case NLA_NESTED_COMPAT:
- if (attrlen < pt->len)
- return -ERANGE;
- if (attrlen < NLA_ALIGN(pt->len))
- break;
- if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
- return -ERANGE;
- nla = nla_data(nla) + NLA_ALIGN(pt->len);
- if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
- return -ERANGE;
- break;
case NLA_NESTED:
/* a nested attributes is allowed to be empty; if its not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->validation_data) {
+ err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
+ pt->validation_data, extack);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
+ case NLA_NESTED_ARRAY:
+ /* a nested array attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->validation_data) {
+ int err;
+
+ err = nla_validate_array(nla_data(nla), nla_len(nla),
+ pt->len, pt->validation_data,
+ extack);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
default:
if (pt->len)
minlen = pt->len;
@@ -152,10 +285,34 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
- return -ERANGE;
+ goto out_err;
+ }
+
+ /* further validation */
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_NONE:
+ /* nothing to do */
+ break;
+ case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_MIN:
+ case NLA_VALIDATE_MAX:
+ err = nla_validate_int_range(pt, nla, extack);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_FUNCTION:
+ if (pt->validate) {
+ err = pt->validate(nla, extack);
+ if (err)
+ return err;
+ }
+ break;
}
return 0;
+out_err:
+ NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ return err;
}
/**
@@ -180,13 +337,10 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
int rem;
nla_for_each_attr(nla, head, len, rem) {
- int err = validate_nla(nla, maxtype, policy);
+ int err = validate_nla(nla, maxtype, policy, extack);
- if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
+ if (err < 0)
return err;
- }
}
return 0;
@@ -237,42 +391,63 @@ EXPORT_SYMBOL(nla_policy_len);
*
* Returns 0 on success or a negative error code.
*/
-int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+static int __nla_parse(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ bool strict, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
- int rem, err;
+ int rem;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
- if (type > 0 && type <= maxtype) {
- if (policy) {
- err = validate_nla(nla, maxtype, policy);
- if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
- goto errout;
- }
+ if (type == 0 || type > maxtype) {
+ if (strict) {
+ NL_SET_ERR_MSG(extack, "Unknown attribute type");
+ return -EINVAL;
}
+ continue;
+ }
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy, extack);
- tb[type] = (struct nlattr *)nla;
+ if (err < 0)
+ return err;
}
+
+ tb[type] = (struct nlattr *)nla;
}
- if (unlikely(rem > 0))
+ if (unlikely(rem > 0)) {
pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
rem, current->comm);
+ NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
+ if (strict)
+ return -EINVAL;
+ }
- err = 0;
-errout:
- return err;
+ return 0;
+}
+
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, false, policy, extack);
}
EXPORT_SYMBOL(nla_parse);
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, true, policy, extack);
+}
+EXPORT_SYMBOL(nla_parse_strict);
+
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
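
A minimal policy sketch, not part of the patch, exercising the new range validation and strict parsing; the HYPO_ATTR_* names and bounds are hypothetical:

#include <net/netlink.h>

/*
 * Sketch only: hypothetical attribute set. HYPO_ATTR_LEVEL must be a u8
 * in [1, 100]; with nla_parse_strict(), unknown attribute types and
 * trailing bytes now fail the parse instead of being silently skipped.
 */
enum { HYPO_ATTR_UNSPEC, HYPO_ATTR_LEVEL, HYPO_ATTR_NAME, __HYPO_ATTR_MAX };
#define HYPO_ATTR_MAX (__HYPO_ATTR_MAX - 1)

static const struct nla_policy hypo_policy[HYPO_ATTR_MAX + 1] = {
	[HYPO_ATTR_LEVEL] = { .type = NLA_U8,
			      .validation_type = NLA_VALIDATE_RANGE,
			      .min = 1, .max = 100 },
	[HYPO_ATTR_NAME]  = { .type = NLA_NUL_STRING, .len = 31 },
};

static int hypo_parse(struct nlattr **tb, const struct nlattr *head, int len,
		      struct netlink_ext_ack *extack)
{
	return nla_parse_strict(tb, HYPO_ATTR_MAX, head, len,
				hypo_policy, extack);
}
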
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 61a6b5aab07e..15ca78e1c7d4 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -87,11 +87,9 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
- static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- arch_spin_lock(&lock);
if (regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -102,7 +100,6 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/parser.c b/lib/parser.c
index 3278958b472a..dd70e5e6c9e2 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base)
char *buf;
int ret;
long val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = 0;
val = simple_strtol(buf, &endp, base);
@@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
char *buf;
int ret;
u64 val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = kstrtoull(buf, base, &val);
if (!ret)
@@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy);
*/
char *match_strdup(const substring_t *s)
{
- size_t sz = s->to - s->from + 1;
- char *p = kmalloc(sz, GFP_KERNEL);
- if (p)
- match_strlcpy(p, s, sz);
- return p;
+ return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
}
EXPORT_SYMBOL(match_strdup);
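
Callers are unaffected by the kmemdup_nul() conversion; a minimal usage sketch, not part of the patch, with a hypothetical option argument:

#include <linux/kernel.h>
#include <linux/parser.h>
#include <linux/slab.h>

/* Sketch only: match_strdup() still returns a kmalloc'ed, NUL-terminated copy. */
static int example_copy_option(substring_t *args)
{
	char *opt = match_strdup(&args[0]);

	if (!opt)
		return -ENOMEM;
	pr_debug("option value: %s\n", opt);
	kfree(opt);			/* the caller still owns and frees it */
	return 0;
}
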
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7bc000..de10b8c0bff6 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -356,11 +356,35 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ percpu_ref_resurrect(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_resurrect - modify a percpu refcount from dead to live
+ * @ref: perpcu_ref to resurrect
+ *
+ * Modify @ref so that it's in the same state as before percpu_ref_kill() was
+ * called. @ref must be dead but must not yet have exited.
+ *
+ * If @ref->release() frees @ref then the caller is responsible for
+ * guaranteeing that @ref->release() does not get called while this
+ * function is in progress.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_resurrect(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+ WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
@@ -368,4 +392,4 @@ void percpu_ref_reinit(struct percpu_ref *ref)
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
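
A minimal kill/resurrect sketch, not part of the patch, with a hypothetical release callback; unlike percpu_ref_reinit(), the refcount does not need to have dropped to zero before switching back to live:

#include <linux/percpu-refcount.h>

/* Sketch only: hypothetical object whose refcount gets paused and resumed. */
static struct percpu_ref example_ref;

static void example_release(struct percpu_ref *ref)
{
	/* last reference gone; tear the hypothetical object down here */
}

static void example_pause_and_resume(void)
{
	/* assume percpu_ref_init(&example_ref, example_release, 0, GFP_KERNEL)
	 * succeeded earlier */

	/* stop taking new references; existing ones may still be held */
	percpu_ref_kill(&example_ref);

	/* ... quiesce, but make sure example_release() has not freed it ... */

	/* dead but not yet released, so it may be flipped back to live */
	percpu_ref_resurrect(&example_ref);
}
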
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c72577e472f2..a66595ba5543 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -4,7 +4,6 @@
*/
#include <linux/percpu_counter.h>
-#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index 6016f1deb1f5..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
- /*
- * Even though this is percpu, we need a lock for tag stealing by remote
- * CPUs:
- */
- spinlock_t lock;
-
- /* nr_free/freelist form a stack of free IDs */
- unsigned nr_free;
- unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
- unsigned *src, unsigned *src_nr,
- unsigned nr)
-{
- *src_nr -= nr;
- memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
- *dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
- struct percpu_ida_cpu *remote;
-
- for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags; cpus_have_tags--) {
- cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_first(&pool->cpus_have_tags);
- if (cpu >= nr_cpu_ids)
- BUG();
- }
-
- pool->cpu_last_stolen = cpu;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
- cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
- if (remote == tags)
- continue;
-
- spin_lock(&remote->lock);
-
- if (remote->nr_free) {
- memcpy(tags->freelist,
- remote->freelist,
- sizeof(unsigned) * remote->nr_free);
-
- tags->nr_free = remote->nr_free;
- remote->nr_free = 0;
- }
-
- spin_unlock(&remote->lock);
-
- if (tags->nr_free)
- break;
- }
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
- struct percpu_ida_cpu *tags)
-{
- move_tags(tags->freelist, &tags->nr_free,
- pool->freelist, &pool->nr_free,
- min(pool->nr_free, pool->percpu_batch_size));
-}
-
-static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
-{
- int tag = -ENOSPC;
-
- spin_lock(&tags->lock);
- if (tags->nr_free)
- tag = tags->freelist[--tags->nr_free];
- spin_unlock(&tags->lock);
-
- return tag;
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
- *
- * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
- * however long it takes until another thread frees an id (same semantics as a
- * mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
- DEFINE_WAIT(wait);
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- int tag;
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /* Fastpath */
- tag = alloc_local_tag(tags);
- if (likely(tag >= 0)) {
- local_irq_restore(flags);
- return tag;
- }
-
- while (1) {
- spin_lock(&pool->lock);
-
- /*
- * prepare_to_wait() must come before steal_tags(), in case
- * percpu_ida_free() on another cpu flips a bit in
- * cpus_have_tags
- *
- * global lock held and irqs disabled, don't need percpu lock
- */
- if (state != TASK_RUNNING)
- prepare_to_wait(&pool->wait, &wait, state);
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- if (tags->nr_free) {
- tag = tags->freelist[--tags->nr_free];
- if (tags->nr_free)
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- }
-
- spin_unlock(&pool->lock);
- local_irq_restore(flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
-
- if (signal_pending_state(state, current)) {
- tag = -ERESTARTSYS;
- break;
- }
-
- schedule();
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
- }
- if (state != TASK_RUNNING)
- finish_wait(&pool->wait, &wait);
-
- return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
- struct percpu_ida_cpu *tags;
- unsigned long flags;
- unsigned nr_free;
-
- BUG_ON(tag >= pool->nr_tags);
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- spin_lock(&tags->lock);
- tags->freelist[tags->nr_free++] = tag;
-
- nr_free = tags->nr_free;
- spin_unlock(&tags->lock);
-
- if (nr_free == 1) {
- cpumask_set_cpu(smp_processor_id(),
- &pool->cpus_have_tags);
- wake_up(&pool->wait);
- }
-
- if (nr_free == pool->percpu_max_size) {
- spin_lock(&pool->lock);
-
- /*
- * Global lock held and irqs disabled, don't need percpu
- * lock
- */
- if (tags->nr_free == pool->percpu_max_size) {
- move_tags(pool->freelist, &pool->nr_free,
- tags->freelist, &tags->nr_free,
- pool->percpu_batch_size);
-
- wake_up(&pool->wait);
- }
- spin_unlock(&pool->lock);
- }
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
- free_percpu(pool->tag_cpu);
- free_pages((unsigned long) pool->freelist,
- get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size)
-{
- unsigned i, cpu, order;
-
- memset(pool, 0, sizeof(*pool));
-
- init_waitqueue_head(&pool->wait);
- spin_lock_init(&pool->lock);
- pool->nr_tags = nr_tags;
- pool->percpu_max_size = max_size;
- pool->percpu_batch_size = batch_size;
-
- /* Guard against overflow */
- if (nr_tags > (unsigned) INT_MAX + 1) {
- pr_err("percpu_ida_init(): nr_tags too large\n");
- return -EINVAL;
- }
-
- order = get_order(nr_tags * sizeof(unsigned));
- pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!pool->freelist)
- return -ENOMEM;
-
- for (i = 0; i < nr_tags; i++)
- pool->freelist[i] = i;
-
- pool->nr_free = nr_tags;
-
- pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
- pool->percpu_max_size * sizeof(unsigned),
- sizeof(unsigned));
- if (!pool->tag_cpu)
- goto err;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
- return 0;
-err:
- percpu_ida_destroy(pool);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: interate callback function
- * @data: parameter for @fn
- *
- * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
- * ids might be missed, some might be iterated duplicated, and some might
- * be iterated and not free soon.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data)
-{
- unsigned long flags;
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- local_irq_save(flags);
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock(&remote->lock);
- for (i = 0; i < remote->nr_free; i++) {
- err = fn(remote->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock(&remote->lock);
- if (err)
- goto out;
- }
-
- spin_lock(&pool->lock);
- for (i = 0; i < pool->nr_free; i++) {
- err = fn(pool->freelist[i], data);
- if (err)
- break;
- }
- spin_unlock(&pool->lock);
-out:
- local_irq_restore(flags);
- return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
- * @pool: pool related
- * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
- *
- * Note: this just returns a snapshot of free tags number.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
- struct percpu_ida_cpu *remote;
- if (cpu == nr_cpu_ids)
- return pool->nr_free;
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e00138d593f..1106bb6aa01e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,15 +38,13 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/xarray.h>
-/* Number of nodes in fully populated tree of given height */
-static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
-
/*
* Radix tree node cache.
*/
-static struct kmem_cache *radix_tree_node_cachep;
+struct kmem_cache *radix_tree_node_cachep;
/*
* The radix tree is variable-height, so an insert operation not only has
@@ -98,29 +96,12 @@ static inline void *node_to_entry(void *ptr)
return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
-#define RADIX_TREE_RETRY node_to_entry(NULL)
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/* Sibling slots point directly to another slot in the same node */
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
- void __rcu **ptr = node;
- return (parent->slots <= ptr) &&
- (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
-}
-#else
-static inline
-bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
-{
- return false;
-}
-#endif
+#define RADIX_TREE_RETRY XA_RETRY_ENTRY
static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
- return slot - parent->slots;
+ return parent ? slot - parent->slots : 0;
}
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
@@ -129,24 +110,13 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- if (radix_tree_is_internal_node(entry)) {
- if (is_sibling_entry(parent, entry)) {
- void __rcu **sibentry;
- sibentry = (void __rcu **) entry_to_node(entry);
- offset = get_slot_offset(parent, sibentry);
- entry = rcu_dereference_raw(*sibentry);
- }
- }
-#endif
-
*nodep = (void *)entry;
return offset;
}
static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
- return root->gfp_mask & __GFP_BITS_MASK;
+ return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}
static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
@@ -169,32 +139,32 @@ static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
- root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
+ root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
- root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
+ root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
- root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
+ root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}
static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
- return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
+ return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}
static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
- return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
+ return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}
static inline bool is_idr(const struct radix_tree_root *root)
{
- return !!(root->gfp_mask & ROOT_IS_IDR);
+ return !!(root->xa_flags & ROOT_IS_IDR);
}
/*
@@ -254,7 +224,7 @@ radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
- return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
+ return iter->index & RADIX_TREE_MAP_MASK;
}
/*
@@ -277,99 +247,6 @@ static unsigned long next_index(unsigned long index,
return (index & ~node_maxindex(node)) + (offset << node->shift);
}
-#ifndef __KERNEL__
-static void dump_node(struct radix_tree_node *node, unsigned long index)
-{
- unsigned long i;
-
- pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
- node, node->offset, index, index | node_maxindex(node),
- node->parent,
- node->tags[0][0], node->tags[1][0], node->tags[2][0],
- node->shift, node->count, node->exceptional);
-
- for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
- unsigned long first = index | (i << node->shift);
- unsigned long last = first | ((1UL << node->shift) - 1);
- void *entry = node->slots[i];
- if (!entry)
- continue;
- if (entry == RADIX_TREE_RETRY) {
- pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
- i, first, last, node);
- } else if (!radix_tree_is_internal_node(entry)) {
- pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
- entry, i, first, last, node);
- } else if (is_sibling_entry(node, entry)) {
- pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
- entry, i, first, last, node,
- *(void **)entry_to_node(entry));
- } else {
- dump_node(entry_to_node(entry), first);
- }
- }
-}
-
-/* For debug */
-static void radix_tree_dump(struct radix_tree_root *root)
-{
- pr_debug("radix root: %p rnode %p tags %x\n",
- root, root->rnode,
- root->gfp_mask >> ROOT_TAG_SHIFT);
- if (!radix_tree_is_internal_node(root->rnode))
- return;
- dump_node(entry_to_node(root->rnode), 0);
-}
-
-static void dump_ida_node(void *entry, unsigned long index)
-{
- unsigned long i;
-
- if (!entry)
- return;
-
- if (radix_tree_is_internal_node(entry)) {
- struct radix_tree_node *node = entry_to_node(entry);
-
- pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
- node, node->offset, index * IDA_BITMAP_BITS,
- ((index | node_maxindex(node)) + 1) *
- IDA_BITMAP_BITS - 1,
- node->parent, node->tags[0][0], node->shift,
- node->count);
- for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
- dump_ida_node(node->slots[i],
- index | (i << node->shift));
- } else if (radix_tree_exceptional_entry(entry)) {
- pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
- entry, (int)(index & RADIX_TREE_MAP_MASK),
- index * IDA_BITMAP_BITS,
- index * IDA_BITMAP_BITS + BITS_PER_LONG -
- RADIX_TREE_EXCEPTIONAL_SHIFT,
- (unsigned long)entry >>
- RADIX_TREE_EXCEPTIONAL_SHIFT);
- } else {
- struct ida_bitmap *bitmap = entry;
-
- pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
- (int)(index & RADIX_TREE_MAP_MASK),
- index * IDA_BITMAP_BITS,
- (index + 1) * IDA_BITMAP_BITS - 1);
- for (i = 0; i < IDA_BITMAP_LONGS; i++)
- pr_cont(" %lx", bitmap->bitmap[i]);
- pr_cont("\n");
- }
-}
-
-static void ida_dump(struct ida *ida)
-{
- struct radix_tree_root *root = &ida->ida_rt;
- pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
- root->gfp_mask >> ROOT_TAG_SHIFT);
- dump_ida_node(root->rnode, 0);
-}
-#endif
-
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -378,7 +255,7 @@ static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
struct radix_tree_root *root,
unsigned int shift, unsigned int offset,
- unsigned int count, unsigned int exceptional)
+ unsigned int count, unsigned int nr_values)
{
struct radix_tree_node *ret = NULL;
@@ -425,14 +302,14 @@ out:
ret->shift = shift;
ret->offset = offset;
ret->count = count;
- ret->exceptional = exceptional;
+ ret->nr_values = nr_values;
ret->parent = parent;
- ret->root = root;
+ ret->array = root;
}
return ret;
}
-static void radix_tree_node_rcu_free(struct rcu_head *head)
+void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
@@ -530,77 +407,10 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/*
- * Preload with enough objects to ensure that we can split a single entry
- * of order @old_order into many entries of size @new_order
- */
-int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
- gfp_t gfp_mask)
-{
- unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
- unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
- (new_order / RADIX_TREE_MAP_SHIFT);
- unsigned nr = 0;
-
- WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
- BUG_ON(new_order >= old_order);
-
- while (layers--)
- nr = nr * RADIX_TREE_MAP_SIZE + 1;
- return __radix_tree_preload(gfp_mask, top * nr);
-}
-#endif
-
-/*
- * The same as function above, but preload number of nodes required to insert
- * (1 << order) continuous naturally-aligned elements.
- */
-int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
-{
- unsigned long nr_subtrees;
- int nr_nodes, subtree_height;
-
- /* Preloading doesn't help anything with this gfp mask, skip it */
- if (!gfpflags_allow_blocking(gfp_mask)) {
- preempt_disable();
- return 0;
- }
-
- /*
- * Calculate number and height of fully populated subtrees it takes to
- * store (1 << order) elements.
- */
- nr_subtrees = 1 << order;
- for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
- subtree_height++)
- nr_subtrees >>= RADIX_TREE_MAP_SHIFT;
-
- /*
- * The worst case is zero height tree with a single item at index 0 and
- * then inserting items starting at ULONG_MAX - (1 << order).
- *
- * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
- * 0-index item.
- */
- nr_nodes = RADIX_TREE_MAX_PATH;
-
- /* Plus branch to fully populated subtrees. */
- nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;
-
- /* Root node is shared. */
- nr_nodes--;
-
- /* Plus nodes required to build subtrees. */
- nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];
-
- return __radix_tree_preload(gfp_mask, nr_nodes);
-}
-
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
- struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
+ struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
*nodep = node;
@@ -629,7 +439,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
while (index > shift_maxindex(maxshift))
maxshift += RADIX_TREE_MAP_SHIFT;
- entry = rcu_dereference_raw(root->rnode);
+ entry = rcu_dereference_raw(root->xa_head);
if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out;
@@ -656,9 +466,9 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
BUG_ON(shift > BITS_PER_LONG);
if (radix_tree_is_internal_node(entry)) {
entry_to_node(entry)->parent = node;
- } else if (radix_tree_exceptional_entry(entry)) {
- /* Moving an exceptional root->rnode to a node */
- node->exceptional = 1;
+ } else if (xa_is_value(entry)) {
+ /* Moving a value entry root->xa_head to a node */
+ node->nr_values = 1;
}
/*
* entry was already in the radix tree, so we do not need
@@ -666,7 +476,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
*/
node->slots[0] = (void __rcu *)entry;
entry = node_to_entry(node);
- rcu_assign_pointer(root->rnode, entry);
+ rcu_assign_pointer(root->xa_head, entry);
shift += RADIX_TREE_MAP_SHIFT;
} while (shift <= maxshift);
out:
@@ -677,13 +487,12 @@ out:
* radix_tree_shrink - shrink radix tree to minimum height
* @root radix tree root
*/
-static inline bool radix_tree_shrink(struct radix_tree_root *root,
- radix_tree_update_node_t update_node)
+static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
bool shrunk = false;
for (;;) {
- struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
+ struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
struct radix_tree_node *child;
if (!radix_tree_is_internal_node(node))
@@ -692,15 +501,20 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
/*
* The candidate node has more than one child, or its child
- * is not at the leftmost slot, or the child is a multiorder
- * entry, we cannot shrink.
+ * is not at the leftmost slot, we cannot shrink.
*/
if (node->count != 1)
break;
child = rcu_dereference_raw(node->slots[0]);
if (!child)
break;
- if (!radix_tree_is_internal_node(child) && node->shift)
+
+ /*
+ * For an IDR, we must not shrink entry 0 into the root in
+ * case somebody calls idr_replace() with a pointer that
+ * appears to be an internal entry
+ */
+ if (!node->shift && is_idr(root))
break;
if (radix_tree_is_internal_node(child))
@@ -711,9 +525,9 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
* moving the node from one part of the tree to another: if it
* was safe to dereference the old pointer to it
* (node->slots[0]), it will be safe to dereference the new
- * one (root->rnode) as far as dependent read barriers go.
+ * one (root->xa_head) as far as dependent read barriers go.
*/
- root->rnode = (void __rcu *)child;
+ root->xa_head = (void __rcu *)child;
if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
root_tag_clear(root, IDR_FREE);
@@ -738,8 +552,6 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
node->count = 0;
if (!radix_tree_is_internal_node(child)) {
node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
- if (update_node)
- update_node(node);
}
WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -751,8 +563,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
}
static bool delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node,
- radix_tree_update_node_t update_node)
+ struct radix_tree_node *node)
{
bool deleted = false;
@@ -761,9 +572,8 @@ static bool delete_node(struct radix_tree_root *root,
if (node->count) {
if (node_to_entry(node) ==
- rcu_dereference_raw(root->rnode))
- deleted |= radix_tree_shrink(root,
- update_node);
+ rcu_dereference_raw(root->xa_head))
+ deleted |= radix_tree_shrink(root);
return deleted;
}
@@ -778,7 +588,7 @@ static bool delete_node(struct radix_tree_root *root,
*/
if (!is_idr(root))
root_tag_clear_all(root);
- root->rnode = NULL;
+ root->xa_head = NULL;
}
WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -795,7 +605,6 @@ static bool delete_node(struct radix_tree_root *root,
* __radix_tree_create - create a slot in a radix tree
* @root: radix tree root
* @index: index key
- * @order: index occupies 2^order aligned slots
* @nodep: returns node
* @slotp: returns slot
*
@@ -803,36 +612,34 @@ static bool delete_node(struct radix_tree_root *root,
* at position @index in the radix tree @root.
*
* Until there is more than one item in the tree, no nodes are
- * allocated and @root->rnode is used as a direct slot instead of
+ * allocated and @root->xa_head is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*
* Returns -ENOMEM, or 0 for success.
*/
-int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
- unsigned order, struct radix_tree_node **nodep,
- void __rcu ***slotp)
+static int __radix_tree_create(struct radix_tree_root *root,
+ unsigned long index, struct radix_tree_node **nodep,
+ void __rcu ***slotp)
{
struct radix_tree_node *node = NULL, *child;
- void __rcu **slot = (void __rcu **)&root->rnode;
+ void __rcu **slot = (void __rcu **)&root->xa_head;
unsigned long maxindex;
unsigned int shift, offset = 0;
- unsigned long max = index | ((1UL << order) - 1);
+ unsigned long max = index;
gfp_t gfp = root_gfp_mask(root);
shift = radix_tree_load_root(root, &child, &maxindex);
/* Make sure the tree is high enough. */
- if (order > 0 && max == ((1UL << order) - 1))
- max++;
if (max > maxindex) {
int error = radix_tree_extend(root, gfp, max, shift);
if (error < 0)
return error;
shift = error;
- child = rcu_dereference_raw(root->rnode);
+ child = rcu_dereference_raw(root->xa_head);
}
- while (shift > order) {
+ while (shift > 0) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
@@ -875,8 +682,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
for (;;) {
void *entry = rcu_dereference_raw(child->slots[offset]);
- if (radix_tree_is_internal_node(entry) &&
- !is_sibling_entry(child, entry)) {
+ if (xa_is_node(entry) && child->shift) {
child = entry_to_node(entry);
offset = 0;
continue;
@@ -894,96 +700,30 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
}
}
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, unsigned order, bool replace)
-{
- struct radix_tree_node *child;
- unsigned i, n, tag, offset, tags = 0;
-
- if (node) {
- if (order > node->shift)
- n = 1 << (order - node->shift);
- else
- n = 1;
- offset = get_slot_offset(node, slot);
- } else {
- n = 1;
- offset = 0;
- }
-
- if (n > 1) {
- offset = offset & ~(n - 1);
- slot = &node->slots[offset];
- }
- child = node_to_entry(slot);
-
- for (i = 0; i < n; i++) {
- if (slot[i]) {
- if (replace) {
- node->count--;
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tag_get(node, tag, offset + i))
- tags |= 1 << tag;
- } else
- return -EEXIST;
- }
- }
-
- for (i = 0; i < n; i++) {
- struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
- if (i) {
- rcu_assign_pointer(slot[i], child);
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_clear(node, tag, offset + i);
- } else {
- rcu_assign_pointer(slot[i], item);
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(node, tag, offset);
- }
- if (radix_tree_is_internal_node(old) &&
- !is_sibling_entry(node, old) &&
- (old != RADIX_TREE_RETRY))
- radix_tree_free_nodes(old);
- if (radix_tree_exceptional_entry(old))
- node->exceptional--;
- }
- if (node) {
- node->count += n;
- if (radix_tree_exceptional_entry(item))
- node->exceptional += n;
- }
- return n;
-}
-#else
-static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, unsigned order, bool replace)
+ void __rcu **slot, void *item, bool replace)
{
if (*slot)
return -EEXIST;
rcu_assign_pointer(*slot, item);
if (node) {
node->count++;
- if (radix_tree_exceptional_entry(item))
- node->exceptional++;
+ if (xa_is_value(item))
+ node->nr_values++;
}
return 1;
}
-#endif
/**
* __radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
- * @order: key covers the 2^order indices around index
* @item: item to insert
*
* Insert an item into the radix tree at position @index.
*/
-int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
- unsigned order, void *item)
+int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
+ void *item)
{
struct radix_tree_node *node;
void __rcu **slot;
@@ -991,11 +731,11 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
BUG_ON(radix_tree_is_internal_node(item));
- error = __radix_tree_create(root, index, order, &node, &slot);
+ error = __radix_tree_create(root, index, &node, &slot);
if (error)
return error;
- error = insert_entries(node, slot, item, order, false);
+ error = insert_entries(node, slot, item, false);
if (error < 0)
return error;
@@ -1010,7 +750,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
return 0;
}
-EXPORT_SYMBOL(__radix_tree_insert);
+EXPORT_SYMBOL(radix_tree_insert);
/**
* __radix_tree_lookup - lookup an item in a radix tree
@@ -1023,7 +763,7 @@ EXPORT_SYMBOL(__radix_tree_insert);
* tree @root.
*
* Until there is more than one item in the tree, no nodes are
- * allocated and @root->rnode is used as a direct slot instead of
+ * allocated and @root->xa_head is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*/
void *__radix_tree_lookup(const struct radix_tree_root *root,
@@ -1036,7 +776,7 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
restart:
parent = NULL;
- slot = (void __rcu **)&root->rnode;
+ slot = (void __rcu **)&root->xa_head;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return NULL;
@@ -1049,6 +789,8 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
slot = parent->slots + offset;
+ if (parent->shift == 0)
+ break;
}
if (nodep)
@@ -1100,36 +842,12 @@ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
}
EXPORT_SYMBOL(radix_tree_lookup);
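
With the multiorder support gone, the insert API above loses its order argument; a basic sketch, not part of the patch, with a hypothetical tree and item:

#include <linux/radix-tree.h>

/* Sketch only: hypothetical tree; callers must serialize modifications. */
static RADIX_TREE(example_tree, GFP_KERNEL);

static int example_store(unsigned long index, void *item)
{
	int err = radix_tree_insert(&example_tree, index, item);

	if (err)
		return err;	/* -EEXIST if occupied, -ENOMEM on allocation failure */

	return radix_tree_lookup(&example_tree, index) == item ? 0 : -ENOENT;
}
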
-static inline void replace_sibling_entries(struct radix_tree_node *node,
- void __rcu **slot, int count, int exceptional)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- void *ptr = node_to_entry(slot);
- unsigned offset = get_slot_offset(node, slot) + 1;
-
- while (offset < RADIX_TREE_MAP_SIZE) {
- if (rcu_dereference_raw(node->slots[offset]) != ptr)
- break;
- if (count < 0) {
- node->slots[offset] = NULL;
- node->count--;
- }
- node->exceptional += exceptional;
- offset++;
- }
-#endif
-}
-
static void replace_slot(void __rcu **slot, void *item,
- struct radix_tree_node *node, int count, int exceptional)
+ struct radix_tree_node *node, int count, int values)
{
- if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
- return;
-
- if (node && (count || exceptional)) {
+ if (node && (count || values)) {
node->count += count;
- node->exceptional += exceptional;
- replace_sibling_entries(node, slot, count, exceptional);
+ node->nr_values += values;
}
rcu_assign_pointer(*slot, item);
@@ -1172,37 +890,31 @@ static int calculate_count(struct radix_tree_root *root,
* @node: pointer to tree node
* @slot: pointer to slot in @node
* @item: new item to store in the slot.
- * @update_node: callback for changing leaf nodes
*
* For use with __radix_tree_lookup(). Caller must hold tree write locked
* across slot lookup and replacement.
*/
void __radix_tree_replace(struct radix_tree_root *root,
struct radix_tree_node *node,
- void __rcu **slot, void *item,
- radix_tree_update_node_t update_node)
+ void __rcu **slot, void *item)
{
void *old = rcu_dereference_raw(*slot);
- int exceptional = !!radix_tree_exceptional_entry(item) -
- !!radix_tree_exceptional_entry(old);
+ int values = !!xa_is_value(item) - !!xa_is_value(old);
int count = calculate_count(root, node, slot, item, old);
/*
- * This function supports replacing exceptional entries and
+ * This function supports replacing value entries and
* deleting entries, but that needs accounting against the
- * node unless the slot is root->rnode.
+ * node unless the slot is root->xa_head.
*/
- WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
- (count || exceptional));
- replace_slot(slot, item, node, count, exceptional);
+ WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
+ (count || values));
+ replace_slot(slot, item, node, count, values);
if (!node)
return;
- if (update_node)
- update_node(node);
-
- delete_node(root, node, update_node);
+ delete_node(root, node);
}
/**
@@ -1211,12 +923,12 @@ void __radix_tree_replace(struct radix_tree_root *root,
* @slot: pointer to slot
* @item: new item to store in the slot.
*
- * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
+ * For use with radix_tree_lookup_slot() and
* radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
* across slot lookup and replacement.
*
* NOTE: This cannot be used to switch between non-entries (empty slots),
- * regular entries, and exceptional entries, as that requires accounting
+ * regular entries, and value entries, as that requires accounting
* inside the radix tree node. When switching from one type of entry or
* deleting, use __radix_tree_lookup() and __radix_tree_replace() or
* radix_tree_iter_replace().
@@ -1224,7 +936,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
void radix_tree_replace_slot(struct radix_tree_root *root,
void __rcu **slot, void *item)
{
- __radix_tree_replace(root, NULL, slot, item, NULL);
+ __radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);
@@ -1234,161 +946,15 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
* @slot: pointer to slot
* @item: new item to store in the slot.
*
- * For use with radix_tree_split() and radix_tree_for_each_slot().
- * Caller must hold tree write locked across split and replacement.
+ * For use with radix_tree_for_each_slot().
+ * Caller must hold tree write locked.
*/
void radix_tree_iter_replace(struct radix_tree_root *root,
const struct radix_tree_iter *iter,
void __rcu **slot, void *item)
{
- __radix_tree_replace(root, iter->node, slot, item, NULL);
-}
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-/**
- * radix_tree_join - replace multiple entries with one multiorder entry
- * @root: radix tree root
- * @index: an index inside the new entry
- * @order: order of the new entry
- * @item: new entry
- *
- * Call this function to replace several entries with one larger entry.
- * The existing entries are presumed to not need freeing as a result of
- * this call.
- *
- * The replacement entry will have all the tags set on it that were set
- * on any of the entries it is replacing.
- */
-int radix_tree_join(struct radix_tree_root *root, unsigned long index,
- unsigned order, void *item)
-{
- struct radix_tree_node *node;
- void __rcu **slot;
- int error;
-
- BUG_ON(radix_tree_is_internal_node(item));
-
- error = __radix_tree_create(root, index, order, &node, &slot);
- if (!error)
- error = insert_entries(node, slot, item, order, true);
- if (error > 0)
- error = 0;
-
- return error;
-}
-
-/**
- * radix_tree_split - Split an entry into smaller entries
- * @root: radix tree root
- * @index: An index within the large entry
- * @order: Order of new entries
- *
- * Call this function as the first step in replacing a multiorder entry
- * with several entries of lower order. After this function returns,
- * loop over the relevant portion of the tree using radix_tree_for_each_slot()
- * and call radix_tree_iter_replace() to set up each new entry.
- *
- * The tags from this entry are replicated to all the new entries.
- *
- * The radix tree should be locked against modification during the entire
- * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which
- * should prompt RCU walkers to restart the lookup from the root.
- */
-int radix_tree_split(struct radix_tree_root *root, unsigned long index,
- unsigned order)
-{
- struct radix_tree_node *parent, *node, *child;
- void __rcu **slot;
- unsigned int offset, end;
- unsigned n, tag, tags = 0;
- gfp_t gfp = root_gfp_mask(root);
-
- if (!__radix_tree_lookup(root, index, &parent, &slot))
- return -ENOENT;
- if (!parent)
- return -ENOENT;
-
- offset = get_slot_offset(parent, slot);
-
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tag_get(parent, tag, offset))
- tags |= 1 << tag;
-
- for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
- if (!is_sibling_entry(parent,
- rcu_dereference_raw(parent->slots[end])))
- break;
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(parent, tag, end);
- /* rcu_assign_pointer ensures tags are set before RETRY */
- rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
- }
- rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
- parent->exceptional -= (end - offset);
-
- if (order == parent->shift)
- return 0;
- if (order > parent->shift) {
- while (offset < end)
- offset += insert_entries(parent, &parent->slots[offset],
- RADIX_TREE_RETRY, order, true);
- return 0;
- }
-
- node = parent;
-
- for (;;) {
- if (node->shift > order) {
- child = radix_tree_node_alloc(gfp, node, root,
- node->shift - RADIX_TREE_MAP_SHIFT,
- offset, 0, 0);
- if (!child)
- goto nomem;
- if (node != parent) {
- node->count++;
- rcu_assign_pointer(node->slots[offset],
- node_to_entry(child));
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(node, tag, offset);
- }
-
- node = child;
- offset = 0;
- continue;
- }
-
- n = insert_entries(node, &node->slots[offset],
- RADIX_TREE_RETRY, order, false);
- BUG_ON(n > RADIX_TREE_MAP_SIZE);
-
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- if (tags & (1 << tag))
- tag_set(node, tag, offset);
- offset += n;
-
- while (offset == RADIX_TREE_MAP_SIZE) {
- if (node == parent)
- break;
- offset = node->offset;
- child = node;
- node = node->parent;
- rcu_assign_pointer(node->slots[offset],
- node_to_entry(child));
- offset++;
- }
- if ((node == parent) && (offset == end))
- return 0;
- }
-
- nomem:
- /* Shouldn't happen; did user forget to preload? */
- /* TODO: free all the allocated nodes */
- WARN_ON(1);
- return -ENOMEM;
+ __radix_tree_replace(root, iter->node, slot, item);
}
-#endif
static void node_tag_set(struct radix_tree_root *root,
struct radix_tree_node *node,
@@ -1447,18 +1013,6 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_tag_set);
-/**
- * radix_tree_iter_tag_set - set a tag on the current iterator entry
- * @root: radix tree root
- * @iter: iterator state
- * @tag: tag to set
- */
-void radix_tree_iter_tag_set(struct radix_tree_root *root,
- const struct radix_tree_iter *iter, unsigned int tag)
-{
- node_tag_set(root, iter->node, tag, iter_offset(iter));
-}
-
static void node_tag_clear(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
@@ -1574,14 +1128,6 @@ int radix_tree_tag_get(const struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_tag_get);
-static inline void __set_iter_shift(struct radix_tree_iter *iter,
- unsigned int shift)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- iter->shift = shift;
-#endif
-}
-
/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
struct radix_tree_node *node, unsigned offset,
@@ -1608,94 +1154,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
}
}
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-static void __rcu **skip_siblings(struct radix_tree_node **nodep,
- void __rcu **slot, struct radix_tree_iter *iter)
-{
- void *sib = node_to_entry(slot - 1);
-
- while (iter->index < iter->next_index) {
- *nodep = rcu_dereference_raw(*slot);
- if (*nodep && *nodep != sib)
- return slot;
- slot++;
- iter->index = __radix_tree_iter_add(iter, 1);
- iter->tags >>= 1;
- }
-
- *nodep = NULL;
- return NULL;
-}
-
-void __rcu **__radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags)
-{
- unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
- struct radix_tree_node *node = rcu_dereference_raw(*slot);
-
- slot = skip_siblings(&node, slot, iter);
-
- while (radix_tree_is_internal_node(node)) {
- unsigned offset;
- unsigned long next_index;
-
- if (node == RADIX_TREE_RETRY)
- return slot;
- node = entry_to_node(node);
- iter->node = node;
- iter->shift = node->shift;
-
- if (flags & RADIX_TREE_ITER_TAGGED) {
- offset = radix_tree_find_next_bit(node, tag, 0);
- if (offset == RADIX_TREE_MAP_SIZE)
- return NULL;
- slot = &node->slots[offset];
- iter->index = __radix_tree_iter_add(iter, offset);
- set_iter_tags(iter, node, offset, tag);
- node = rcu_dereference_raw(*slot);
- } else {
- offset = 0;
- slot = &node->slots[0];
- for (;;) {
- node = rcu_dereference_raw(*slot);
- if (node)
- break;
- slot++;
- offset++;
- if (offset == RADIX_TREE_MAP_SIZE)
- return NULL;
- }
- iter->index = __radix_tree_iter_add(iter, offset);
- }
- if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
- goto none;
- next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
- if (next_index < iter->next_index)
- iter->next_index = next_index;
- }
-
- return slot;
- none:
- iter->next_index = 0;
- return NULL;
-}
-EXPORT_SYMBOL(__radix_tree_next_slot);
-#else
-static void __rcu **skip_siblings(struct radix_tree_node **nodep,
- void __rcu **slot, struct radix_tree_iter *iter)
-{
- return slot;
-}
-#endif
-
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
- struct radix_tree_node *node;
-
slot++;
iter->index = __radix_tree_iter_add(iter, 1);
- skip_siblings(&node, slot, iter);
iter->next_index = iter->index;
iter->tags = 0;
return NULL;
@@ -1746,8 +1209,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
iter->next_index = maxindex + 1;
iter->tags = 1;
iter->node = NULL;
- __set_iter_shift(iter, 0);
- return (void __rcu **)&root->rnode;
+ return (void __rcu **)&root->xa_head;
}
do {
@@ -1767,8 +1229,6 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
while (++offset < RADIX_TREE_MAP_SIZE) {
void *slot = rcu_dereference_raw(
node->slots[offset]);
- if (is_sibling_entry(node, slot))
- continue;
if (slot)
break;
}
@@ -1786,13 +1246,12 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
goto restart;
if (child == RADIX_TREE_RETRY)
break;
- } while (radix_tree_is_internal_node(child));
+ } while (node->shift && radix_tree_is_internal_node(child));
/* Update the iterator state */
- iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
+ iter->index = (index &~ node_maxindex(node)) | offset;
iter->next_index = (index | node_maxindex(node)) + 1;
iter->node = node;
- __set_iter_shift(iter, node->shift);
if (flags & RADIX_TREE_ITER_TAGGED)
set_iter_tags(iter, node, offset, tag);
@@ -1849,48 +1308,6 @@ radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
EXPORT_SYMBOL(radix_tree_gang_lookup);
/**
- * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
- * @root: radix tree root
- * @results: where the results of the lookup are placed
- * @indices: where their indices should be placed (but usually NULL)
- * @first_index: start the lookup from this key
- * @max_items: place up to this many items at *results
- *
- * Performs an index-ascending scan of the tree for present items. Places
- * their slots at *@results and returns the number of items which were
- * placed at *@results.
- *
- * The implementation is naive.
- *
- * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
- * be dereferenced with radix_tree_deref_slot, and if using only RCU
- * protection, radix_tree_deref_slot may fail requiring a retry.
- */
-unsigned int
-radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
- void __rcu ***results, unsigned long *indices,
- unsigned long first_index, unsigned int max_items)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
- unsigned int ret = 0;
-
- if (unlikely(!max_items))
- return 0;
-
- radix_tree_for_each_slot(slot, root, &iter, first_index) {
- results[ret] = slot;
- if (indices)
- indices[ret] = iter.index;
- if (++ret == max_items)
- break;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
-
-/**
* radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
* based on a tag
* @root: radix tree root
@@ -1966,28 +1383,11 @@ radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
-/**
- * __radix_tree_delete_node - try to free node after clearing a slot
- * @root: radix tree root
- * @node: node containing @index
- * @update_node: callback for changing leaf nodes
- *
- * After clearing the slot at @index in @node from radix tree
- * rooted at @root, call this function to attempt freeing the
- * node and shrinking the tree.
- */
-void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node,
- radix_tree_update_node_t update_node)
-{
- delete_node(root, node, update_node);
-}
-
static bool __radix_tree_delete(struct radix_tree_root *root,
struct radix_tree_node *node, void __rcu **slot)
{
void *old = rcu_dereference_raw(*slot);
- int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
+ int values = xa_is_value(old) ? -1 : 0;
unsigned offset = get_slot_offset(node, slot);
int tag;
@@ -1997,8 +1397,8 @@ static bool __radix_tree_delete(struct radix_tree_root *root,
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
- replace_slot(slot, NULL, node, -1, exceptional);
- return node && delete_node(root, node, NULL);
+ replace_slot(slot, NULL, node, -1, values);
+ return node && delete_node(root, node);
}
/**
@@ -2036,10 +1436,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
unsigned long index, void *item)
{
struct radix_tree_node *node = NULL;
- void __rcu **slot;
+ void __rcu **slot = NULL;
void *entry;
entry = __radix_tree_lookup(root, index, &node, &slot);
+ if (!slot)
+ return NULL;
if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
get_slot_offset(node, slot))))
return NULL;
@@ -2068,19 +1470,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
}
EXPORT_SYMBOL(radix_tree_delete);
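A short, hedged sketch of how radix_tree_delete_item() is typically used: delete the entry at @index only if it still matches the expected item, which helps when the caller may race with another deleter. The function name below is illustrative only:

#include <linux/radix-tree.h>

/* Returns true if we were the ones who removed @expected from @tree. */
static bool my_remove_if_match(struct radix_tree_root *tree,
			       unsigned long index, void *expected)
{
	/* Caller must hold the tree write locked. */
	return radix_tree_delete_item(tree, index, expected) == expected;
}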
-void radix_tree_clear_tags(struct radix_tree_root *root,
- struct radix_tree_node *node,
- void __rcu **slot)
-{
- if (node) {
- unsigned int tag, offset = get_slot_offset(node, slot);
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- node_tag_clear(root, node, tag, offset);
- } else {
- root_tag_clear_all(root);
- }
-}
-
/**
* radix_tree_tagged - test whether any items in the tree are tagged
* @root: radix tree root
@@ -2106,42 +1495,12 @@ void idr_preload(gfp_t gfp_mask)
}
EXPORT_SYMBOL(idr_preload);
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp: memory allocation flags
- *
- * This function should be called before calling ida_get_new_above(). If it
- * is unable to allocate memory, it will return %0. On success, it returns %1.
- */
-int ida_pre_get(struct ida *ida, gfp_t gfp)
-{
- /*
- * The IDA API has no preload_end() equivalent. Instead,
- * ida_get_new() can return -EAGAIN, prompting the caller
- * to return to the ida_pre_get() step.
- */
- if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
- preempt_enable();
-
- if (!this_cpu_read(ida_bitmap)) {
- struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
- if (!bitmap)
- return 0;
- if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
- kfree(bitmap);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(ida_pre_get);
-
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
{
struct radix_tree_node *node = NULL, *child;
- void __rcu **slot = (void __rcu **)&root->rnode;
+ void __rcu **slot = (void __rcu **)&root->xa_head;
unsigned long maxindex, start = iter->next_index;
unsigned int shift, offset = 0;
@@ -2157,8 +1516,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
if (error < 0)
return ERR_PTR(error);
shift = error;
- child = rcu_dereference_raw(root->rnode);
+ child = rcu_dereference_raw(root->xa_head);
}
+ if (start == 0 && shift == 0)
+ shift = RADIX_TREE_MAP_SHIFT;
while (shift) {
shift -= RADIX_TREE_MAP_SHIFT;
@@ -2201,7 +1562,6 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
else
iter->next_index = 1;
iter->node = node;
- __set_iter_shift(iter, shift);
set_iter_tags(iter, node, offset, IDR_FREE);
return slot;
@@ -2220,10 +1580,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
*/
void idr_destroy(struct idr *idr)
{
- struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
+ struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
if (radix_tree_is_internal_node(node))
radix_tree_free_nodes(node);
- idr->idr_rt.rnode = NULL;
+ idr->idr_rt.xa_head = NULL;
root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
@@ -2237,31 +1597,6 @@ radix_tree_node_ctor(void *arg)
INIT_LIST_HEAD(&node->private_list);
}
-static __init unsigned long __maxindex(unsigned int height)
-{
- unsigned int width = height * RADIX_TREE_MAP_SHIFT;
- int shift = RADIX_TREE_INDEX_BITS - width;
-
- if (shift < 0)
- return ~0UL;
- if (shift >= BITS_PER_LONG)
- return 0UL;
- return ~0UL >> shift;
-}
-
-static __init void radix_tree_init_maxnodes(void)
-{
- unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
- unsigned int i, j;
-
- for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
- height_to_maxindex[i] = __maxindex(i);
- for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
- for (j = i; j > 0; j--)
- height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
- }
-}
-
static int radix_tree_cpu_dead(unsigned int cpu)
{
struct radix_tree_preload *rtp;
@@ -2275,8 +1610,6 @@ static int radix_tree_cpu_dead(unsigned int cpu)
kmem_cache_free(radix_tree_node_cachep, node);
rtp->nr--;
}
- kfree(per_cpu(ida_bitmap, cpu));
- per_cpu(ida_bitmap, cpu) = NULL;
return 0;
}
@@ -2285,11 +1618,12 @@ void __init radix_tree_init(void)
int ret;
BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
+ BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
+ BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
- radix_tree_init_maxnodes();
ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
NULL, radix_tree_cpu_dead);
WARN_ON(ret < 0);
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index f01b1cb04f91..3de0d8921286 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -4,3 +4,4 @@ int*.c
tables.c
neon?.c
s390vx?.c
+vpermxor*.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 4add700ddfe3..2f8b61dfd9b0 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -5,9 +5,9 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
int8.o int16.o int32.o
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
-raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
-raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
@@ -91,6 +91,30 @@ $(obj)/altivec8.c: UNROLL := 8
$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
+CFLAGS_vpermxor1.o += $(altivec_flags)
+targets += vpermxor1.c
+$(obj)/vpermxor1.c: UNROLL := 1
+$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor2.o += $(altivec_flags)
+targets += vpermxor2.c
+$(obj)/vpermxor2.c: UNROLL := 2
+$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor4.o += $(altivec_flags)
+targets += vpermxor4.c
+$(obj)/vpermxor4.c: UNROLL := 4
+$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor8.o += $(altivec_flags)
+targets += vpermxor8.c
+$(obj)/vpermxor8.c: UNROLL := 8
+$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
CFLAGS_neon1.o += $(NEON_FLAGS)
targets += neon1.c
$(obj)/neon1.c: UNROLL := 1
@@ -115,11 +139,6 @@ $(obj)/neon8.c: UNROLL := 8
$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
-targets += tilegx8.c
-$(obj)/tilegx8.c: UNROLL := 8
-$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
targets += s390vx8.c
$(obj)/s390vx8.c: UNROLL := 8
$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 476994723258..5065b1e7e327 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -74,9 +74,10 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_altivec2,
&raid6_altivec4,
&raid6_altivec8,
-#endif
-#if defined(CONFIG_TILEGX)
- &raid6_tilegx8,
+ &raid6_vpermxor1,
+ &raid6_vpermxor2,
+ &raid6_vpermxor4,
+ &raid6_vpermxor8,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index 682aae8a1fef..d20ed0d11411 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -24,10 +24,13 @@
#include <linux/raid/pq.h>
+#ifdef CONFIG_ALTIVEC
+
#include <altivec.h>
#ifdef __KERNEL__
# include <asm/cputable.h>
# include <asm/switch_to.h>
+#endif /* __KERNEL__ */
/*
* This is the C data type to use. We use a vector of
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 140fa8bb5c23..914ebe98fc21 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -55,22 +55,24 @@ static inline void XOR(int x, int y, int z)
asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
-static inline void LOAD_DATA(int x, int n, u8 *ptr)
+static inline void LOAD_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VLM %2,%3,0,%r1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : : "m" (*__ptr), "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
-static inline void STORE_DATA(int x, int n, u8 *ptr)
+static inline void STORE_DATA(int x, u8 *ptr)
{
- typedef struct { u8 _[16*n]; } addrtype;
+ typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
}
static inline void COPY_VEC(int x, int y)
@@ -93,19 +95,19 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
q = dptr[z0 + 2]; /* RS syndrome */
for (d = 0; d < bytes; d += $#*NSIZE) {
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
- STORE_DATA(0,$#,&p[d]);
- STORE_DATA(8,$#,&q[d]);
+ STORE_DATA(0,&p[d]);
+ STORE_DATA(8,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
@@ -127,14 +129,14 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
for (d = 0; d < bytes; d += $#*NSIZE) {
/* P/Q data pages */
- LOAD_DATA(0,$#,&dptr[z0][d]);
+ LOAD_DATA(0,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= start; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
- LOAD_DATA(16,$#,&dptr[z][d]);
+ LOAD_DATA(16,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
@@ -145,12 +147,12 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
}
- LOAD_DATA(16,$#,&p[d]);
+ LOAD_DATA(16,&p[d]);
XOR(16+$$,16+$$,0+$$);
- STORE_DATA(16,$#,&p[d]);
- LOAD_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&p[d]);
+ LOAD_DATA(16,&q[d]);
XOR(16+$$,16+$$,8+$$);
- STORE_DATA(16,$#,&q[d]);
+ STORE_DATA(16,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 1d2276b007ee..8191e1d0d2fb 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -91,7 +91,7 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
- {
+{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
@@ -200,9 +200,9 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
- static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
+static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
- {
+{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
@@ -265,7 +265,7 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
- }
+}
const struct raid6_calls raid6_sse2x2 = {
raid6_sse22_gen_syndrome,
@@ -366,9 +366,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end();
}
- static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
+static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
- {
+{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
@@ -471,7 +471,7 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
- }
+}
const struct raid6_calls raid6_sse2x4 = {
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index be1010bdc435..79777645cac9 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
CFLAGS += -I../../../arch/arm/include -mfpu=neon
HAS_NEON = yes
endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
CFLAGS += -I../../../arch/arm64/include
HAS_NEON = yes
endif
@@ -41,19 +41,18 @@ ifeq ($(IS_X86),yes)
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
- gcc -c -x c - >&/dev/null && \
- rm ./-.o && echo yes)
+ gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
ifeq ($(HAS_ALTIVEC),yes)
- OBJS += altivec1.o altivec2.o altivec4.o altivec8.o
+ CFLAGS += -I../../../arch/powerpc/include
+ CFLAGS += -DCONFIG_ALTIVEC
+ OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
endif
endif
-ifeq ($(ARCH),tilegx)
-OBJS += tilegx8.o
-endif
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
@@ -98,6 +97,18 @@ altivec4.c: altivec.uc ../unroll.awk
altivec8.c: altivec.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=8 < altivec.uc > $@
+vpermxor1.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@
+
+vpermxor2.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@
+
+vpermxor4.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@
+
+vpermxor8.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@
+
int1.c: int.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=1 < int.uc > $@
@@ -116,15 +127,11 @@ int16.c: int.uc ../unroll.awk
int32.c: int.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=32 < int.uc > $@
-tilegx8.c: tilegx.uc ../unroll.awk
- $(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@
-
tables.c: mktables
./mktables > tables.c
clean:
- rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
- rm -f tilegx*.c
+ rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test
spotless: clean
rm -f *~
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
deleted file mode 100644
index 2dd291a11264..000000000000
--- a/lib/raid6/tilegx.uc
+++ /dev/null
@@ -1,87 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
- * Copyright 2012 Tilera Corporation - All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * tilegx$#.c
- *
- * $#-way unrolled TILE-Gx SIMD for RAID-6 math.
- *
- * This file is postprocessed using unroll.awk.
- *
- */
-
-#include <linux/raid/pq.h>
-
-/* Create 8 byte copies of constant byte */
-# define NBYTES(x) (__insn_v1addi(0, x))
-# define NSIZE 8
-
-/*
- * The SHLBYTE() operation shifts each byte left by 1, *not*
- * rolling over into the next byte
- */
-static inline __attribute_const__ u64 SHLBYTE(u64 v)
-{
- /* Vector One Byte Shift Left Immediate. */
- return __insn_v1shli(v, 1);
-}
-
-/*
- * The MASK() operation returns 0xFF in any byte for which the high
- * bit is 1, 0x00 for any byte for which the high bit is 0.
- */
-static inline __attribute_const__ u64 MASK(u64 v)
-{
- /* Vector One Byte Shift Right Signed Immediate. */
- return __insn_v1shrsi(v, 7);
-}
-
-
-void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
-{
- u8 **dptr = (u8 **)ptrs;
- u64 *p, *q;
- int d, z, z0;
-
- u64 wd$$, wq$$, wp$$, w1$$, w2$$;
- u64 x1d = NBYTES(0x1d);
- u64 * z0ptr;
-
- z0 = disks - 3; /* Highest data disk */
- p = (u64 *)dptr[z0+1]; /* XOR parity */
- q = (u64 *)dptr[z0+2]; /* RS syndrome */
-
- z0ptr = (u64 *)&dptr[z0][0];
- for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
- wq$$ = wp$$ = *z0ptr++;
- for ( z = z0-1 ; z >= 0 ; z-- ) {
- wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
- wp$$ = wp$$ ^ wd$$;
- w2$$ = MASK(wq$$);
- w1$$ = SHLBYTE(wq$$);
- w2$$ = w2$$ & x1d;
- w1$$ = w1$$ ^ w2$$;
- wq$$ = w1$$ ^ wd$$;
- }
- *p++ = wp$$;
- *q++ = wq$$;
- }
-}
-
-const struct raid6_calls raid6_tilegx$# = {
- raid6_tilegx$#_gen_syndrome,
- NULL, /* XOR not yet implemented */
- NULL,
- "tilegx$#",
- 0
-};
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
new file mode 100644
index 000000000000..10475dc423c1
--- /dev/null
+++ b/lib/raid6/vpermxor.uc
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2017, Matt Brown, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * vpermxor$#.c
+ *
+ * Based on H. Peter Anvin's paper - The mathematics of RAID-6
+ *
+ * $#-way unrolled portable integer math RAID-6 instruction set
+ * This file is postprocessed using unroll.awk
+ *
+ * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
+ * syndrome calculations.
+ * This can be run on systems which have both the Altivec and vpermxor instructions.
+ *
+ * This instruction was introduced in POWER8 - ISA v2.07.
+ */
+
+#include <linux/raid/pq.h>
+#ifdef CONFIG_ALTIVEC
+
+#include <altivec.h>
+#ifdef __KERNEL__
+#include <asm/cputable.h>
+#include <asm/ppc-opcode.h>
+#include <asm/switch_to.h>
+#endif
+
+typedef vector unsigned char unative_t;
+#define NSIZE sizeof(unative_t)
+
+static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
+ 0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
+ 0x06, 0x04, 0x02,0x00};
+static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
+ 0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
+ 0x60, 0x40, 0x20, 0x00};
+
+static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
+ void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+ unative_t wp$$, wq$$, wd$$;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for (d = 0; d < bytes; d += NSIZE*$#) {
+ wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+
+ for (z = z0-1; z>=0; z--) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ /* P syndrome */
+ wp$$ = vec_xor(wp$$, wd$$);
+
+ /* Q syndrome */
+ asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
+ wq$$ = vec_xor(wq$$, wd$$);
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+}
+
+static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+
+ raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);
+
+ disable_kernel_altivec();
+ preempt_enable();
+}
+
+int raid6_have_altivec_vpermxor(void);
+#if $# == 1
+int raid6_have_altivec_vpermxor(void)
+{
+ /* Check if arch has both altivec and the vpermxor instructions */
+# ifdef __KERNEL__
+ return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S));
+# else
+ return 1;
+#endif
+
+}
+#endif
+
+const struct raid6_calls raid6_vpermxor$# = {
+ raid6_vpermxor$#_gen_syndrome,
+ NULL,
+ raid6_have_altivec_vpermxor,
+ "vpermxor$#",
+ 0
+};
+#endif
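For reference, the gf_high/gf_low tables above let VPERMXOR compute a GF(2^8) multiply-by-two of every byte of the running Q syndrome via two nibble lookups. A hedged scalar equivalent of that per-byte step (purely illustrative, not part of the patch) looks roughly like this:

/* Multiply one byte by 2 in GF(2^8) using the 0x1d reduction term. */
static inline unsigned char gf256_mul2(unsigned char q)
{
	return (unsigned char)((q << 1) ^ ((q & 0x80) ? 0x1d : 0x00));
}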
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 7d36c1e27ff6..b7055b2a07d3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -247,7 +247,7 @@ static int __init rbtree_test_init(void)
cycles_t time1, time2, time;
struct rb_node *node;
- nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
+ nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index fcb4ce682c6f..bf043258fa00 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
@@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d)
return R;
}
EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+ struct reciprocal_value_adv R;
+ u32 l, post_shift;
+ u64 mhigh, mlow;
+
+ /* ceil(log2(d)) */
+ l = fls(d - 1);
+ /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+ * be handled before calling "reciprocal_value_adv", please see the
+ * comment at include/linux/reciprocal_div.h.
+ */
+ WARN(l == 32,
+ "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+ d, __func__);
+ post_shift = l;
+ mlow = 1ULL << (32 + l);
+ do_div(mlow, d);
+ mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+ do_div(mhigh, d);
+
+ for (; post_shift > 0; post_shift--) {
+ u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+ if (lo >= hi)
+ break;
+
+ mlow = lo;
+ mhigh = hi;
+ }
+
+ R.m = (u32)mhigh;
+ R.sh = post_shift;
+ R.exp = l;
+ R.is_wide_m = mhigh > U32_MAX;
+
+ return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
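A hedged usage sketch for the constants computed above: in the common case where is_wide_m is false, a division by d can be replaced by a 32x32->64 multiply and shifts. The helper name is hypothetical, and the wide-m and l == 32 cases are deliberately not handled; see include/linux/reciprocal_div.h for the full discussion:

#include <linux/reciprocal_div.h>

static inline u32 my_divide_adv(u32 n, const struct reciprocal_value_adv *R)
{
	/* Assumes !R->is_wide_m, i.e. the magic multiplier fits in 32 bits. */
	return (u32)(((u64)n * R->m) >> 32) >> R->sh;
}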
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 0ec3f257ffdf..1db74eb098d0 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -1,22 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * lib/reed_solomon/decode_rs.c
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright 2002, Phil Karn, KA9Q
* May be used under the terms of the GNU General Public License (GPL)
*
* Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
- *
- */
-
-/* Generic data width independent code which is included by the
- * wrappers.
+ * Generic data width independent code which is included by the wrappers.
*/
{
+ struct rs_codec *rs = rsc->codec;
int deg_lambda, el, deg_omega;
int i, j, r, k, pad;
int nn = rs->nn;
@@ -27,16 +21,22 @@
uint16_t *alpha_to = rs->alpha_to;
uint16_t *index_of = rs->index_of;
uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
- /* Err+Eras Locator poly and syndrome poly The maximum value
- * of nroots is 8. So the necessary stack size will be about
- * 220 bytes max.
- */
- uint16_t lambda[nroots + 1], syn[nroots];
- uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1];
- uint16_t root[nroots], reg[nroots + 1], loc[nroots];
int count = 0;
uint16_t msk = (uint16_t) rs->nn;
+ /*
+ * The decoder buffers are in the rs control struct. They are
+ * arrays sized [nroots + 1]
+ */
+ uint16_t *lambda = rsc->buffers + RS_DECODE_LAMBDA * (nroots + 1);
+ uint16_t *syn = rsc->buffers + RS_DECODE_SYN * (nroots + 1);
+ uint16_t *b = rsc->buffers + RS_DECODE_B * (nroots + 1);
+ uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1);
+ uint16_t *omega = rsc->buffers + RS_DECODE_OMEGA * (nroots + 1);
+ uint16_t *root = rsc->buffers + RS_DECODE_ROOT * (nroots + 1);
+ uint16_t *reg = rsc->buffers + RS_DECODE_REG * (nroots + 1);
+ uint16_t *loc = rsc->buffers + RS_DECODE_LOC * (nroots + 1);
+
/* Check length parameter for validity */
pad = nn - nroots - len;
BUG_ON(pad < 0 || pad >= nn);
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c
index 0b5b1a6728ec..9112d46e869e 100644
--- a/lib/reed_solomon/encode_rs.c
+++ b/lib/reed_solomon/encode_rs.c
@@ -1,23 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * lib/reed_solomon/encode_rs.c
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright 2002, Phil Karn, KA9Q
* May be used under the terms of the GNU General Public License (GPL)
*
* Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de)
*
- * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $
- *
- */
-
-/* Generic data width independent code which is included by the
- * wrappers.
- * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par)
+ * Generic data width independent code which is included by the wrappers.
*/
{
+ struct rs_codec *rs = rsc->codec;
int i, j, pad;
int nn = rs->nn;
int nroots = rs->nroots;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 06d04cfa9339..e5fdc8b9e856 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -1,43 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * lib/reed_solomon/reed_solomon.c
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
* Reed Solomon code lifted from reed solomon library written by Phil Karn
* Copyright 2002 Phil Karn, KA9Q
*
- * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Description:
*
* The generic Reed Solomon library provides runtime configurable
* encoding / decoding of RS codes.
- * Each user must call init_rs to get a pointer to a rs_control
- * structure for the given rs parameters. This structure is either
- * generated or a already available matching control structure is used.
- * If a structure is generated then the polynomial arrays for
- * fast encoding / decoding are built. This can take some time so
- * make sure not to call this function from a time critical path.
- * Usually a module / driver should initialize the necessary
- * rs_control structure on module / driver init and release it
- * on exit.
- * The encoding puts the calculated syndrome into a given syndrome
- * buffer.
- * The decoding is a two step process. The first step calculates
- * the syndrome over the received (data + syndrome) and calls the
- * second stage, which does the decoding / error correction itself.
- * Many hw encoders provide a syndrome calculation over the received
- * data + syndrome and can call the second stage directly.
*
+ * Each user must call init_rs to get a pointer to a rs_control structure
+ * for the given rs parameters. The control struct is unique per instance.
+ * It points to a codec which can be shared by multiple control structures.
+ * If a codec is newly allocated then the polynomial arrays for fast
+ * encoding / decoding are built. This can take some time so make sure not
+ * to call this function from a time critical path. Usually a module /
+ * driver should initialize the necessary rs_control structure on module /
+ * driver init and release it on exit.
+ *
+ * The encoding puts the calculated syndrome into a given syndrome buffer.
+ *
+ * The decoding is a two step process. The first step calculates the
+ * syndrome over the received (data + syndrome) and calls the second stage,
+ * which does the decoding / error correction itself. Many hw encoders
+ * provide a syndrome calculation over the received data + syndrome and can
+ * call the second stage directly.
*/
-
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
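A hedged end-to-end sketch of the flow described in the comment above: allocate a codec-backed control structure, encode parity, decode with the syndrome computed in software, then release it. The polynomial, root count and function name below are illustrative, not prescriptive:

#include <linux/gfp.h>
#include <linux/rslib.h>

static int my_rs_roundtrip(uint8_t *data, int len)
{
	struct rs_control *rsc;
	uint16_t par[16] = {};
	int ret;

	/* 8-bit symbols over GF(2^8), generator polynomial 0x11d, 16 roots. */
	rsc = init_rs_gfp(8, 0x11d, 0, 1, 16, GFP_KERNEL);
	if (!rsc)
		return -ENOMEM;

	encode_rs8(rsc, data, len, par, 0);		/* step 1: generate parity */
	ret = decode_rs8(rsc, data, par, len,		/* step 2: decode / correct */
			 NULL, 0, NULL, 0, NULL);

	free_rs(rsc);
	return ret;	/* number of corrections, or -EBADMSG if uncorrectable */
}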
@@ -46,32 +37,44 @@
#include <linux/slab.h>
#include <linux/mutex.h>
-/* This list holds all currently allocated rs control structures */
-static LIST_HEAD (rslist);
+enum {
+ RS_DECODE_LAMBDA,
+ RS_DECODE_SYN,
+ RS_DECODE_B,
+ RS_DECODE_T,
+ RS_DECODE_OMEGA,
+ RS_DECODE_ROOT,
+ RS_DECODE_REG,
+ RS_DECODE_LOC,
+ RS_DECODE_NUM_BUFFERS
+};
+
+/* This list holds all currently allocated rs codec structures */
+static LIST_HEAD(codec_list);
/* Protection for the list */
static DEFINE_MUTEX(rslistlock);
/**
- * rs_init - Initialize a Reed-Solomon codec
+ * codec_init - Initialize a Reed-Solomon codec
* @symsize: symbol size, bits (1-8)
* @gfpoly: Field generator polynomial coefficients
* @gffunc: Field generator function
* @fcr: first root of RS code generator polynomial, index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: GFP_ flags for allocations
*
- * Allocate a control structure and the polynom arrays for faster
+ * Allocate a codec structure and the polynomial arrays for faster
* en/decoding. Fill the arrays according to the given parameters.
*/
-static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
- int fcr, int prim, int nroots)
+static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int),
+ int fcr, int prim, int nroots, gfp_t gfp)
{
- struct rs_control *rs;
int i, j, sr, root, iprim;
+ struct rs_codec *rs;
- /* Allocate the control structure */
- rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL);
- if (rs == NULL)
+ rs = kzalloc(sizeof(*rs), gfp);
+ if (!rs)
return NULL;
INIT_LIST_HEAD(&rs->list);
@@ -85,17 +88,17 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
rs->gffunc = gffunc;
/* Allocate the arrays */
- rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+ rs->alpha_to = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
if (rs->alpha_to == NULL)
- goto errrs;
+ goto err;
- rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+ rs->index_of = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
if (rs->index_of == NULL)
- goto erralp;
+ goto err;
- rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL);
+ rs->genpoly = kmalloc_array(rs->nroots + 1, sizeof(uint16_t), gfp);
if(rs->genpoly == NULL)
- goto erridx;
+ goto err;
/* Generate Galois field lookup tables */
rs->index_of[0] = rs->nn; /* log(zero) = -inf */
@@ -120,7 +123,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
}
/* If it's not primitive, exit */
if(sr != rs->alpha_to[0])
- goto errpol;
+ goto err;
/* Find prim-th root of 1, used in decoding */
for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
@@ -148,42 +151,52 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
/* convert rs->genpoly[] to index form for quicker encoding */
for (i = 0; i <= nroots; i++)
rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+
+ rs->users = 1;
+ list_add(&rs->list, &codec_list);
return rs;
- /* Error exit */
-errpol:
+err:
kfree(rs->genpoly);
-erridx:
kfree(rs->index_of);
-erralp:
kfree(rs->alpha_to);
-errrs:
kfree(rs);
return NULL;
}
/**
- * free_rs - Free the rs control structure, if it is no longer used
- * @rs: the control structure which is not longer used by the
+ * free_rs - Free the rs control structure
+ * @rs: The control structure which is no longer used by the
* caller
+ *
+ * Free the control structure. If @rs is the last user of the associated
+ * codec, free the codec as well.
*/
void free_rs(struct rs_control *rs)
{
+ struct rs_codec *cd;
+
+ if (!rs)
+ return;
+
+ cd = rs->codec;
mutex_lock(&rslistlock);
- rs->users--;
- if(!rs->users) {
- list_del(&rs->list);
- kfree(rs->alpha_to);
- kfree(rs->index_of);
- kfree(rs->genpoly);
- kfree(rs);
+ cd->users--;
+ if(!cd->users) {
+ list_del(&cd->list);
+ kfree(cd->alpha_to);
+ kfree(cd->index_of);
+ kfree(cd->genpoly);
+ kfree(cd);
}
mutex_unlock(&rslistlock);
+ kfree(rs);
}
+EXPORT_SYMBOL_GPL(free_rs);
/**
- * init_rs_internal - Find a matching or allocate a new rs control structure
+ * init_rs_internal - Allocate rs control, find a matching codec or allocate a new one
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
@@ -191,55 +204,69 @@ void free_rs(struct rs_control *rs)
* @gffunc: pointer to function to generate the next field element,
* or the multiplicative identity element if given 0. Used
* instead of gfpoly if gfpoly is 0
- * @fcr: the first consecutive root of the rs code generator polynomial
+ * @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: GFP_ flags for allocations
*/
static struct rs_control *init_rs_internal(int symsize, int gfpoly,
- int (*gffunc)(int), int fcr,
- int prim, int nroots)
+ int (*gffunc)(int), int fcr,
+ int prim, int nroots, gfp_t gfp)
{
- struct list_head *tmp;
- struct rs_control *rs;
+ struct list_head *tmp;
+ struct rs_control *rs;
+ unsigned int bsize;
/* Sanity checks */
if (symsize < 1)
return NULL;
if (fcr < 0 || fcr >= (1<<symsize))
- return NULL;
+ return NULL;
if (prim <= 0 || prim >= (1<<symsize))
- return NULL;
+ return NULL;
if (nroots < 0 || nroots >= (1<<symsize))
return NULL;
+ /*
+ * The decoder needs buffers in each control struct instance to
+ * avoid variable size or large fixed size allocations on
+ * stack. Size the buffers to arrays of [nroots + 1].
+ */
+ bsize = sizeof(uint16_t) * RS_DECODE_NUM_BUFFERS * (nroots + 1);
+ rs = kzalloc(sizeof(*rs) + bsize, gfp);
+ if (!rs)
+ return NULL;
+
mutex_lock(&rslistlock);
/* Walk through the list and look for a matching entry */
- list_for_each(tmp, &rslist) {
- rs = list_entry(tmp, struct rs_control, list);
- if (symsize != rs->mm)
+ list_for_each(tmp, &codec_list) {
+ struct rs_codec *cd = list_entry(tmp, struct rs_codec, list);
+
+ if (symsize != cd->mm)
continue;
- if (gfpoly != rs->gfpoly)
+ if (gfpoly != cd->gfpoly)
continue;
- if (gffunc != rs->gffunc)
+ if (gffunc != cd->gffunc)
continue;
- if (fcr != rs->fcr)
+ if (fcr != cd->fcr)
continue;
- if (prim != rs->prim)
+ if (prim != cd->prim)
continue;
- if (nroots != rs->nroots)
+ if (nroots != cd->nroots)
continue;
/* We have a matching one already */
- rs->users++;
+ cd->users++;
+ rs->codec = cd;
goto out;
}
/* Create a new one */
- rs = rs_init(symsize, gfpoly, gffunc, fcr, prim, nroots);
- if (rs) {
- rs->users = 1;
- list_add(&rs->list, &rslist);
+ rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp);
+ if (!rs->codec) {
+ kfree(rs);
+ rs = NULL;
}
out:
mutex_unlock(&rslistlock);
@@ -247,45 +274,48 @@ out:
}
/**
- * init_rs - Find a matching or allocate a new rs control structure
+ * init_rs_gfp - Create a RS control struct and initialize it
* @symsize: the symbol size (number of bits)
* @gfpoly: the extended Galois field generator polynomial coefficients,
* with the 0th coefficient in the low order bit. The polynomial
* must be primitive;
- * @fcr: the first consecutive root of the rs code generator polynomial
+ * @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: Memory allocation flags.
*/
-struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
- int nroots)
+struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
+ int nroots, gfp_t gfp)
{
- return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots);
+ return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp);
}
+EXPORT_SYMBOL_GPL(init_rs_gfp);
/**
- * init_rs_non_canonical - Find a matching or allocate a new rs control
- * structure, for fields with non-canonical
- * representation
+ * init_rs_non_canonical - Allocate rs control struct for fields with
+ * non-canonical representation
* @symsize: the symbol size (number of bits)
* @gffunc: pointer to function to generate the next field element,
* or the multiplicative identity element if given 0. Used
* instead of gfpoly if gfpoly is 0
- * @fcr: the first consecutive root of the rs code generator polynomial
+ * @fcr: the first consecutive root of the rs code generator polynomial
* in index form
* @prim: primitive element to generate polynomial roots
* @nroots: RS code generator polynomial degree (number of roots)
*/
struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int),
- int fcr, int prim, int nroots)
+ int fcr, int prim, int nroots)
{
- return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots);
+ return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots,
+ GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(init_rs_non_canonical);
#ifdef CONFIG_REED_SOLOMON_ENC8
/**
* encode_rs8 - Calculate the parity for data values (8bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @len: data length
* @par: parity data, must be initialized by caller (usually all 0)
@@ -295,7 +325,7 @@ struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int),
* symbol size > 8. The calling code must take care of encoding of the
* syndrome result for storage itself.
*/
-int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
+int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par,
uint16_t invmsk)
{
#include "encode_rs.c"
@@ -306,7 +336,7 @@ EXPORT_SYMBOL_GPL(encode_rs8);
#ifdef CONFIG_REED_SOLOMON_DEC8
/**
* decode_rs8 - Decode codeword (8bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @par: received parity data field
* @len: data length
@@ -319,9 +349,14 @@ EXPORT_SYMBOL_GPL(encode_rs8);
* The syndrome and parity uses a uint16_t data type to enable
* symbol size > 8. The calling code must take care of decoding of the
* syndrome result and the received parity before calling this code.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
* Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
*/
-int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
+int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
uint16_t *corr)
{
@@ -333,7 +368,7 @@ EXPORT_SYMBOL_GPL(decode_rs8);
#ifdef CONFIG_REED_SOLOMON_ENC16
/**
* encode_rs16 - Calculate the parity for data values (16bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @len: data length
* @par: parity data, must be initialized by caller (usually all 0)
@@ -341,7 +376,7 @@ EXPORT_SYMBOL_GPL(decode_rs8);
*
* Each field in the data array contains up to symbol size bits of valid data.
*/
-int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
+int encode_rs16(struct rs_control *rsc, uint16_t *data, int len, uint16_t *par,
uint16_t invmsk)
{
#include "encode_rs.c"
@@ -352,7 +387,7 @@ EXPORT_SYMBOL_GPL(encode_rs16);
#ifdef CONFIG_REED_SOLOMON_DEC16
/**
* decode_rs16 - Decode codeword (16bit data width)
- * @rs: the rs control structure
+ * @rsc: the rs control structure
* @data: data field of a given type
* @par: received parity data field
* @len: data length
@@ -363,9 +398,14 @@ EXPORT_SYMBOL_GPL(encode_rs16);
* @corr: buffer to store correction bitmask on eras_pos
*
* Each field in the data array contains up to symbol size bits of valid data.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
* Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
*/
-int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
uint16_t *corr)
{
@@ -374,10 +414,6 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
EXPORT_SYMBOL_GPL(decode_rs16);
#endif
-EXPORT_SYMBOL_GPL(init_rs);
-EXPORT_SYMBOL_GPL(init_rs_non_canonical);
-EXPORT_SYMBOL_GPL(free_rs);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
diff --git a/lib/refcount.c b/lib/refcount.c
index 0eb48353abe3..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -35,13 +35,13 @@
*
*/
+#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
#include <linux/bug.h>
-#ifdef CONFIG_REFCOUNT_FULL
-
/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -58,7 +58,7 @@
*
* Return: false if the passed refcount is 0, true otherwise
*/
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
/**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
{
- WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
/**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
*
* Return: true if the increment was successful, false otherwise
*/
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
return true;
}
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
/**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
{
- WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+ WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
/**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return !new;
}
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
/**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
*
* Return: true if the resulting refcount is 0, false otherwise
*/
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
{
- return refcount_sub_and_test(1, r);
+ return refcount_sub_and_test_checked(1, r);
}
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
/**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
{
- WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+ WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
@@ -350,3 +349,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
}
EXPORT_SYMBOL(refcount_dec_and_lock);
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock_irqsave(lock, *flags);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock_irqrestore(lock, *flags);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
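As a usage note for the new helper: a minimal, hypothetical sketch of how a caller is expected to use refcount_dec_and_lock_irqsave() is shown below. The struct, lock and release path are invented for illustration and assume the usual <linux/refcount.h>, <linux/spinlock.h>, <linux/list.h> and <linux/slab.h> interfaces.

/* Illustrative only: a hypothetical refcounted object on a locked list. */
struct foo {
	refcount_t ref;
	struct list_head node;
};

static void foo_put(struct foo *f, spinlock_t *list_lock)
{
	unsigned long flags;

	/*
	 * Returns true with list_lock held and interrupts disabled only when
	 * the refcount actually dropped to zero; otherwise no lock is taken.
	 */
	if (!refcount_dec_and_lock_irqsave(&f->ref, list_lock, &flags))
		return;

	list_del(&f->node);
	spin_unlock_irqrestore(list_lock, flags);
	kfree(f);
}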
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 47de025b6245..30526afa8343 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -115,8 +115,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted,
- unsigned int nhash)
+ bool leaf)
{
union nested_table *ntbl;
int i;
@@ -127,10 +126,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
- if (ntbl && shifted) {
- for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
- INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
- (i << shifted) | nhash);
+ if (ntbl && leaf) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +154,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0, 0)) {
+ false)) {
kfree(tbl);
return NULL;
}
@@ -175,17 +173,15 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- if (gfp != GFP_KERNEL)
- tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
- else
- tbl = kvzalloc(size, gfp);
+ tbl = kvzalloc(size, gfp);
size = nbuckets;
- if (tbl == NULL && gfp != GFP_KERNEL) {
+ if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
+
if (tbl == NULL)
return NULL;
@@ -206,7 +202,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
- INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl;
}
@@ -227,8 +223,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct bucket_table *new_tbl = rhashtable_last_table(ht,
- rht_dereference_rcu(old_tbl->future_tbl, ht));
+ struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
int err = -EAGAIN;
struct rhash_head *head, *next, *entry;
@@ -298,21 +293,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
struct bucket_table *old_tbl,
struct bucket_table *new_tbl)
{
- /* Protect future_tbl using the first bucket lock. */
- spin_lock_bh(old_tbl->locks);
-
- /* Did somebody beat us to it? */
- if (rcu_access_pointer(old_tbl->future_tbl)) {
- spin_unlock_bh(old_tbl->locks);
- return -EEXIST;
- }
-
/* Make insertions go into the new, empty table right away. Deletions
* and lookups will be attempted in both tables until we synchronize.
+ * As cmpxchg() provides strong barriers, we do not need
+ * rcu_assign_pointer().
*/
- rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
- spin_unlock_bh(old_tbl->locks);
+ if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+ return -EEXIST;
return 0;
}
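To illustrate the publish-once pattern the cmpxchg() above relies on, here is a small hedged sketch with hypothetical names; it is not the rhashtable code itself, only the shape of the idiom: the first caller to observe NULL installs its pointer, every later caller sees non-NULL and reports -EEXIST.

/* Illustrative only: publish a pointer exactly once without a lock. */
static int publish_once(void **slot, void *new_ptr)
{
	/*
	 * cmpxchg() stores new_ptr only if *slot is still NULL, and its
	 * implied full barriers order the initialisation of new_ptr before
	 * the store, which is why no separate rcu_assign_pointer() is needed.
	 */
	if (cmpxchg(slot, NULL, new_ptr) != NULL)
		return -EEXIST;		/* somebody else beat us to it */
	return 0;
}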
@@ -333,6 +321,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
err = rhashtable_rehash_chain(ht, old_hash);
if (err)
return err;
+ cond_resched();
}
/* Publish the new table pointer. */
@@ -458,7 +447,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
err = -ENOMEM;
- new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
+ new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
if (new_tbl == NULL)
goto fail;
@@ -474,7 +463,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
fail:
/* Do not fail the insert if someone else did a rehash. */
- if (likely(rcu_dereference_raw(tbl->future_tbl)))
+ if (likely(rcu_access_pointer(tbl->future_tbl)))
return 0;
/* Schedule async rehash to retry allocation in process context. */
@@ -547,7 +536,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
return ERR_CAST(data);
- new_tbl = rcu_dereference(tbl->future_tbl);
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (new_tbl)
return new_tbl;
@@ -606,7 +595,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
break;
spin_unlock_bh(lock);
- tbl = rcu_dereference(tbl->future_tbl);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
}
data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -667,8 +656,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* For a completely stable walk you should construct your own data
* structure outside the hash table.
*
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
*/
@@ -725,6 +715,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
__acquires(RCU)
{
struct rhashtable *ht = iter->ht;
+ bool rhlist = ht->rhlist;
rcu_read_lock();
@@ -733,11 +724,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker.tbl && !iter->end_of_table) {
+ if (iter->end_of_table)
+ return 0;
+ if (!iter->walker.tbl) {
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
+ iter->slot = 0;
+ iter->skip = 0;
return -EAGAIN;
}
+ if (iter->p && !rhlist) {
+ /*
+ * We need to validate that 'p' is still in the table, and
+ * if so, update 'skip'.
+ */
+ struct rhash_head *p;
+ int skip = 0;
+ rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+ skip++;
+ if (p == iter->p) {
+ iter->skip = skip;
+ goto found;
+ }
+ }
+ iter->p = NULL;
+ } else if (iter->p && rhlist) {
+ /* Need to validate that 'list' is still in the table, and
+ * if so, update 'skip' and 'p'.
+ */
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ int skip = 0;
+ rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+ for (list = container_of(p, struct rhlist_head, rhead);
+ list;
+ list = rcu_dereference(list->next)) {
+ skip++;
+ if (list == iter->list) {
+ iter->p = p;
+ iter->skip = skip;
+ goto found;
+ }
+ }
+ }
+ iter->p = NULL;
+ }
+found:
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
@@ -913,8 +945,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
iter->walker.tbl = NULL;
spin_unlock(&ht->lock);
- iter->p = NULL;
-
out:
rcu_read_unlock();
}
@@ -922,8 +952,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
- return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
- (unsigned long)params->min_size);
+ size_t retsize;
+
+ if (params->nelem_hint)
+ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ (unsigned long)params->min_size);
+ else
+ retsize = max(HASH_DEFAULT_SIZE,
+ (unsigned long)params->min_size);
+
+ return retsize;
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -952,7 +990,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = jhash,
- * .nulls_base = (1U << RHT_BASE_SHIFT),
* };
*
* Configuration Example 2: Variable length keys
@@ -980,15 +1017,10 @@ int rhashtable_init(struct rhashtable *ht,
struct bucket_table *tbl;
size_t size;
- size = HASH_DEFAULT_SIZE;
-
if ((!params->key_len && !params->obj_hashfn) ||
(params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
- if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
- return -EINVAL;
-
memset(ht, 0, sizeof(*ht));
mutex_init(&ht->mutex);
spin_lock_init(&ht->lock);
@@ -1008,8 +1040,7 @@ int rhashtable_init(struct rhashtable *ht,
ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
- if (params->nelem_hint)
- size = rounded_hashtable_size(&ht->p);
+ size = rounded_hashtable_size(&ht->p);
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1026,9 +1057,16 @@ int rhashtable_init(struct rhashtable *ht,
}
}
+ /*
+ * This is API initialization and thus we need to guarantee the
+ * initial rhashtable allocation. Upon failure, retry with the
+ * smallest possible size with __GFP_NOFAIL semantics.
+ */
tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
- if (tbl == NULL)
- return -ENOMEM;
+ if (unlikely(tbl == NULL)) {
+ size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+ tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
+ }
atomic_set(&ht->nelems, 0);
@@ -1053,10 +1091,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- /* No rhlist NULLs marking for now. */
- if (params->nulls_base)
- return -EINVAL;
-
err = rhashtable_init(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
@@ -1101,17 +1135,19 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg)
{
- struct bucket_table *tbl;
+ struct bucket_table *tbl, *next_tbl;
unsigned int i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
+restart:
if (free_fn) {
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
+ cond_resched();
for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
@@ -1123,7 +1159,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
}
}
+ next_tbl = rht_dereference(tbl->future_tbl, ht);
bucket_table_free(tbl);
+ if (next_tbl) {
+ tbl = next_tbl;
+ goto restart;
+ }
mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
@@ -1173,25 +1214,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- unsigned int shifted;
- unsigned int nhash;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
hash >>= tbl->nest;
- nhash = index;
- shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0, nhash);
+ size <= (1 << shift));
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
size >>= shift;
hash >>= shift;
- nhash |= index << shifted;
- shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0,
- nhash);
+ size <= (1 << shift));
}
if (!ntbl)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 42b5ca0acf93..fdd1b8aa8ac6 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -52,7 +52,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
return 0;
}
- sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
+ sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
if (!sb->map)
return -ENOMEM;
@@ -100,7 +100,7 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
return -1;
}
- if (!test_and_set_bit(nr, word))
+ if (!test_and_set_bit_lock(nr, word))
break;
hint = nr + 1;
@@ -270,18 +270,33 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
-static unsigned int sbq_calc_wake_batch(unsigned int depth)
+static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int depth)
{
unsigned int wake_batch;
+ unsigned int shallow_depth;
/*
* For each batch, we wake up one queue. We need to make sure that our
- * batch size is small enough that the full depth of the bitmap is
- * enough to wake up all of the queues.
+ * batch size is small enough that the full depth of the bitmap,
+ * potentially limited by a shallow depth, is enough to wake up all of
+ * the queues.
+ *
+ * Each full word of the bitmap has bits_per_word bits, and there might
+ * be a partial word. There are depth / bits_per_word full words and
+ * depth % bits_per_word bits left over. In bitwise arithmetic:
+ *
+ * bits_per_word = 1 << shift
+ * depth / bits_per_word = depth >> shift
+ * depth % bits_per_word = depth & ((1 << shift) - 1)
+ *
+ * Each word can be limited to sbq->min_shallow_depth bits.
*/
- wake_batch = SBQ_WAKE_BATCH;
- if (wake_batch > depth / SBQ_WAIT_QUEUES)
- wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);
+ shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
+ depth = ((depth >> sbq->sb.shift) * shallow_depth +
+ min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
+ wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
+ SBQ_WAKE_BATCH);
return wake_batch;
}
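To make the comment's arithmetic concrete, the short standalone sketch below recomputes the wake batch with made-up numbers (depth = 100, shift = 6, min_shallow_depth = 8, SBQ_WAIT_QUEUES = 8, SBQ_WAKE_BATCH = 8); it mirrors the expression above rather than being kernel code.

/* Illustrative recomputation of the wake batch with invented numbers. */
#include <stdio.h>

#define SBQ_WAIT_QUEUES	8
#define SBQ_WAKE_BATCH	8

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int depth = 100, shift = 6, min_shallow_depth = 8;
	unsigned int bits_per_word = 1U << shift;		/* 64 */
	unsigned int shallow = min_shallow_depth < bits_per_word ?
			       min_shallow_depth : bits_per_word;	/* 8 */
	unsigned int full_words = depth >> shift;		/* 1 */
	unsigned int tail_bits = depth & (bits_per_word - 1);	/* 36 */
	unsigned int usable = full_words * shallow +
			      (tail_bits < shallow ? tail_bits : shallow); /* 16 */
	unsigned int wake_batch = clamp_uint(usable / SBQ_WAIT_QUEUES, 1,
					     SBQ_WAKE_BATCH);	/* 2 */

	printf("wake_batch = %u\n", wake_batch);
	return 0;
}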
@@ -307,7 +322,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
}
- sbq->wake_batch = sbq_calc_wake_batch(depth);
+ sbq->min_shallow_depth = UINT_MAX;
+ sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
@@ -327,21 +343,28 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
-void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int depth)
{
- unsigned int wake_batch = sbq_calc_wake_batch(depth);
+ unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
int i;
if (sbq->wake_batch != wake_batch) {
WRITE_ONCE(sbq->wake_batch, wake_batch);
/*
- * Pairs with the memory barrier in sbq_wake_up() to ensure that
- * the batch size is updated before the wait counts.
+ * Pairs with the memory barrier in sbitmap_queue_wake_up()
+ * to ensure that the batch size is updated before the wait
+ * counts.
*/
smp_mb__before_atomic();
for (i = 0; i < SBQ_WAIT_QUEUES; i++)
atomic_set(&sbq->ws[i].wait_cnt, 1);
}
+}
+
+void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
+{
+ sbitmap_queue_update_wake_batch(sbq, depth);
sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
@@ -380,6 +403,8 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int hint, depth;
int nr;
+ WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
+
hint = this_cpu_read(*sbq->alloc_hint);
depth = READ_ONCE(sbq->sb.depth);
if (unlikely(hint >= depth)) {
@@ -403,6 +428,14 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
+ unsigned int min_shallow_depth)
+{
+ sbq->min_shallow_depth = min_shallow_depth;
+ sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
+
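A hedged sketch of the calling pattern the new WARN_ON_ONCE() and sbitmap_queue_min_shallow_depth() expect (sbq is assumed to be an already initialised queue; the depth value 16 is arbitrary):

/* Illustrative only: declare the minimum shallow depth before using it. */
static void limit_shallow(struct sbitmap_queue *sbq)
{
	/* The smallest shallow depth this user will ever request ... */
	sbitmap_queue_min_shallow_depth(sbq, 16);
}

static int get_tag_shallow(struct sbitmap_queue *sbq)
{
	/* ... so this allocation never trips the WARN_ON_ONCE() above. */
	return __sbitmap_queue_get_shallow(sbq, 16);
}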
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
@@ -425,52 +458,67 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
return NULL;
}
-static void sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
struct sbq_wait_state *ws;
unsigned int wake_batch;
int wait_cnt;
- /*
- * Pairs with the memory barrier in set_current_state() to ensure the
- * proper ordering of clear_bit()/waitqueue_active() in the waker and
- * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See
- * the comment on waitqueue_active(). This is __after_atomic because we
- * just did clear_bit() in the caller.
- */
- smp_mb__after_atomic();
-
ws = sbq_wake_ptr(sbq);
if (!ws)
- return;
+ return false;
wait_cnt = atomic_dec_return(&ws->wait_cnt);
if (wait_cnt <= 0) {
+ int ret;
+
wake_batch = READ_ONCE(sbq->wake_batch);
+
/*
* Pairs with the memory barrier in sbitmap_queue_resize() to
* ensure that we see the batch size update before the wait
* count is reset.
*/
smp_mb__before_atomic();
+
/*
- * If there are concurrent callers to sbq_wake_up(), the last
- * one to decrement the wait count below zero will bump it back
- * up. If there is a concurrent resize, the count reset will
- * either cause the cmpxchg to fail or overwrite after the
- * cmpxchg.
+ * For concurrent callers of this, the one that failed the
+ * atomic_cmpxchg() race should call this function again
+ * to wake up a new batch on a different 'ws'.
*/
- atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
- sbq_index_atomic_inc(&sbq->wake_index);
- wake_up_nr(&ws->wait, wake_batch);
+ ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
+ if (ret == wait_cnt) {
+ sbq_index_atomic_inc(&sbq->wake_index);
+ wake_up_nr(&ws->wait, wake_batch);
+ return false;
+ }
+
+ return true;
}
+
+ return false;
+}
+
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+{
+ while (__sbq_wake_up(sbq))
+ ;
}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
- sbitmap_clear_bit(&sbq->sb, nr);
- sbq_wake_up(sbq);
+ sbitmap_clear_bit_unlock(&sbq->sb, nr);
+ /*
+ * Pairs with the memory barrier in set_current_state() to ensure the
+ * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
+ * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
+ * waiter. See the comment on waitqueue_active().
+ */
+ smp_mb__after_atomic();
+ sbitmap_queue_wake_up(sbq);
+
if (likely(!sbq->round_robin && nr < sbq->sb.depth))
*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
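For context, the waiter side that this barrier pairs with typically looks like the sketch below; the helper name is hypothetical (the real waiters live in the block layer), but the ordering is the point: the bit is retried after prepare_to_wait(), so either the waker sees the waiter queued or the waiter sees the freed bit.

/* Illustrative only: a waiter loop pairing with the barrier in the waker. */
static int wait_for_free_bit(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws)
{
	DEFINE_WAIT(wait);
	int nr;

	for (;;) {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
		/*
		 * set_current_state() inside prepare_to_wait() provides the
		 * barrier that sbitmap_queue_clear() pairs with.
		 */
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		io_schedule();
	}
	finish_wait(&ws->wait, &wait);
	return nr;
}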
@@ -482,7 +530,7 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
/*
* Pairs with the memory barrier in set_current_state() like in
- * sbq_wake_up().
+ * sbitmap_queue_wake_up().
*/
smp_mb();
wake_index = atomic_read(&sbq->wake_index);
@@ -528,5 +576,6 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_puts(m, "}\n");
seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+ seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 53728d391d3a..7c6096a71704 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -24,9 +24,6 @@
**/
struct scatterlist *sg_next(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
if (sg_is_last(sg))
return NULL;
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
for_each_sg(sgl, sg, nents, i)
ret = sg;
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
-#endif
return ret;
}
EXPORT_SYMBOL(sg_last);
@@ -132,14 +126,7 @@ EXPORT_SYMBOL(sg_last);
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
- {
- unsigned int i;
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
- }
-#endif
- sg_mark_end(&sgl[nents - 1]);
+ sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);
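sg_init_table() now delegates the end marking to sg_init_marker(); a hedged sketch of when each is appropriate follows (it assumes sg_init_marker() only sets the end marker and leaves the entries otherwise untouched):

/* Illustrative only: full init versus marker-only init. */
static void init_fresh_table(struct scatterlist *sgl, unsigned int nents)
{
	sg_init_table(sgl, nents);	/* zeroes every entry, then marks the end */
}

static void init_prezeroed_table(struct scatterlist *sgl, unsigned int nents)
{
	/* Memory that is already zeroed (e.g. from kzalloc()) only needs
	 * the end marker. */
	sg_init_marker(sgl, nents);
}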
@@ -177,7 +164,8 @@ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
return ptr;
} else
- return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+ return kmalloc_array(nents, sizeof(struct scatterlist),
+ gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index 6dd30615a201..d1c1e6388eaa 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -148,10 +148,9 @@ static __init int sg_pool_init(void)
cleanup_sdb:
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct sg_pool *sgp = sg_pools + i;
- if (sgp->pool)
- mempool_destroy(sgp->pool);
- if (sgp->slab)
- kmem_cache_destroy(sgp->slab);
+
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
}
return -ENOMEM;
diff --git a/lib/sha256.c b/lib/sha256.c
new file mode 100644
index 000000000000..4400c832e2aa
--- /dev/null
+++ b/lib/sha256.c
@@ -0,0 +1,283 @@
+/*
+ * SHA-256, as specified in
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ *
+ * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/sha256.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+static inline u32 Ch(u32 x, u32 y, u32 z)
+{
+ return z ^ (x & (y ^ z));
+}
+
+static inline u32 Maj(u32 x, u32 y, u32 z)
+{
+ return (x & y) | (z & (x | y));
+}
+
+#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
+#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
+#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
+#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
+
+static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+{
+ W[I] = __be32_to_cpu(((__be32 *)(input))[I]);
+}
+
+static inline void BLEND_OP(int I, u32 *W)
+{
+ W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+}
+
+static void sha256_transform(u32 *state, const u8 *input)
+{
+ u32 a, b, c, d, e, f, g, h, t1, t2;
+ u32 W[64];
+ int i;
+
+ /* load the input */
+ for (i = 0; i < 16; i++)
+ LOAD_OP(i, W, input);
+
+ /* now blend */
+ for (i = 16; i < 64; i++)
+ BLEND_OP(i, W);
+
+ /* load the state into our registers */
+ a = state[0]; b = state[1]; c = state[2]; d = state[3];
+ e = state[4]; f = state[5]; g = state[6]; h = state[7];
+
+ /* now iterate */
+ t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
+ t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
+ t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
+ t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
+ t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
+ t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
+ t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
+ t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
+ t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
+ t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
+ t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
+ t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
+ t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
+ t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
+ t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
+ t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+ state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+
+ /* clear any sensitive info... */
+ a = b = c = d = e = f = g = h = t1 = t2 = 0;
+ memset(W, 0, 64 * sizeof(u32));
+}
+
+int sha256_init(struct sha256_state *sctx)
+{
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+{
+ unsigned int partial, done;
+ const u8 *src;
+
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data, done + 64);
+ src = sctx->buf;
+ }
+
+ do {
+ sha256_transform(sctx->state, src);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ partial = 0;
+ }
+ memcpy(sctx->buf + partial, src, len - done);
+
+ return 0;
+}
+
+int sha256_final(struct sha256_state *sctx, u8 *out)
+{
+ __be32 *dst = (__be32 *)out;
+ __be64 bits;
+ unsigned int index, pad_len;
+ int i;
+ static const u8 padding[64] = { 0x80, };
+
+ /* Save number of bits */
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64. */
+ index = sctx->count & 0x3f;
+ pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
+ sha256_update(sctx, padding, pad_len);
+
+ /* Append length (before padding) */
+ sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
+
+ /* Store state in digest */
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Zeroize sensitive information. */
+ memset(sctx, 0, sizeof(*sctx));
+
+ return 0;
+}
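The new library interface follows the usual init/update/final shape; a minimal hedged usage sketch is below (it assumes SHA256_DIGEST_SIZE and struct sha256_state come from the headers included above):

/* Illustrative only: one-shot digest of a buffer with the new helpers. */
static void example_sha256(const u8 *data, unsigned int len,
			   u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);	/* may be called repeatedly */
	sha256_final(&sctx, digest);		/* also zeroizes sctx */
}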
diff --git a/lib/string.c b/lib/string.c
index 2c0900a5d51a..38e4ca08e757 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
deleted file mode 100644
index c43ec2271469..000000000000
--- a/lib/swiotlb.c
+++ /dev/null
@@ -1,1135 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * This implementation is a fallback for platforms that do not support
- * I/O TLBs (aka DMA address translation hardware).
- * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
- * Copyright (C) 2000, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
- * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
- * unnecessary i-cache flushing.
- * 04/07/.. ak Better overflow handling. Assorted fixes.
- * 05/09/10 linville Add support for syncing ranges, support syncing for
- * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
- * 08/12/11 beckyb Add highmem support
- */
-
-#include <linux/cache.h>
-#include <linux/dma-direct.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/swiotlb.h>
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/iommu-helper.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/swiotlb.h>
-
-#define OFFSET(val,align) ((unsigned long) \
- ( (val) & ( (align) - 1)))
-
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-
-/*
- * Minimum IO TLB size to bother booting with. Systems with mainly
- * 64bit capable cards will only lightly use the swiotlb. If we can't
- * allocate a contiguous 1MB, we're probably in trouble anyway.
- */
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-
-enum swiotlb_force swiotlb_force;
-
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static phys_addr_t io_tlb_start, io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
-
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
-/*
- * This is a free list describing the number of free entries available from
- * each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
-
-/*
- * Max segment that we can provide which (if pages are contingous) will
- * not be bounced (unless SWIOTLB_FORCE is set).
- */
-unsigned int max_segment;
-
-/*
- * We need to save away the original address corresponding to a mapped entry
- * for the sync operations.
- */
-#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
-static phys_addr_t *io_tlb_orig_addr;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */
-static DEFINE_SPINLOCK(io_tlb_lock);
-
-static int late_alloc;
-
-static int __init
-setup_io_tlb_npages(char *str)
-{
- if (isdigit(*str)) {
- io_tlb_nslabs = simple_strtoul(str, &str, 0);
- /* avoid tail segment of size < IO_TLB_SEGSIZE */
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
- if (*str == ',')
- ++str;
- if (!strcmp(str, "force")) {
- swiotlb_force = SWIOTLB_FORCE;
- } else if (!strcmp(str, "noforce")) {
- swiotlb_force = SWIOTLB_NO_FORCE;
- io_tlb_nslabs = 1;
- }
-
- return 0;
-}
-early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
-
-unsigned long swiotlb_nr_tbl(void)
-{
- return io_tlb_nslabs;
-}
-EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
-
-unsigned int swiotlb_max_segment(void)
-{
- return max_segment;
-}
-EXPORT_SYMBOL_GPL(swiotlb_max_segment);
-
-void swiotlb_set_max_segment(unsigned int val)
-{
- if (swiotlb_force == SWIOTLB_FORCE)
- max_segment = 1;
- else
- max_segment = rounddown(val, PAGE_SIZE);
-}
-
-/* default to 64MB */
-#define IO_TLB_DEFAULT_SIZE (64UL<<20)
-unsigned long swiotlb_size_or_default(void)
-{
- unsigned long size;
-
- size = io_tlb_nslabs << IO_TLB_SHIFT;
-
- return size ? size : (IO_TLB_DEFAULT_SIZE);
-}
-
-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
- phys_addr_t address)
-{
- return __sme_clr(phys_to_dma(hwdev, address));
-}
-
-/* Note that this doesn't work with highmem page */
-static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
- volatile void *address)
-{
- return phys_to_dma(hwdev, virt_to_phys(address));
-}
-
-static bool no_iotlb_memory;
-
-void swiotlb_print_info(void)
-{
- unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
- unsigned char *vstart, *vend;
-
- if (no_iotlb_memory) {
- pr_warn("software IO TLB: No low mem\n");
- return;
- }
-
- vstart = phys_to_virt(io_tlb_start);
- vend = phys_to_virt(io_tlb_end);
-
- printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
- (unsigned long long)io_tlb_start,
- (unsigned long long)io_tlb_end,
- bytes >> 20, vstart, vend - 1);
-}
-
-/*
- * Early SWIOTLB allocation may be too early to allow an architecture to
- * perform the desired operations. This function allows the architecture to
- * call SWIOTLB when the operations are possible. It needs to be called
- * before the SWIOTLB memory is used.
- */
-void __init swiotlb_update_mem_attributes(void)
-{
- void *vaddr;
- unsigned long bytes;
-
- if (no_iotlb_memory || late_alloc)
- return;
-
- vaddr = phys_to_virt(io_tlb_start);
- bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
- swiotlb_set_mem_attributes(vaddr, bytes);
- memset(vaddr, 0, bytes);
-
- vaddr = phys_to_virt(io_tlb_overflow_buffer);
- bytes = PAGE_ALIGN(io_tlb_overflow);
- swiotlb_set_mem_attributes(vaddr, bytes);
- memset(vaddr, 0, bytes);
-}
-
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
-{
- void *v_overflow_buffer;
- unsigned long i, bytes;
-
- bytes = nslabs << IO_TLB_SHIFT;
-
- io_tlb_nslabs = nslabs;
- io_tlb_start = __pa(tlb);
- io_tlb_end = io_tlb_start + bytes;
-
- /*
- * Get the overflow emergency buffer
- */
- v_overflow_buffer = memblock_virt_alloc_low_nopanic(
- PAGE_ALIGN(io_tlb_overflow),
- PAGE_SIZE);
- if (!v_overflow_buffer)
- return -ENOMEM;
-
- io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
- /*
- * Allocate and initialize the free list array. This array is used
- * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
- * between io_tlb_start and io_tlb_end.
- */
- io_tlb_list = memblock_virt_alloc(
- PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
- PAGE_SIZE);
- io_tlb_orig_addr = memblock_virt_alloc(
- PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
- PAGE_SIZE);
- for (i = 0; i < io_tlb_nslabs; i++) {
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
- }
- io_tlb_index = 0;
-
- if (verbose)
- swiotlb_print_info();
-
- swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
- return 0;
-}
-
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init(int verbose)
-{
- size_t default_size = IO_TLB_DEFAULT_SIZE;
- unsigned char *vstart;
- unsigned long bytes;
-
- if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
-
- bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
- /* Get IO TLB memory from the low pages */
- vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
- return;
-
- if (io_tlb_start)
- memblock_free_early(io_tlb_start,
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- pr_warn("Cannot allocate SWIOTLB buffer");
- no_iotlb_memory = true;
-}
-
-/*
- * Systems with larger DMA zones (those that don't support ISA) can
- * initialize the swiotlb later using the slab allocator if needed.
- * This should be just like above, but with some error catching.
- */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
-{
- unsigned long bytes, req_nslabs = io_tlb_nslabs;
- unsigned char *vstart = NULL;
- unsigned int order;
- int rc = 0;
-
- if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
-
- /*
- * Get IO TLB memory from the low pages
- */
- order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
- io_tlb_nslabs = SLABS_PER_PAGE << order;
- bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
- while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
- order);
- if (vstart)
- break;
- order--;
- }
-
- if (!vstart) {
- io_tlb_nslabs = req_nslabs;
- return -ENOMEM;
- }
- if (order != get_order(bytes)) {
- printk(KERN_WARNING "Warning: only able to allocate %ld MB "
- "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
- io_tlb_nslabs = SLABS_PER_PAGE << order;
- }
- rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
- if (rc)
- free_pages((unsigned long)vstart, order);
-
- return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
- unsigned long i, bytes;
- unsigned char *v_overflow_buffer;
-
- bytes = nslabs << IO_TLB_SHIFT;
-
- io_tlb_nslabs = nslabs;
- io_tlb_start = virt_to_phys(tlb);
- io_tlb_end = io_tlb_start + bytes;
-
- swiotlb_set_mem_attributes(tlb, bytes);
- memset(tlb, 0, bytes);
-
- /*
- * Get the overflow emergency buffer
- */
- v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
- get_order(io_tlb_overflow));
- if (!v_overflow_buffer)
- goto cleanup2;
-
- swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
- memset(v_overflow_buffer, 0, io_tlb_overflow);
- io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
- /*
- * Allocate and initialize the free list array. This array is used
- * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
- * between io_tlb_start and io_tlb_end.
- */
- io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(int)));
- if (!io_tlb_list)
- goto cleanup3;
-
- io_tlb_orig_addr = (phys_addr_t *)
- __get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs *
- sizeof(phys_addr_t)));
- if (!io_tlb_orig_addr)
- goto cleanup4;
-
- for (i = 0; i < io_tlb_nslabs; i++) {
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
- }
- io_tlb_index = 0;
-
- swiotlb_print_info();
-
- late_alloc = 1;
-
- swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
-
- return 0;
-
-cleanup4:
- free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
- sizeof(int)));
- io_tlb_list = NULL;
-cleanup3:
- free_pages((unsigned long)v_overflow_buffer,
- get_order(io_tlb_overflow));
- io_tlb_overflow_buffer = 0;
-cleanup2:
- io_tlb_end = 0;
- io_tlb_start = 0;
- io_tlb_nslabs = 0;
- max_segment = 0;
- return -ENOMEM;
-}
-
-void __init swiotlb_exit(void)
-{
- if (!io_tlb_orig_addr)
- return;
-
- if (late_alloc) {
- free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
- get_order(io_tlb_overflow));
- free_pages((unsigned long)io_tlb_orig_addr,
- get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
- free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
- sizeof(int)));
- free_pages((unsigned long)phys_to_virt(io_tlb_start),
- get_order(io_tlb_nslabs << IO_TLB_SHIFT));
- } else {
- memblock_free_late(io_tlb_overflow_buffer,
- PAGE_ALIGN(io_tlb_overflow));
- memblock_free_late(__pa(io_tlb_orig_addr),
- PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
- memblock_free_late(__pa(io_tlb_list),
- PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
- memblock_free_late(io_tlb_start,
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- }
- io_tlb_nslabs = 0;
- max_segment = 0;
-}
-
-int is_swiotlb_buffer(phys_addr_t paddr)
-{
- return paddr >= io_tlb_start && paddr < io_tlb_end;
-}
-
-/*
- * Bounce: copy the swiotlb buffer back to the original dma location
- */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir)
-{
- unsigned long pfn = PFN_DOWN(orig_addr);
- unsigned char *vaddr = phys_to_virt(tlb_addr);
-
- if (PageHighMem(pfn_to_page(pfn))) {
- /* The buffer does not have a mapping. Map it in and copy */
- unsigned int offset = orig_addr & ~PAGE_MASK;
- char *buffer;
- unsigned int sz = 0;
- unsigned long flags;
-
- while (size) {
- sz = min_t(size_t, PAGE_SIZE - offset, size);
-
- local_irq_save(flags);
- buffer = kmap_atomic(pfn_to_page(pfn));
- if (dir == DMA_TO_DEVICE)
- memcpy(vaddr, buffer + offset, sz);
- else
- memcpy(buffer + offset, vaddr, sz);
- kunmap_atomic(buffer);
- local_irq_restore(flags);
-
- size -= sz;
- pfn++;
- vaddr += sz;
- offset = 0;
- }
- } else if (dir == DMA_TO_DEVICE) {
- memcpy(vaddr, phys_to_virt(orig_addr), size);
- } else {
- memcpy(phys_to_virt(orig_addr), vaddr, size);
- }
-}
-
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t orig_addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- unsigned long flags;
- phys_addr_t tlb_addr;
- unsigned int nslots, stride, index, wrap;
- int i;
- unsigned long mask;
- unsigned long offset_slots;
- unsigned long max_slots;
-
- if (no_iotlb_memory)
- panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
-
- if (mem_encrypt_active())
- pr_warn_once("%s is active and system is using DMA bounce buffers\n",
- sme_active() ? "SME" : "SEV");
-
- mask = dma_get_seg_boundary(hwdev);
-
- tbl_dma_addr &= mask;
-
- offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-
- /*
- * Carefully handle integer overflow which can occur when mask == ~0UL.
- */
- max_slots = mask + 1
- ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
- : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
-
- /*
- * For mappings greater than or equal to a page, we limit the stride
- * (and hence alignment) to a page size.
- */
- nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size >= PAGE_SIZE)
- stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
- else
- stride = 1;
-
- BUG_ON(!nslots);
-
- /*
- * Find suitable number of IO TLB entries size that will fit this
- * request and allocate a buffer from that IO TLB pool.
- */
- spin_lock_irqsave(&io_tlb_lock, flags);
- index = ALIGN(io_tlb_index, stride);
- if (index >= io_tlb_nslabs)
- index = 0;
- wrap = index;
-
- do {
- while (iommu_is_span_boundary(index, nslots, offset_slots,
- max_slots)) {
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- if (index == wrap)
- goto not_found;
- }
-
- /*
- * If we find a slot that indicates we have 'nslots' number of
- * contiguous buffers, we allocate the buffers from that slot
- * and mark the entries as '0' indicating unavailable.
- */
- if (io_tlb_list[index] >= nslots) {
- int count = 0;
-
- for (i = index; i < (int) (index + nslots); i++)
- io_tlb_list[i] = 0;
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
- /*
- * Update the indices to avoid searching in the next
- * round.
- */
- io_tlb_index = ((index + nslots) < io_tlb_nslabs
- ? (index + nslots) : 0);
-
- goto found;
- }
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- } while (index != wrap);
-
-not_found:
- spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
- dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
- return SWIOTLB_MAP_ERROR;
-found:
- spin_unlock_irqrestore(&io_tlb_lock, flags);
-
- /*
- * Save away the mapping from the original address to the DMA address.
- * This is needed when we sync the memory. Then we sync the buffer if
- * needed.
- */
- for (i = 0; i < nslots; i++)
- io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
-
- return tlb_addr;
-}
-
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- dma_addr_t start_dma_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE) {
- dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
- &phys);
- return SWIOTLB_MAP_ERROR;
- }
-
- start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
- return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
- dir, attrs);
-}
-
-/*
- * dma_addr is the kernel virtual address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- unsigned long flags;
- int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (orig_addr != INVALID_PHYS_ADDR &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
-
- /*
- * Return the buffer to the free list by setting the corresponding
- * entries to indicate the number of contiguous entries available.
- * While returning the entries to the free list, we merge the entries
- * with slots below and above the pool being returned.
- */
- spin_lock_irqsave(&io_tlb_lock, flags);
- {
- count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
- io_tlb_list[index + nslots] : 0);
- /*
- * Step 1: return the slots to the free list, merging the
- * slots with superceeding slots
- */
- for (i = index + nslots - 1; i >= index; i--) {
- io_tlb_list[i] = ++count;
- io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
- }
- /*
- * Step 2: merge the returned slots with the preceding slots,
- * if available (non zero)
- */
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- }
- spin_unlock_irqrestore(&io_tlb_lock, flags);
-}
-
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
- if (orig_addr == INVALID_PHYS_ADDR)
- return;
- orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
-
- switch (target) {
- case SYNC_FOR_CPU:
- if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr,
- size, DMA_FROM_DEVICE);
- else
- BUG_ON(dir != DMA_TO_DEVICE);
- break;
- case SYNC_FOR_DEVICE:
- if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr,
- size, DMA_TO_DEVICE);
- else
- BUG_ON(dir != DMA_FROM_DEVICE);
- break;
- default:
- BUG();
- }
-}
-
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
- size_t size)
-{
- u64 mask = DMA_BIT_MASK(32);
-
- if (dev && dev->coherent_dma_mask)
- mask = dev->coherent_dma_mask;
- return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned long attrs)
-{
- phys_addr_t phys_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE)
- goto out_warn;
-
- phys_addr = swiotlb_tbl_map_single(dev,
- swiotlb_phys_to_dma(dev, io_tlb_start),
- 0, size, DMA_FROM_DEVICE, 0);
- if (phys_addr == SWIOTLB_MAP_ERROR)
- goto out_warn;
-
- *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
- if (dma_coherent_ok(dev, *dma_handle, size))
- goto out_unmap;
-
- memset(phys_to_virt(phys_addr), 0, size);
- return phys_to_virt(phys_addr);
-
-out_unmap:
- dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
- (unsigned long long)*dma_handle);
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
- if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
- dev_warn(dev,
- "swiotlb: coherent allocation failed, size=%zu\n",
- size);
- dump_stack();
- }
- return NULL;
-}
-
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
-{
- int order = get_order(size);
- unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
- void *ret;
-
- ret = (void *)__get_free_pages(flags, order);
- if (ret) {
- *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
- if (dma_coherent_ok(hwdev, *dma_handle, size)) {
- memset(ret, 0, size);
- return ret;
- }
- free_pages((unsigned long)ret, order);
- }
-
- return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
-}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
- dma_addr_t dma_addr)
-{
- phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
- WARN_ON_ONCE(irqs_disabled());
-
- if (!is_swiotlb_buffer(phys_addr))
- return false;
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- return true;
-}
-
-void
-swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr)
-{
- if (!swiotlb_free_buffer(hwdev, size, dev_addr))
- free_pages((unsigned long)vaddr, get_order(size));
-}
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-static void
-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
- int do_panic)
-{
- if (swiotlb_force == SWIOTLB_NO_FORCE)
- return;
-
- /*
- * Ran out of IOMMU space for this operation. This is very bad.
- * Unfortunately the drivers cannot handle this operation properly.
- * unless they check for dma_mapping_error (most don't)
- * When the mapping is small enough return a static buffer to limit
- * the damage, or panic when the transfer is too big.
- */
- dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
- size);
-
- if (size <= io_tlb_overflow || !do_panic)
- return;
-
- if (dir == DMA_BIDIRECTIONAL)
- panic("DMA: Random memory could be DMA accessed\n");
- if (dir == DMA_FROM_DEVICE)
- panic("DMA: Random memory could be DMA written\n");
- if (dir == DMA_TO_DEVICE)
- panic("DMA: Random memory could be DMA read\n");
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode. The
- * physical address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
- */
-dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = phys_to_dma(dev, phys);
-
- BUG_ON(dir == DMA_NONE);
- /*
- * If the address happens to be in the device's DMA window,
- * we can safely return the device addr and not worry about bounce
- * buffering it.
- */
- if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
- return dev_addr;
-
- trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
- /* Oh well, have to allocate and map a bounce buffer. */
- map = map_single(dev, phys, size, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
- swiotlb_full(dev, size, dir, 1);
- return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
- }
-
- dev_addr = swiotlb_phys_to_dma(dev, map);
-
- /* Ensure that the address returned is DMA'ble */
- if (dma_capable(dev, dev_addr, size))
- return dev_addr;
-
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
- return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
-}
-
-/*
- * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_page call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
- return;
- }
-
- if (dir != DMA_FROM_DEVICE)
- return;
-
- /*
- * phys_to_virt doesn't work with a highmem page but we could
- * call dma_mark_clean() with a highmem page here. However, we
- * are fine since dma_mark_clean() is null on POWERPC. We can
- * make dma_mark_clean() take a physical address if necessary.
- */
- dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the dma mapping, you must
- * call this function before doing so. At the next point you give the dma
- * address back to the card, you must first perform a
- * swiotlb_dma_sync_for_device, and then the device again owns the buffer
- */
-static void
-swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- BUG_ON(dir == DMA_NONE);
-
- if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
- return;
- }
-
- if (dir != DMA_FROM_DEVICE)
- return;
-
- dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir)
-{
- swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_page
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for swiotlb_map_page are the
- * same here.
- */
-int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i) {
- phys_addr_t paddr = sg_phys(sg);
- dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
-
- if (swiotlb_force == SWIOTLB_FORCE ||
- !dma_capable(hwdev, dev_addr, sg->length)) {
- phys_addr_t map = map_single(hwdev, sg_phys(sg),
- sg->length, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
- /* Don't panic here, we expect map_sg users
- to do proper error handling. */
- swiotlb_full(hwdev, sg->length, dir, 0);
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
- attrs);
- sg_dma_len(sgl) = 0;
- return 0;
- }
- sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
- } else
- sg->dma_address = dev_addr;
- sg_dma_len(sg) = sg->length;
- }
- return nelems;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i)
- unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
- attrs);
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
-static void
-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- enum dma_sync_target target)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nelems, i)
- swiotlb_sync_single(hwdev, sg->dma_address,
- sg_dma_len(sg), dir, target);
-}
-
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
-}
-
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask to
- * this function.
- */
-int
-swiotlb_dma_supported(struct device *hwdev, u64 mask)
-{
- return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
-}
-
-#ifdef CONFIG_DMA_DIRECT_OPS
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- void *vaddr;
-
- /* temporary workaround: */
- if (gfp & __GFP_NOWARN)
- attrs |= DMA_ATTR_NO_WARN;
-
- /*
- * Don't print a warning when the first allocation attempt fails.
- * swiotlb_alloc_coherent() will print a warning when the DMA memory
- * allocation ultimately failed.
- */
- gfp |= __GFP_NOWARN;
-
- vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
- if (!vaddr)
- vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
- return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs)
-{
- if (!swiotlb_free_buffer(dev, size, dma_addr))
- dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
- .mapping_error = swiotlb_dma_mapping_error,
- .alloc = swiotlb_alloc,
- .free = swiotlb_free,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .dma_supported = swiotlb_dma_supported,
-};
-#endif /* CONFIG_DMA_DIRECT_OPS */
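
The ownership rules spelled out in the removed comments above (the device owns the buffer between map and unmap/sync_for_cpu; sync_for_device hands it back) are the same rules the generic streaming DMA API follows. A minimal sketch only, with dev/buf/len assumed to come from a calling driver and DMA_FROM_DEVICE standing in for whatever direction applies:

#include <linux/dma-mapping.h>

static int example_streaming_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Hand the buffer to the device; it owns the memory from here on. */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... start the transfer ... */

        /* Give the buffer back to the CPU so it may look at the data ... */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

        /* ... and back to the device before it may touch the buffer again. */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
}
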
diff --git a/lib/test_bitfield.c b/lib/test_bitfield.c
new file mode 100644
index 000000000000..5b8f4108662d
--- /dev/null
+++ b/lib/test_bitfield.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for bitfield helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+
+#define CHECK_ENC_GET_U(tp, v, field, res) do { \
+ { \
+ u##tp _res; \
+ \
+ _res = u##tp##_encode_bits(v, field); \
+ if (_res != res) { \
+ pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
+ (u64)_res); \
+ return -EINVAL; \
+ } \
+ if (u##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
+ { \
+ __le##tp _res; \
+ \
+ _res = le##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_le##tp(res)) { \
+ pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (le##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
+ { \
+ __be##tp _res; \
+ \
+ _res = be##tp##_encode_bits(v, field); \
+ if (_res != cpu_to_be##tp(res)) { \
+ pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ return -EINVAL; \
+ } \
+ if (be##tp##_get_bits(_res, field) != v) \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET(tp, v, field, res) do { \
+ CHECK_ENC_GET_U(tp, v, field, res); \
+ CHECK_ENC_GET_LE(tp, v, field, res); \
+ CHECK_ENC_GET_BE(tp, v, field, res); \
+ } while (0)
+
+static int test_constants(void)
+{
+ /*
+ * NOTE
+ * This whole function compiles (or at least should, if everything
+ * is going according to plan) to nothing after optimisation.
+ */
+
+ CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
+ CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
+ CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
+ CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
+ CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
+ CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
+
+ CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
+ CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
+ CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
+ CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
+
+ CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
+ CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
+ CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
+ CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
+ CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
+ CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
+
+ CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
+ CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
+ CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
+ CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
+ CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
+ CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
+
+ return 0;
+}
+
+#define CHECK(tp, mask) do { \
+ u64 v; \
+ \
+ for (v = 0; v < 1 << hweight32(mask); v++) \
+ if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
+ return -EINVAL; \
+ } while (0)
+
+static int test_variables(void)
+{
+ CHECK(u8, 0x0f);
+ CHECK(u8, 0xf0);
+ CHECK(u8, 0x38);
+
+ CHECK(u16, 0x0038);
+ CHECK(u16, 0x0380);
+ CHECK(u16, 0x3800);
+ CHECK(u16, 0x8000);
+
+ CHECK(u32, 0x80000000);
+ CHECK(u32, 0x7f000000);
+ CHECK(u32, 0x07e00000);
+ CHECK(u32, 0x00018000);
+
+ CHECK(u64, 0x8000000000000000ull);
+ CHECK(u64, 0x7f00000000000000ull);
+ CHECK(u64, 0x0001800000000000ull);
+ CHECK(u64, 0x0000000080000000ull);
+ CHECK(u64, 0x000000007f000000ull);
+ CHECK(u64, 0x0000000018000000ull);
+ CHECK(u64, 0x0000001f8000000ull);
+
+ return 0;
+}
+
+static int __init test_bitfields(void)
+{
+ int ret = test_constants();
+
+ if (ret) {
+ pr_warn("constant tests failed!\n");
+ return ret;
+ }
+
+ ret = test_variables();
+ if (ret) {
+ pr_warn("variable tests failed!\n");
+ return ret;
+ }
+
+#ifdef TEST_BITFIELD_COMPILE
+ /* these should fail compilation */
+ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
+ u32_encode_bits(7, 0x06000000);
+
+ /* this should at least give a warning */
+ u16_encode_bits(0, 0x60000);
+#endif
+
+ pr_info("tests passed\n");
+
+ return 0;
+}
+module_init(test_bitfields)
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
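
For reference, the encode/get helpers exercised above pack a right-aligned value into a mask-defined field and extract it again. A minimal usage sketch with hypothetical EXAMPLE_* register fields:

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define EXAMPLE_CHAN	GENMASK(3, 0)	/* hypothetical field masks */
#define EXAMPLE_POWER	GENMASK(11, 8)

static u32 example_pack(u32 chan, u32 power)
{
	/* Each value is shifted into place according to its field mask. */
	return u32_encode_bits(chan, EXAMPLE_CHAN) |
	       u32_encode_bits(power, EXAMPLE_POWER);
}

static u32 example_power(u32 reg)
{
	/* Extract the POWER field and right-align it again. */
	return u32_get_bits(reg, EXAMPLE_POWER);
}
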
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index b3f235baa05d..6cd7d0740005 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -255,6 +255,10 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
{-EINVAL, "10-1", NULL, 8, 0},
+ {-EINVAL, "0-31:", NULL, 8, 0},
+ {-EINVAL, "0-31:0", NULL, 8, 0},
+ {-EINVAL, "0-31:0/0", NULL, 8, 0},
+ {-EINVAL, "0-31:1/0", NULL, 8, 0},
{-EINVAL, "0-31:10/1", NULL, 8, 0},
};
@@ -292,15 +296,17 @@ static void __init test_bitmap_parselist(void)
}
}
+#define EXP_BYTES (sizeof(exp) * 8)
+
static void __init test_bitmap_arr32(void)
{
- unsigned int nbits, next_bit, len = sizeof(exp) * 8;
+ unsigned int nbits, next_bit;
u32 arr[sizeof(exp) / 4];
- DECLARE_BITMAP(bmap2, len);
+ DECLARE_BITMAP(bmap2, EXP_BYTES);
memset(arr, 0xa5, sizeof(arr));
- for (nbits = 0; nbits < len; ++nbits) {
+ for (nbits = 0; nbits < EXP_BYTES; ++nbits) {
bitmap_to_arr32(arr, exp, nbits);
bitmap_from_arr32(bmap2, arr, nbits);
expect_eq_bitmap(bmap2, exp, nbits);
@@ -312,7 +318,7 @@ static void __init test_bitmap_arr32(void)
" tail is not safely cleared: %d\n",
nbits, next_bit);
- if (nbits < len - 32)
+ if (nbits < EXP_BYTES - 32)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
0xa5a5a5a5);
}
@@ -325,23 +331,32 @@ static void noinline __init test_mem_optimisations(void)
unsigned int start, nbits;
for (start = 0; start < 1024; start += 8) {
- memset(bmap1, 0x5a, sizeof(bmap1));
- memset(bmap2, 0x5a, sizeof(bmap2));
for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+ memset(bmap1, 0x5a, sizeof(bmap1));
+ memset(bmap2, 0x5a, sizeof(bmap2));
+
bitmap_set(bmap1, start, nbits);
__bitmap_set(bmap2, start, nbits);
- if (!bitmap_equal(bmap1, bmap2, 1024))
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
printk("set not equal %d %d\n", start, nbits);
- if (!__bitmap_equal(bmap1, bmap2, 1024))
+ failed_tests++;
+ }
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
printk("set not __equal %d %d\n", start, nbits);
+ failed_tests++;
+ }
bitmap_clear(bmap1, start, nbits);
__bitmap_clear(bmap2, start, nbits);
- if (!bitmap_equal(bmap1, bmap2, 1024))
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
printk("clear not equal %d %d\n", start, nbits);
- if (!__bitmap_equal(bmap1, bmap2, 1024))
+ failed_tests++;
+ }
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
printk("clear not __equal %d %d\n", start,
nbits);
+ failed_tests++;
+ }
}
}
}
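
For context, the new rejection cases above target the region form of bitmap_parselist(); a small sketch of the accepted syntax (values are illustrative):

#include <linux/bitmap.h>

static int example_parselist(void)
{
	DECLARE_BITMAP(mask, 32);

	/*
	 * "0-31:4/8" sets the first 4 bits of every 8-bit group in the
	 * range 0-31, i.e. bits 0-3, 8-11, 16-19 and 24-27.  The strings
	 * rejected above ("0-31:", "0-31:1/0", ...) are malformed
	 * variants of this "start-end:used/group_size" form.
	 */
	return bitmap_parselist("0-31:4/8", mask, 32);
}
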
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 3e9335493fe4..aa22bcaec1dc 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -356,29 +356,22 @@ static int bpf_fill_maxinsns11(struct bpf_test *self)
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
-static int bpf_fill_ja(struct bpf_test *self)
-{
- /* Hits exactly 11 passes on x86_64 JIT. */
- return __bpf_fill_ja(self, 12, 9);
-}
-
-static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+static int bpf_fill_maxinsns12(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
- int i;
+ int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- for (i = 0; i < len - 1; i += 2) {
- insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
- insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
- SKF_AD_OFF + SKF_AD_CPU);
- }
+ insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
- insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
@@ -386,50 +379,22 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
return 0;
}
-#define PUSH_CNT 68
-/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
-static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+static int bpf_fill_maxinsns13(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn;
- int i = 0, j, k = 0;
+ struct sock_filter *insn;
+ int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- insn[i++] = BPF_MOV64_REG(R6, R1);
-loop:
- for (j = 0; j < PUSH_CNT; j++) {
- insn[i++] = BPF_LD_ABS(BPF_B, 0);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
- i++;
- insn[i++] = BPF_MOV64_REG(R1, R6);
- insn[i++] = BPF_MOV64_IMM(R2, 1);
- insn[i++] = BPF_MOV64_IMM(R3, 2);
- insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- bpf_skb_vlan_push_proto.func - __bpf_call_base);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
- i++;
- }
-
- for (j = 0; j < PUSH_CNT; j++) {
- insn[i++] = BPF_LD_ABS(BPF_B, 0);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
- i++;
- insn[i++] = BPF_MOV64_REG(R1, R6);
- insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- bpf_skb_vlan_pop_proto.func - __bpf_call_base);
- insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
- i++;
- }
- if (++k < 5)
- goto loop;
-
- for (; i < len - 1; i++)
- insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+ for (i = 0; i < len - 3; i++)
+ insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
- insn[len - 1] = BPF_EXIT_INSN();
+ insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
+ insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
@@ -437,58 +402,29 @@ loop:
return 0;
}
-static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+static int bpf_fill_ja(struct bpf_test *self)
{
- struct bpf_insn *insn;
-
- insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
- if (!insn)
- return -ENOMEM;
-
- /* Due to func address being non-const, we need to
- * assemble this here.
- */
- insn[0] = BPF_MOV64_REG(R6, R1);
- insn[1] = BPF_LD_ABS(BPF_B, 0);
- insn[2] = BPF_LD_ABS(BPF_H, 0);
- insn[3] = BPF_LD_ABS(BPF_W, 0);
- insn[4] = BPF_MOV64_REG(R7, R6);
- insn[5] = BPF_MOV64_IMM(R6, 0);
- insn[6] = BPF_MOV64_REG(R1, R7);
- insn[7] = BPF_MOV64_IMM(R2, 1);
- insn[8] = BPF_MOV64_IMM(R3, 2);
- insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- bpf_skb_vlan_push_proto.func - __bpf_call_base);
- insn[10] = BPF_MOV64_REG(R6, R7);
- insn[11] = BPF_LD_ABS(BPF_B, 0);
- insn[12] = BPF_LD_ABS(BPF_H, 0);
- insn[13] = BPF_LD_ABS(BPF_W, 0);
- insn[14] = BPF_MOV64_IMM(R0, 42);
- insn[15] = BPF_EXIT_INSN();
-
- self->u.ptr.insns = insn;
- self->u.ptr.len = 16;
-
- return 0;
+ /* Hits exactly 11 passes on x86_64 JIT. */
+ return __bpf_fill_ja(self, 12, 9);
}
-static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn;
- int i = 0;
+ struct sock_filter *insn;
+ int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
- insn[i++] = BPF_MOV64_REG(R6, R1);
- insn[i++] = BPF_LD_ABS(BPF_B, 0);
- insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
- i++;
- while (i < len - 1)
- insn[i++] = BPF_LD_ABS(BPF_B, 1);
- insn[i] = BPF_EXIT_INSN();
+ for (i = 0; i < len - 1; i += 2) {
+ insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+ insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
@@ -1988,40 +1924,6 @@ static struct bpf_test tests[] = {
{ { 0, -1 } }
},
{
- "INT: DIV + ABS",
- .u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, R2, 2),
- BPF_ALU32_REG(BPF_DIV, R0, R2),
- BPF_ALU64_REG(BPF_MOV, R8, R0),
- BPF_LD_ABS(BPF_B, 4),
- BPF_ALU64_REG(BPF_ADD, R8, R0),
- BPF_LD_IND(BPF_B, R8, -70),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { 10, 20, 30, 40, 50 },
- { { 4, 0 }, { 5, 10 } }
- },
- {
- /* This one doesn't go through verifier, but is just raw insn
- * as opposed to cBPF tests from here. Thus div by 0 tests are
- * done in test_verifier in BPF kselftests.
- */
- "INT: DIV by -1",
- .u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_ALU64_IMM(BPF_MOV, R7, -1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU32_REG(BPF_DIV, R0, R7),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { 10, 20, 30, 40, 50 },
- { { 3, 0 }, { 4, 0 } }
- },
- {
"check: missing ret",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 1),
@@ -2383,50 +2285,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } }
},
- {
- "nmap reduced",
- .u.insns_int = {
- BPF_MOV64_REG(R6, R1),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
- BPF_MOV32_IMM(R0, 18),
- BPF_STX_MEM(BPF_W, R10, R0, -64),
- BPF_LDX_MEM(BPF_W, R7, R10, -64),
- BPF_LD_IND(BPF_W, R7, 14),
- BPF_STX_MEM(BPF_W, R10, R0, -60),
- BPF_MOV32_IMM(R0, 280971478),
- BPF_STX_MEM(BPF_W, R10, R0, -56),
- BPF_LDX_MEM(BPF_W, R7, R10, -56),
- BPF_LDX_MEM(BPF_W, R0, R10, -60),
- BPF_ALU32_REG(BPF_SUB, R0, R7),
- BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
- BPF_MOV32_IMM(R0, 22),
- BPF_STX_MEM(BPF_W, R10, R0, -56),
- BPF_LDX_MEM(BPF_W, R7, R10, -56),
- BPF_LD_IND(BPF_H, R7, 14),
- BPF_STX_MEM(BPF_W, R10, R0, -52),
- BPF_MOV32_IMM(R0, 17366),
- BPF_STX_MEM(BPF_W, R10, R0, -48),
- BPF_LDX_MEM(BPF_W, R7, R10, -48),
- BPF_LDX_MEM(BPF_W, R0, R10, -52),
- BPF_ALU32_REG(BPF_SUB, R0, R7),
- BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
- BPF_MOV32_IMM(R0, 256),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(R0, 0),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
- { { 38, 256 } },
- .stack_depth = 64,
- },
/* BPF_ALU | BPF_MOV | BPF_X */
{
"ALU_MOV_X: dst = 2",
@@ -5424,21 +5282,31 @@ static struct bpf_test tests[] = {
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Ctx heavy transformations",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
CLASSIC,
+#endif
{ },
{
{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
},
.fill_helper = bpf_fill_maxinsns6,
+ .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Call heavy transformations",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
CLASSIC | FLAG_NO_DATA,
+#endif
{ },
{ { 1, 0 }, { 10, 0 } },
.fill_helper = bpf_fill_maxinsns7,
+ .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Jump heavy test",
@@ -5478,28 +5346,39 @@ static struct bpf_test tests[] = {
.expected_errcode = -ENOTSUPP,
},
{
- "BPF_MAXINSNS: ld_abs+get_processor_id",
+ "BPF_MAXINSNS: jump over MSH",
{ },
- CLASSIC,
- { },
- { { 1, 0xbee } },
- .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ { 0xfa, 0xfb, 0xfc, 0xfd, },
+ { { 4, 0xabababab } },
+ .fill_helper = bpf_fill_maxinsns12,
+ .expected_errcode = -EINVAL,
},
{
- "BPF_MAXINSNS: ld_abs+vlan_push/pop",
+ "BPF_MAXINSNS: exec all MSH",
{ },
- INTERNAL,
- { 0x34 },
- { { ETH_HLEN, 0xbef } },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
+ CLASSIC,
+#endif
+ { 0xfa, 0xfb, 0xfc, 0xfd, },
+ { { 4, 0xababab83 } },
+ .fill_helper = bpf_fill_maxinsns13,
+ .expected_errcode = -ENOTSUPP,
},
{
- "BPF_MAXINSNS: jump around ld_abs",
+ "BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
- INTERNAL,
- { 10, 11 },
- { { 2, 10 } },
- .fill_helper = bpf_fill_jump_around_ld_abs,
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
+ CLASSIC,
+#endif
+ { },
+ { { 1, 0xbee } },
+ .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ .expected_errcode = -ENOTSUPP,
},
/*
* LD_IND / LD_ABS on fragmented SKBs
@@ -5683,6 +5562,53 @@ static struct bpf_test tests[] = {
{ {0x40, 0x05 } },
},
{
+ "LD_IND byte positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xff } },
+ },
+ {
+ "LD_IND byte positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND byte negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
+ "LD_IND byte negative offset, multiple calls",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
"LD_IND halfword positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
@@ -5731,6 +5657,39 @@ static struct bpf_test tests[] = {
{ {0x40, 0x66cc } },
},
{
+ "LD_IND halfword positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffff } },
+ },
+ {
+ "LD_IND halfword positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND halfword negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
"LD_IND word positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
@@ -5821,6 +5780,39 @@ static struct bpf_test tests[] = {
{ {0x40, 0x66cc77dd } },
},
{
+ "LD_IND word positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffffffff } },
+ },
+ {
+ "LD_IND word positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND word negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
"LD_ABS byte",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
@@ -5838,6 +5830,68 @@ static struct bpf_test tests[] = {
{ {0x40, 0xcc } },
},
{
+ "LD_ABS byte positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xff } },
+ },
+ {
+ "LD_ABS byte positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS byte negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS byte negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
+ "LD_ABS byte negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS byte negative offset, multiple calls",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
"LD_ABS halfword",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
@@ -5872,6 +5926,55 @@ static struct bpf_test tests[] = {
{ {0x40, 0x99ff } },
},
{
+ "LD_ABS halfword positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffff } },
+ },
+ {
+ "LD_ABS halfword positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS halfword negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS halfword negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x1982 }, },
+ },
+ {
+ "LD_ABS halfword negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
"LD_ABS word",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
@@ -5939,6 +6042,140 @@ static struct bpf_test tests[] = {
},
{ {0x40, 0x88ee99ff } },
},
+ {
+ "LD_ABS word positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffffffff } },
+ },
+ {
+ "LD_ABS word positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS word negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS word negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x25051982 }, },
+ },
+ {
+ "LD_ABS word negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LDX_MSH standalone, preserved A",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0xffeebbaa }, },
+ },
+ {
+ "LDX_MSH standalone, preserved A 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x175e9d63 }, },
+ },
+ {
+ "LDX_MSH standalone, test result 1",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x14 }, },
+ },
+ {
+ "LDX_MSH standalone, test result 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x24 }, },
+ },
+ {
+ "LDX_MSH standalone, negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0 }, },
+ },
+ {
+ "LDX_MSH standalone, negative offset 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x24 }, },
+ },
+ {
+ "LDX_MSH standalone, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0 }, },
+ },
/*
* verify that the interpreter or JIT correctly sets A and X
* to 0.
@@ -6127,14 +6364,6 @@ static struct bpf_test tests[] = {
{},
{ {0x1, 0x42 } },
},
- {
- "LD_ABS with helper changing skb data",
- { },
- INTERNAL,
- { 0x34 },
- { { ETH_HLEN, 42 } },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
- },
/* Checking interpreter vs JIT wrt signed extended imms. */
{
"JNE signed compare, test 1",
@@ -6265,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
skb->vlan_proto = htons(ETH_P_IP);
+ dev_net_set(&dev, &init_net);
skb->dev = &dev;
skb->dev->ifindex = SKB_DEV_IFINDEX;
skb->dev->type = SKB_DEV_TYPE;
@@ -6574,6 +6804,93 @@ static bool exclude_test(int test_id)
return test_id < test_range[0] || test_id > test_range[1];
}
+static __init struct sk_buff *build_test_skb(void)
+{
+ u32 headroom = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN;
+ struct sk_buff *skb[2];
+ struct page *page[2];
+ int i, data_size = 8;
+
+ for (i = 0; i < 2; i++) {
+ page[i] = alloc_page(GFP_KERNEL);
+ if (!page[i]) {
+ if (i == 0)
+ goto err_page0;
+ else
+ goto err_page1;
+ }
+
+ /* this will set skb[i]->head_frag */
+ skb[i] = dev_alloc_skb(headroom + data_size);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], data_size);
+ skb[i]->protocol = htons(ETH_P_IP);
+ skb_reset_network_header(skb[i]);
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+
+ skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
+ // skb_headlen(skb[i]): 8, skb[i]->head_frag = 1
+ }
+
+ /* setup shinfo */
+ skb_shinfo(skb[0])->gso_size = 1448;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->data_len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ __free_page(page[1]);
+err_page1:
+ kfree_skb(skb[0]);
+err_skb0:
+ __free_page(page[0]);
+err_page0:
+ return NULL;
+}
+
+static __init int test_skb_segment(void)
+{
+ netdev_features_t features;
+ struct sk_buff *skb, *segs;
+ int ret = -1;
+
+ features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+ features |= NETIF_F_RXCSUM;
+ skb = build_test_skb();
+ if (!skb) {
+ pr_info("%s: failed to build_test_skb", __func__);
+ goto done;
+ }
+
+ segs = skb_segment(skb, features);
+ if (!IS_ERR(segs)) {
+ kfree_skb_list(segs);
+ ret = 0;
+ pr_info("%s: success in skb_segment!", __func__);
+ } else {
+ pr_info("%s: failed in skb_segment!", __func__);
+ }
+ kfree_skb(skb);
+done:
+ return ret;
+}
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
@@ -6632,9 +6949,11 @@ static int __init test_bpf_init(void)
return ret;
ret = test_bpf();
-
destroy_bpf_tests();
- return ret;
+ if (ret)
+ return ret;
+
+ return test_skb_segment();
}
static void __exit test_bpf_exit(void)
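
The classic-BPF vectors above are all built from struct sock_filter statements; as a point of reference, a standalone two-instruction program in the same style (offset illustrative):

#include <linux/filter.h>

/*
 * Loads the byte at absolute offset 0x3f into the accumulator and
 * returns it, mirroring the "LD_ABS byte" cases above.
 */
static const struct sock_filter example_prog[] = {
	BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
	BPF_STMT(BPF_RET | BPF_A, 0x0),
};
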
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index b9cdeecc19dc..d5a06addeb27 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -15,7 +15,7 @@ struct foo {
unsigned int bar;
};
-struct foo *foo;
+static struct foo *foo;
static int __init test_debug_virtual_init(void)
{
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 078a61480573..b984806d7d7b 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/vmalloc.h>
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
@@ -617,8 +618,9 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
mutex_lock(&test_fw_mutex);
- test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
- test_fw_config->num_requests * 2);
+ test_fw_config->reqs =
+ vzalloc(array3_size(sizeof(struct test_batched_req),
+ test_fw_config->num_requests, 2));
if (!test_fw_config->reqs) {
rc = -ENOMEM;
goto out_unlock;
@@ -719,8 +721,9 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
mutex_lock(&test_fw_mutex);
- test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
- test_fw_config->num_requests * 2);
+ test_fw_config->reqs =
+ vzalloc(array3_size(sizeof(struct test_batched_req),
+ test_fw_config->num_requests, 2));
if (!test_fw_config->reqs) {
rc = -ENOMEM;
goto out;
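
Both this conversion and the similar test_kmod one below replace an open-coded size multiplication with the saturating helpers from <linux/overflow.h>; a minimal sketch of the idiom, with a hypothetical element type:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

struct example_req {
	int id;
};

static void *example_alloc(size_t rows, size_t cols)
{
	/*
	 * array3_size() (and array_size()) saturate to SIZE_MAX when the
	 * product would overflow, so vzalloc() fails cleanly instead of
	 * returning a buffer that is too small.
	 */
	return vzalloc(array3_size(sizeof(struct example_req), rows, cols));
}
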
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 3f415d8101f3..626f580b4ff7 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -18,7 +18,7 @@ static const unsigned char data_b[] = {
static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
-static const char * const test_data_1_le[] __initconst = {
+static const char * const test_data_1[] __initconst = {
"be", "32", "db", "7b", "0a", "18", "93", "b2",
"70", "ba", "c4", "24", "7d", "83", "34", "9b",
"a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
@@ -32,16 +32,33 @@ static const char * const test_data_2_le[] __initconst = {
"d14c", "9919", "b143", "0caf",
};
+static const char * const test_data_2_be[] __initconst = {
+ "be32", "db7b", "0a18", "93b2",
+ "70ba", "c424", "7d83", "349b",
+ "a69c", "31ad", "9c0f", "ace9",
+ "4cd1", "1999", "43b1", "af0c",
+};
+
static const char * const test_data_4_le[] __initconst = {
"7bdb32be", "b293180a", "24c4ba70", "9b34837d",
"ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
};
+static const char * const test_data_4_be[] __initconst = {
+ "be32db7b", "0a1893b2", "70bac424", "7d83349b",
+ "a69c31ad", "9c0face9", "4cd11999", "43b1af0c",
+};
+
static const char * const test_data_8_le[] __initconst = {
"b293180a7bdb32be", "9b34837d24c4ba70",
"e9ac0f9cad319ca6", "0cafb1439919d14c",
};
+static const char * const test_data_8_be[] __initconst = {
+ "be32db7b0a1893b2", "70bac4247d83349b",
+ "a69c31ad9c0face9", "4cd1199943b1af0c",
+};
+
#define FILL_CHAR '#'
static unsigned total_tests __initdata;
@@ -56,6 +73,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
size_t l = len;
int gs = groupsize, rs = rowsize;
unsigned int i;
+ const bool is_be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
if (rs != 16 && rs != 32)
rs = 16;
@@ -67,13 +85,13 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
gs = 1;
if (gs == 8)
- result = test_data_8_le;
+ result = is_be ? test_data_8_be : test_data_8_le;
else if (gs == 4)
- result = test_data_4_le;
+ result = is_be ? test_data_4_be : test_data_4_le;
else if (gs == 2)
- result = test_data_2_le;
+ result = is_be ? test_data_2_be : test_data_2_le;
else
- result = test_data_1_le;
+ result = test_data_1;
/* hex dump */
p = test;
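
The big-endian expectations added above exist because grouped dumps print native-order words; a short sketch of the kind of call whose output the test checks (buffer and length assumed given):

#include <linux/printk.h>

static void example_hexdump(const void *buf, size_t len)
{
	char line[100];

	/*
	 * groupsize 4 prints 32-bit words in native byte order, which is
	 * why big- and little-endian machines need different expected
	 * strings in the test above.
	 */
	hex_dump_to_buffer(buf, len, 16, 4, line, sizeof(line), false);
	pr_info("%s\n", line);
}
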
diff --git a/lib/test_ida.c b/lib/test_ida.c
new file mode 100644
index 000000000000..b06880625961
--- /dev/null
+++ b/lib/test_ida.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_ida.c: Test the IDA API
+ * Copyright (c) 2016-2018 Microsoft Corporation
+ * Copyright (c) 2018 Oracle Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/idr.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifdef __KERNEL__
+void ida_dump(struct ida *ida) { }
+#endif
+#define IDA_BUG_ON(ida, x) do { \
+ tests_run++; \
+ if (x) { \
+ ida_dump(ida); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
+
+/*
+ * Straightforward checks that allocating and freeing IDs work.
+ */
+static void ida_check_alloc(struct ida *ida)
+{
+ int i, id;
+
+ for (i = 0; i < 10000; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+
+ ida_free(ida, 20);
+ ida_free(ida, 21);
+ for (i = 0; i < 3; i++) {
+ id = ida_alloc(ida, GFP_KERNEL);
+ IDA_BUG_ON(ida, id < 0);
+ if (i == 2)
+ IDA_BUG_ON(ida, id != 10000);
+ }
+
+ for (i = 0; i < 5000; i++)
+ ida_free(ida, i);
+
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
+ ida_destroy(ida);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Destroy an IDA with a single entry at @base */
+static void ida_check_destroy_1(struct ida *ida, unsigned int base)
+{
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Check that ida_destroy and ida_is_empty work */
+static void ida_check_destroy(struct ida *ida)
+{
+ /* Destroy an already-empty IDA */
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ ida_check_destroy_1(ida, 0);
+ ida_check_destroy_1(ida, 1);
+ ida_check_destroy_1(ida, 1023);
+ ida_check_destroy_1(ida, 1024);
+ ida_check_destroy_1(ida, 12345678);
+}
+
+/*
+ * Check what happens when we fill a leaf and then delete it. This may
+ * discover mishandling of IDR_FREE.
+ */
+static void ida_check_leaf(struct ida *ida, unsigned int base)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_free(ida, 0);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/*
+ * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
+ * Allocating up to 2^31-1 should succeed, and then allocating the next one
+ * should fail.
+ */
+static void ida_check_max(struct ida *ida)
+{
+ unsigned long i, j;
+
+ for (j = 1; j < 65537; j *= 2) {
+ unsigned long base = (1UL << 31) - j;
+ for (i = 0; i < j; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ -ENOSPC);
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+}
+
+/*
+ * Check handling of conversions between exceptional entries and full bitmaps.
+ */
+static void ida_check_conv(struct ida *ida)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
+ GFP_KERNEL) != i + BITS_PER_LONG);
+ ida_free(ida, i + 1);
+ ida_free(ida, i + BITS_PER_LONG);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+static DEFINE_IDA(ida);
+
+static int ida_checks(void)
+{
+ IDA_BUG_ON(&ida, !ida_is_empty(&ida));
+ ida_check_alloc(&ida);
+ ida_check_destroy(&ida);
+ ida_check_leaf(&ida, 0);
+ ida_check_leaf(&ida, 1024);
+ ida_check_leaf(&ida, 1024 * 64);
+ ida_check_max(&ida);
+ ida_check_conv(&ida);
+
+ printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run != tests_passed) ? 0 : -EINVAL;
+}
+
+static void ida_exit(void)
+{
+}
+
+module_init(ida_checks);
+module_exit(ida_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
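
The allocation pattern these checks exercise looks roughly like this in a typical IDA user (names hypothetical):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* Returns the lowest free ID >= 1, or a negative errno. */
	return ida_alloc_min(&example_ida, 1, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}
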
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 98854a64b014..51b78405bf24 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -567,10 +567,85 @@ static noinline void __init kmem_cache_invalid_free(void)
return;
}
+ /* Trigger invalid free, the object doesn't get freed */
kmem_cache_free(cache, p + 1);
+
+ /*
+ * Properly free the object to prevent the "Objects remaining in
+ * test_cache on __kmem_cache_shutdown" BUG failure.
+ */
+ kmem_cache_free(cache, p);
+
kmem_cache_destroy(cache);
}
+static noinline void __init kasan_memchr(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("out-of-bounds in memchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memchr(ptr, '1', size + 1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_memcmp(void)
+{
+ char *ptr;
+ size_t size = 24;
+ int arr[9];
+
+ pr_info("out-of-bounds in memcmp\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ memset(arr, 0, sizeof(arr));
+ memcmp(ptr, arr, size+1);
+ kfree(ptr);
+}
+
+static noinline void __init kasan_strings(void)
+{
+ char *ptr;
+ size_t size = 24;
+
+ pr_info("use-after-free in strchr\n");
+ ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ if (!ptr)
+ return;
+
+ kfree(ptr);
+
+ /*
+ * Try to cause only 1 invalid access (less spam in dmesg).
+ * For that we need ptr to point to zeroed byte.
+ * Skip metadata that could be stored in freed object so ptr
+ * will likely point to zeroed byte.
+ */
+ ptr += 16;
+ strchr(ptr, '1');
+
+ pr_info("use-after-free in strrchr\n");
+ strrchr(ptr, '1');
+
+ pr_info("use-after-free in strcmp\n");
+ strcmp(ptr, "2");
+
+ pr_info("use-after-free in strncmp\n");
+ strncmp(ptr, "2", 1);
+
+ pr_info("use-after-free in strlen\n");
+ strlen(ptr);
+
+ pr_info("use-after-free in strnlen\n");
+ strnlen(ptr, 1);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -610,6 +685,9 @@ static int __init kmalloc_tests_init(void)
use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
+ kasan_memchr();
+ kasan_memcmp();
+ kasan_strings();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 0e5b7a61460b..e3ddd836491f 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -779,8 +779,9 @@ static int kmod_config_sync_info(struct kmod_test_device *test_dev)
struct test_config *config = &test_dev->config;
free_test_dev_info(test_dev);
- test_dev->info = vzalloc(config->num_threads *
- sizeof(struct kmod_test_device_info));
+ test_dev->info =
+ vzalloc(array_size(sizeof(struct kmod_test_device_info),
+ config->num_threads));
if (!test_dev->info)
return -ENOMEM;
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
new file mode 100644
index 000000000000..849c477d49d0
--- /dev/null
+++ b/lib/test_memcat_p.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcat_p() in lib/memcat_p.c
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct test_struct {
+ int num;
+ unsigned int magic;
+};
+
+#define MAGIC 0xf00ff00f
+/* Size of each of the NULL-terminated input arrays */
+#define INPUT_MAX 128
+/* Expected number of non-NULL elements in the output array */
+#define EXPECT (INPUT_MAX * 2 - 2)
+
+static int __init test_memcat_p_init(void)
+{
+ struct test_struct **in0, **in1, **out, **p;
+ int err = -ENOMEM, i, r, total = 0;
+
+ in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
+ if (!in0)
+ return err;
+
+ in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
+ if (!in1)
+ goto err_free_in0;
+
+ for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
+ in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
+ if (!in0[i])
+ goto err_free_elements;
+
+ in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
+ if (!in1[i]) {
+ kfree(in0[i]);
+ goto err_free_elements;
+ }
+
+ /* lifted from test_sort.c */
+ r = (r * 725861) % 6599;
+ in0[i]->num = r;
+ in1[i]->num = -r;
+ in0[i]->magic = MAGIC;
+ in1[i]->magic = MAGIC;
+ }
+
+ in0[i] = in1[i] = NULL;
+
+ out = memcat_p(in0, in1);
+ if (!out)
+ goto err_free_all_elements;
+
+ err = -EINVAL;
+ for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
+ total += (*p)->num;
+
+ if ((*p)->magic != MAGIC) {
+ pr_err("test failed: wrong magic at %d: %u\n", i,
+ (*p)->magic);
+ goto err_free_out;
+ }
+ }
+
+ if (total) {
+ pr_err("test failed: expected zero total, got %d\n", total);
+ goto err_free_out;
+ }
+
+ if (i != EXPECT) {
+ pr_err("test failed: expected output size %d, got %d\n",
+ EXPECT, i);
+ goto err_free_out;
+ }
+
+ for (i = 0; i < INPUT_MAX - 1; i++)
+ if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) {
+ pr_err("test failed: wrong element order at %d\n", i);
+ goto err_free_out;
+ }
+
+ err = 0;
+ pr_info("test passed\n");
+
+err_free_out:
+ kfree(out);
+err_free_all_elements:
+ i = INPUT_MAX;
+err_free_elements:
+ for (i--; i >= 0; i--) {
+ kfree(in1[i]);
+ kfree(in0[i]);
+ }
+
+ kfree(in1);
+err_free_in0:
+ kfree(in0);
+
+ return err;
+}
+
+static void __exit test_memcat_p_exit(void)
+{
+}
+
+module_init(test_memcat_p_init);
+module_exit(test_memcat_p_exit);
+
+MODULE_LICENSE("GPL");
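
The test above covers memcat_p(), which concatenates two NULL-terminated pointer arrays into one freshly kmalloc'ed, NULL-terminated array; a usage sketch with a hypothetical element type:

#include <linux/slab.h>
#include <linux/string.h>

struct example_item {
	int val;
};

/* a and b are NULL-terminated arrays of pointers, as in the test above. */
static struct example_item **example_merge(struct example_item **a,
					   struct example_item **b)
{
	struct example_item **merged;

	merged = memcat_p(a, b);	/* a's entries, then b's, then NULL */
	if (!merged)
		return NULL;

	/* ... use merged[], then kfree(merged) (not the elements). */
	return merged;
}
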
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
new file mode 100644
index 000000000000..fc680562d8b6
--- /dev/null
+++ b/lib/test_overflow.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Test cases for arithmetic overflow checks.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#define DEFINE_TEST_ARRAY(t) \
+ static const struct test_ ## t { \
+ t a, b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t ## _tests[] __initconst
+
+DEFINE_TEST_ARRAY(u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U8_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U8_MAX, U8_MAX, 1, 0, false, true, false},
+ {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
+ {1, U8_MAX, 0, 2, U8_MAX, true, true, false},
+ {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
+ {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
+
+ {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
+ {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
+
+ {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
+ {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
+ {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
+ {1U << 7, 1U << 7, 0, 0, 0, true, false, true},
+
+ {48, 32, 80, 16, 0, false, false, true},
+ {128, 128, 0, 0, 0, true, false, true},
+ {123, 234, 101, 145, 110, true, true, true},
+};
+DEFINE_TEST_ARRAY(u16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U16_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U16_MAX, U16_MAX, 1, 0, false, true, false},
+ {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
+ {1, U16_MAX, 0, 2, U16_MAX, true, true, false},
+ {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
+ {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
+
+ {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
+ {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
+
+ {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
+ {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
+ {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
+ {1U << 15, 1U << 15, 0, 0, 0, true, false, true},
+
+ {123, 234, 357, 65425, 28782, false, true, false},
+ {1234, 2345, 3579, 64425, 10146, false, true, true},
+};
+DEFINE_TEST_ARRAY(u32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U32_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U32_MAX, U32_MAX, 1, 0, false, true, false},
+ {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
+ {1, U32_MAX, 0, 2, U32_MAX, true, true, false},
+ {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
+ {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
+
+ {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
+ {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
+
+ {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
+ {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
+ {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
+ {1U << 31, 1U << 31, 0, 0, 0, true, false, true},
+
+ {-2U, 1U, -1U, -3U, -2U, false, false, false},
+ {-4U, 5U, 1U, -9U, -20U, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(u64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U64_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U64_MAX, U64_MAX, 1, 0, false, true, false},
+ {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
+ {1, U64_MAX, 0, 2, U64_MAX, true, true, false},
+ {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
+ {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
+
+ {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
+ {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
+
+ {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
+ {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
+ {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
+ {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
+ {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
+ 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
+ false, true, false},
+ {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
+};
+
+DEFINE_TEST_ARRAY(s8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
+ {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
+ {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
+ {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
+
+ {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
+ {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
+ {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
+ {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
+ {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
+ {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
+
+ {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
+ {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
+ {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
+ {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
+
+ {S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
+ {S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
+
+ {-4, -32, -36, 28, -128, false, false, true},
+ {-4, 32, 28, -36, -128, false, false, false},
+};
+
+DEFINE_TEST_ARRAY(s16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
+ {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
+ {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
+ {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
+
+ {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
+ {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
+ {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
+ {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
+ {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
+ {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
+
+ {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
+ {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
+ {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
+ {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
+
+ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
+ {S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
+ {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
+ {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
+ {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
+
+ {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
+ {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
+ {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
+ {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
+ {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
+ {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
+
+ {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
+ {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
+ {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
+ {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
+
+ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
+ {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
+ {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
+ {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
+ {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
+
+ {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
+ {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
+ {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
+ {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
+ {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
+ {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
+
+ {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
+ {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
+ {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
+ {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
+
+ {S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
+ {S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
+
+ {-1, -1, -2, 0, 1, false, false, false},
+ {-1, -128, -129, 127, 128, false, false, false},
+ {-128, -1, -129, -127, 128, false, false, false},
+ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
+};
+
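+/*
+ * Run one check_<op>_overflow(a, b, &result) call and verify both the
+ * returned overflow flag and the (wrapped) result against the expected
+ * values from the test tables above.
+ */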
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ t _r; \
+ bool _of; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ if (_of != of) { \
+ pr_warn("expected "fmt" "sym" "fmt \
+ " to%s overflow (type %s)\n", \
+ a, b, of ? "" : " not", #t); \
+ err = 1; \
+ } \
+ if (_r != r) { \
+ pr_warn("expected "fmt" "sym" "fmt" == " \
+ fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ err = 1; \
+ } \
+} while (0)
+
+#define DEFINE_TEST_FUNC(t, fmt) \
+static int __init do_test_ ## t(const struct test_ ## t *p) \
+{ \
+ int err = 0; \
+ \
+ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
+ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
+ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
+ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
+ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+ \
+ return err; \
+} \
+ \
+static int __init test_ ## t ## _overflow(void) { \
+ int err = 0; \
+ unsigned i; \
+ \
+ pr_info("%-3s: %zu arithmetic tests\n", #t, \
+ ARRAY_SIZE(t ## _tests)); \
+ for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \
+ err |= do_test_ ## t(&t ## _tests[i]); \
+ return err; \
+}
+
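+/*
+ * Types narrower than int are promoted through the pr_warn() varargs,
+ * so "%d" is the correct format for the u8/s8/u16/s16 runners.
+ */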
+DEFINE_TEST_FUNC(u8, "%d");
+DEFINE_TEST_FUNC(s8, "%d");
+DEFINE_TEST_FUNC(u16, "%d");
+DEFINE_TEST_FUNC(s16, "%d");
+DEFINE_TEST_FUNC(u32, "%u");
+DEFINE_TEST_FUNC(s32, "%d");
+#if BITS_PER_LONG == 64
+DEFINE_TEST_FUNC(u64, "%llu");
+DEFINE_TEST_FUNC(s64, "%lld");
+#endif
+
+static int __init test_overflow_calculation(void)
+{
+ int err = 0;
+
+ err |= test_u8_overflow();
+ err |= test_s8_overflow();
+ err |= test_u16_overflow();
+ err |= test_s16_overflow();
+ err |= test_u32_overflow();
+ err |= test_s32_overflow();
+#if BITS_PER_LONG == 64
+ err |= test_u64_overflow();
+ err |= test_s64_overflow();
+#endif
+
+ return err;
+}
+
+static int __init test_overflow_shift(void)
+{
+ int err = 0;
+
+/* Args are: value, shift, type, expected result, overflow expected */
+#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \
+ int __failed = 0; \
+ typeof(a) __a = (a); \
+ typeof(s) __s = (s); \
+ t __e = (expect); \
+ t __d; \
+ bool __of = check_shl_overflow(__a, __s, &__d); \
+ if (__of != of) { \
+ pr_warn("expected (%s)(%s << %s) to%s overflow\n", \
+ #t, #a, #s, of ? "" : " not"); \
+ __failed = 1; \
+ } else if (!__of && __d != __e) { \
+ pr_warn("expected (%s)(%s << %s) == %s\n", \
+ #t, #a, #s, #expect); \
+ if ((t)-1 < 0) \
+ pr_warn("got %lld\n", (s64)__d); \
+ else \
+ pr_warn("got %llu\n", (u64)__d); \
+ __failed = 1; \
+ } \
+ if (!__failed) \
+ pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \
+ of ? "overflow" : #expect); \
+ __failed; \
+})
+
+ /* Sane shifts. */
+ err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64,
+ 0xFFFFFFFFULL << 32, false);
+
+ /* Sane shift: start and end with 0, without a too-wide shift. */
+ err |= TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 63, u64, 0, false);
+
+ /* Sane shift: start and end with 0, without reaching signed bit. */
+ err |= TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 30, int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 62, s64, 0, false);
+
+ /* Overflow: shifted the bit off the end. */
+ err |= TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ err |= TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ err |= TEST_ONE_SHIFT(1, 64, u64, 0, true);
+
+ /* Overflow: shifted into the signed bit. */
+ err |= TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ err |= TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ err |= TEST_ONE_SHIFT(1, 31, int, 0, true);
+ err |= TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1, 63, s64, 0, true);
+
+ /* Overflow: high bit falls off unsigned types. */
+ /* 10010110 */
+ err |= TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ /* 1000100010010110 */
+ err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ /* 10000100000010001000100010010110 */
+ err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ /* 1000001000010000010000000100000010000100000010001000100010010110 */
+ err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+
+ /* Overflow: bit shifted into signed bit on signed types. */
+ /* 01001011 */
+ err |= TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ /* 0100010001001011 */
+ err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+
+ /* Overflow: bit shifted past signed bit on signed types. */
+ /* 01001011 */
+ err |= TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ /* 0100010001001011 */
+ err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+
+ /* Overflow: values larger than destination type. */
+ err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+
+ /* Nonsense: negative initial value. */
+ err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ err |= TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+
+ /* Nonsense: negative shift values. */
+ err |= TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, -15, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ err |= TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ err |= TEST_ONE_SHIFT(0, -30, u64, 0, true);
+
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ err |= TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ err |= TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, int, 0, true);
+ err |= TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ err |= TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ err |= TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ /*
+ * Corner case: for unsigned types, we fail when we've shifted
+ * through the entire width of bits. For signed types, we might
+ * want to match this behavior, but that would mean noticing if
+ * we shift through all but the signed bit, and this is not
+ * currently detected (but we'll notice an overflow into the
+ * signed bit). So, for now, we will test this condition but
+ * mark it as not expected to overflow.
+ */
+ err |= TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ err |= TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, int, 0, false);
+ err |= TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ err |= TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ return err;
+}
+
+/*
+ * Deal with the various forms of allocator arguments. See comments above
+ * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
+ */
+#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
+#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc000(alloc, arg, sz) alloc(sz)
+#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define free0(free, arg, ptr) free(ptr)
+#define free1(free, arg, ptr) free(arg, ptr)
+
+/* Wrap around to 8K */
+#define TEST_SIZE (9 << PAGE_SHIFT)
+
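+/*
+ * Each allocator is exercised three ways: a tiny allocation that must
+ * succeed, an open-coded "a * b" size that wraps around (and so still
+ * succeeds), and an array_size(a, b) that saturates to SIZE_MAX and is
+ * therefore expected to fail.
+ */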
+#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
+static int __init test_ ## func (void *arg) \
+{ \
+ volatile size_t a = TEST_SIZE; \
+ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
+ void *ptr; \
+ \
+ /* Tiny allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
+ if (!ptr) { \
+ pr_warn(#func " failed regular allocation?!\n"); \
+ return 1; \
+ } \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Wrapped allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ a * b); \
+ if (!ptr) { \
+ pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \
+ return 1; \
+ } \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Saturated allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ array_size(a, b)); \
+ if (ptr) { \
+ pr_warn(#func " missed saturation!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ return 1; \
+ } \
+ pr_info(#func " detected saturation\n"); \
+ return 0; \
+}
+
+/*
+ * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node())
+ * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc())
+ * Allocator uses a special leading argument + | | (e.g. devm_kmalloc())
+ * | | |
+ */
+DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0);
+DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1);
+DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0);
+DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1);
+DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
+DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
+
+static int __init test_overflow_allocation(void)
+{
+ const char device_name[] = "overflow-test";
+ struct device *dev;
+ int err = 0;
+
+ /* Create dummy device for devm_kmalloc()-family tests. */
+ dev = root_device_register(device_name);
+ if (IS_ERR(dev)) {
+ pr_warn("Cannot register test device\n");
+ return 1;
+ }
+
+ err |= test_kmalloc(NULL);
+ err |= test_kmalloc_node(NULL);
+ err |= test_kzalloc(NULL);
+ err |= test_kzalloc_node(NULL);
+ err |= test_kvmalloc(NULL);
+ err |= test_kvmalloc_node(NULL);
+ err |= test_kvzalloc(NULL);
+ err |= test_kvzalloc_node(NULL);
+ err |= test_vmalloc(NULL);
+ err |= test_vmalloc_node(NULL);
+ err |= test_vzalloc(NULL);
+ err |= test_vzalloc_node(NULL);
+ err |= test_devm_kmalloc(dev);
+ err |= test_devm_kzalloc(dev);
+
+ device_unregister(dev);
+
+ return err;
+}
+
+static int __init test_module_init(void)
+{
+ int err = 0;
+
+ err |= test_overflow_calculation();
+ err |= test_overflow_shift();
+ err |= test_overflow_allocation();
+
+ if (err) {
+ pr_warn("FAIL!\n");
+ err = -EINVAL;
+ } else {
+ pr_info("all tests passed\n");
+ }
+
+ return err;
+}
+
+static void __exit test_module_exit(void)
+{ }
+
+module_init(test_module_init);
+module_exit(test_module_exit);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 71ebfa43ad05..53527ea822b5 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -204,8 +204,9 @@ test_string(void)
#if BITS_PER_LONG == 64
#define PTR_WIDTH 16
-#define PTR ((void *)0xffff0123456789ab)
+#define PTR ((void *)0xffff0123456789abUL)
#define PTR_STR "ffff0123456789ab"
+#define PTR_VAL_NO_CRNG "(____ptrval____)"
#define ZEROS "00000000" /* hex 32 zero bits */
static int __init
@@ -216,7 +217,16 @@ plain_format(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
return -1;
return 0;
@@ -227,6 +237,7 @@ plain_format(void)
#define PTR_WIDTH 8
#define PTR ((void *)0x456789ab)
#define PTR_STR "456789ab"
+#define PTR_VAL_NO_CRNG "(ptrval)"
static int __init
plain_format(void)
@@ -245,7 +256,16 @@ plain_hash(void)
nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
- if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
return -1;
return 0;
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f4000c137dbe..82ac39ce5310 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
const struct test_obj_rhl *obj = data;
- return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+ return (obj->value.id % 10);
}
static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
- .nulls_base = (3U << RHT_BASE_SHIFT),
};
static struct rhashtable_params test_rht_params_dup = {
@@ -285,17 +284,17 @@ static int __init test_rhltable(unsigned int entries)
if (entries == 0)
entries = 1;
- rhl_test_objects = vzalloc(sizeof(*rhl_test_objects) * entries);
+ rhl_test_objects = vzalloc(array_size(entries,
+ sizeof(*rhl_test_objects)));
if (!rhl_test_objects)
return -ENOMEM;
ret = -ENOMEM;
- obj_in_table = vzalloc(BITS_TO_LONGS(entries) * sizeof(unsigned long));
+ obj_in_table = vzalloc(array_size(sizeof(unsigned long),
+ BITS_TO_LONGS(entries)));
if (!obj_in_table)
goto out_free;
- /* nulls_base not supported in rhlist interface */
- test_rht_params.nulls_base = 0;
err = rhltable_init(&rhlt, &test_rht_params);
if (WARN_ON(err))
goto out_free;
@@ -499,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
unsigned int i, cnt = 0;
ht = &rhlt->ht;
+ /* Take the mutex to avoid RCU warning */
+ mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
@@ -532,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+ mutex_unlock(&ht->mutex);
return cnt;
}
@@ -706,7 +708,8 @@ static int __init test_rht_init(void)
test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
test_rht_params.nelem_hint = size;
- objs = vzalloc((test_rht_params.max_size + 1) * sizeof(struct test_obj));
+ objs = vzalloc(array_size(sizeof(struct test_obj),
+ test_rht_params.max_size + 1));
if (!objs)
return -ENOMEM;
@@ -753,10 +756,10 @@ static int __init test_rht_init(void)
pr_info("Testing concurrent rhashtable access from %d threads\n",
tcount);
sema_init(&prestart_sem, 1 - tcount);
- tdata = vzalloc(tcount * sizeof(struct thread_data));
+ tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
if (!tdata)
return -ENOMEM;
- objs = vzalloc(tcount * entries * sizeof(struct test_obj));
+ objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries));
if (!objs) {
vfree(tdata);
return -ENOMEM;
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
new file mode 100644
index 000000000000..280f4979d00e
--- /dev/null
+++ b/lib/test_ubsan.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+typedef void(*test_ubsan_fp)(void);
+
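+/*
+ * Each test below deliberately triggers one class of undefined
+ * behaviour so the corresponding UBSAN handler fires at runtime.
+ */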
+static void test_ubsan_add_overflow(void)
+{
+ volatile int val = INT_MAX;
+
+ val += 2;
+}
+
+static void test_ubsan_sub_overflow(void)
+{
+ volatile int val = INT_MIN;
+ volatile int val2 = 2;
+
+ val -= val2;
+}
+
+static void test_ubsan_mul_overflow(void)
+{
+ volatile int val = INT_MAX / 2;
+
+ val *= 3;
+}
+
+static void test_ubsan_negate_overflow(void)
+{
+ volatile int val = INT_MIN;
+
+ val = -val;
+}
+
+static void test_ubsan_divrem_overflow(void)
+{
+ volatile int val = 16;
+ volatile int val2 = 0;
+
+ val /= val2;
+}
+
+static void test_ubsan_vla_bound_not_positive(void)
+{
+ volatile int size = -1;
+ char buf[size];
+
+ (void)buf;
+}
+
+static void test_ubsan_shift_out_of_bounds(void)
+{
+ volatile int val = -1;
+ int val2 = 10;
+
+ val2 <<= val;
+}
+
+static void test_ubsan_out_of_bounds(void)
+{
+ volatile int i = 4, j = 5;
+ volatile int arr[i];
+
+ arr[j] = i;
+}
+
+static void test_ubsan_load_invalid_value(void)
+{
+ volatile char *dst, *src;
+ bool val, val2, *ptr;
+ char c = 4;
+
+ dst = (char *)&val;
+ src = &c;
+ *dst = *src;
+
+ ptr = &val2;
+ val2 = val;
+}
+
+static void test_ubsan_null_ptr_deref(void)
+{
+ volatile int *ptr = NULL;
+ int val;
+
+ val = *ptr;
+}
+
+static void test_ubsan_misaligned_access(void)
+{
+ volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
+ volatile int *ptr, val = 6;
+
+ ptr = (int *)(arr + 1);
+ *ptr = val;
+}
+
+static void test_ubsan_object_size_mismatch(void)
+{
+ /* "((aligned(8)))" helps this not into be misaligned for ptr-access. */
+ volatile int val __aligned(8) = 4;
+ volatile long long *ptr, val2;
+
+ ptr = (long long *)&val;
+ val2 = *ptr;
+}
+
+static const test_ubsan_fp test_ubsan_array[] = {
+ test_ubsan_add_overflow,
+ test_ubsan_sub_overflow,
+ test_ubsan_mul_overflow,
+ test_ubsan_negate_overflow,
+ test_ubsan_divrem_overflow,
+ test_ubsan_vla_bound_not_positive,
+ test_ubsan_shift_out_of_bounds,
+ test_ubsan_out_of_bounds,
+ test_ubsan_load_invalid_value,
+ //test_ubsan_null_ptr_deref, /* exclude it because there is a crash */
+ test_ubsan_misaligned_access,
+ test_ubsan_object_size_mismatch,
+};
+
+static int __init test_ubsan_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++)
+ test_ubsan_array[i]();
+
+ (void)test_ubsan_null_ptr_deref; /* to avoid unused-function warning */
+ return 0;
+}
+module_init(test_ubsan_init);
+
+static void __exit test_ubsan_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_ubsan_exit);
+
+MODULE_AUTHOR("Jinbum Park <jinb.park7@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 4621db801b23..e161f0498f42 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,11 +31,8 @@
* their capability at compile-time, we just have to opt-out certain archs.
*/
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_BLACKFIN) && \
- !defined(CONFIG_M32R) && \
!defined(CONFIG_M68K) && \
!defined(CONFIG_MICROBLAZE) && \
- !defined(CONFIG_MN10300) && \
!defined(CONFIG_NIOS2) && \
!defined(CONFIG_PPC32) && \
!defined(CONFIG_SUPERH))
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
new file mode 100644
index 000000000000..aa47754150ce
--- /dev/null
+++ b/lib/test_xarray.c
@@ -0,0 +1,1238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_xarray.c: Test the XArray API
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/xarray.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifndef XA_DEBUG
+# ifdef __KERNEL__
+void xa_dump(const struct xarray *xa) { }
+# endif
+#undef XA_BUG_ON
+#define XA_BUG_ON(xa, x) do { \
+ tests_run++; \
+ if (x) { \
+ printk("BUG at %s:%d\n", __func__, __LINE__); \
+ xa_dump(xa); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
+#endif
+
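+/*
+ * Helpers used throughout the tests: store, allocate and erase the
+ * value entry derived from @index.
+ */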
+static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+}
+
+static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ u32 id = 0;
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+ gfp) != 0);
+ XA_BUG_ON(xa, id != index);
+}
+
+static void xa_erase_index(struct xarray *xa, unsigned long index)
+{
+ XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+ XA_BUG_ON(xa, xa_load(xa, index) != NULL);
+}
+
+/*
+ * If anyone needs this, please move it to xarray.c. We have no current
+ * users outside the test suite because all current multislot users want
+ * to use the advanced API.
+ */
+static void *xa_store_order(struct xarray *xa, unsigned long index,
+ unsigned order, void *entry, gfp_t gfp)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ void *curr;
+
+ do {
+ xas_lock(&xas);
+ curr = xas_store(&xas, entry);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return curr;
+}
+
+static noinline void check_xa_err(struct xarray *xa)
+{
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
+#ifndef __KERNEL__
+ /* The kernel does not fail GFP_NOWAIT allocations */
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+#endif
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
+// kills the test-suite :-(
+// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
+}
+
+static noinline void check_xas_retry(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_store_index(xa, 1, GFP_KERNEL);
+
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
+ xa_erase_index(xa, 1);
+ XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
+ XA_BUG_ON(xa, xas_retry(&xas, NULL));
+ XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
+ xas_reset(&xas);
+ XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
+ XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_node != NULL);
+
+ XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
+ xas.xa_node = XAS_RESTART;
+ XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+ rcu_read_unlock();
+
+ /* Make sure we can iterate through retry entries */
+ xas_lock(&xas);
+ xas_set(&xas, 0);
+ xas_store(&xas, XA_RETRY_ENTRY);
+ xas_set(&xas, 1);
+ xas_store(&xas, XA_RETRY_ENTRY);
+
+ xas_set(&xas, 0);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_store(&xas, xa_mk_value(xas.xa_index));
+ }
+ xas_unlock(&xas);
+
+ xa_erase_index(xa, 0);
+ xa_erase_index(xa, 1);
+}
+
+static noinline void check_xa_load(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 1024; j++) {
+ void *entry = xa_load(xa, j);
+ if (j < i)
+ XA_BUG_ON(xa, xa_to_value(entry) != j);
+ else
+ XA_BUG_ON(xa, entry);
+ }
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+ }
+
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 1024; j++) {
+ void *entry = xa_load(xa, j);
+ if (j >= i)
+ XA_BUG_ON(xa, xa_to_value(entry) != j);
+ else
+ XA_BUG_ON(xa, entry);
+ }
+ xa_erase_index(xa, i);
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
+{
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
+
+ /* NULL elements have no marks set */
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+ /* Storing a pointer will not make a mark appear */
+ XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ /* Setting one mark will not set another mark */
+ XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
+
+ /* Storing NULL clears marks, and they can't be set again */
+ xa_erase_index(xa, index);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+ /*
+ * Storing a multi-index entry over entries with marks gives the
+ * entire entry the union of the marks
+ */
+ BUG_ON((index % 4) != 0);
+ for (order = 2; order < max_order; order++) {
+ unsigned long base = round_down(index, 1UL << order);
+ unsigned long next = base + (1UL << order);
+ unsigned long i;
+
+ XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
+ xa_set_mark(xa, index + 1, XA_MARK_0);
+ XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
+ xa_set_mark(xa, index + 2, XA_MARK_1);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
+ xa_store_order(xa, index, order, xa_mk_value(index),
+ GFP_KERNEL);
+ for (i = base; i < next; i++) {
+ XA_STATE(xas, xa, i);
+ unsigned int seen = 0;
+ void *entry;
+
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+
+ /* We should see two elements in the array */
+ xas_for_each(&xas, entry, ULONG_MAX)
+ seen++;
+ XA_BUG_ON(xa, seen != 2);
+
+ /* One of which is marked */
+ xas_set(&xas, 0);
+ seen = 0;
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ seen++;
+ XA_BUG_ON(xa, seen != 1);
+ }
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
+ xa_erase_index(xa, index);
+ xa_erase_index(xa, next);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_mark_2(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long index;
+ unsigned int count = 0;
+ void *entry;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_set_mark(xa, 0, XA_MARK_0);
+ xas_lock(&xas);
+ xas_load(&xas);
+ xas_init_marks(&xas);
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
+
+ for (index = 3500; index < 4500; index++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_0);
+ }
+
+ xas_reset(&xas);
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ count++;
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != 1000);
+
+ xas_lock(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_init_marks(&xas);
+ XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
+ XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
+ }
+ xas_unlock(&xas);
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xa_mark(struct xarray *xa)
+{
+ unsigned long index;
+
+ for (index = 0; index < 16384; index += 4)
+ check_xa_mark_1(xa, index);
+
+ check_xa_mark_2(xa);
+}
+
+static noinline void check_xa_shrink(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 1);
+ struct xa_node *node;
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+ /*
+ * Check that erasing the entry at 1 shrinks the tree and properly
+ * marks the node as being deleted.
+ */
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
+ node = xas.xa_node;
+ XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
+ XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
+ XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
+ XA_BUG_ON(xa, xas_load(&xas) != NULL);
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 0; order < max_order; order++) {
+ unsigned long max = (1UL << order) - 1;
+ xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
+ rcu_read_lock();
+ node = xa_head(xa);
+ rcu_read_unlock();
+ XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
+ NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_head(xa) == node);
+ rcu_read_unlock();
+ XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, xa->xa_head != node);
+ xa_erase_index(xa, 0);
+ }
+}
+
+static noinline void check_cmpxchg(struct xarray *xa)
+{
+ void *FIVE = xa_mk_value(5);
+ void *SIX = xa_mk_value(6);
+ void *LOTS = xa_mk_value(12345678);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ xa_erase_index(xa, 12345678);
+ xa_erase_index(xa, 5);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_reserve(struct xarray *xa)
+{
+ void *entry;
+ unsigned long index = 0;
+
+ /* An array with a reserved entry is not empty */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_load(xa, 12345678));
+ xa_release(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Releasing a used entry does nothing */
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
+ xa_release(xa, 12345678);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* cmpxchg sees a reserved entry as NULL */
+ xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
+ GFP_NOWAIT) != NULL);
+ xa_release(xa, 12345678);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Can iterate through a reserved entry */
+ xa_store_index(xa, 5, GFP_KERNEL);
+ xa_reserve(xa, 6, GFP_KERNEL);
+ xa_store_index(xa, 7, GFP_KERNEL);
+
+ xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ XA_BUG_ON(xa, index != 5 && index != 7);
+ }
+ xa_destroy(xa);
+}
+
+static noinline void check_xas_erase(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned long i, j;
+
+ for (i = 0; i < 200; i++) {
+ for (j = i; j < 2 * i + 17; j++) {
+ xas_set(&xas, j);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(j));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xas_set(&xas, ULONG_MAX);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(0));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ xas_lock(&xas);
+ xas_store(&xas, NULL);
+
+ xas_set(&xas, 0);
+ j = i;
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_value(j));
+ xas_store(&xas, NULL);
+ j++;
+ }
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ unsigned long min = index & ~((1UL << order) - 1);
+ unsigned long max = min + (1UL << order);
+
+ xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+ XA_BUG_ON(xa, xa_load(xa, max) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+ XA_BUG_ON(xa, xa_load(xa, max) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+
+ xa_erase_index(xa, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
+
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_index != index);
+ XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+#endif
+
+static noinline void check_multi_store(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned long i, j, k;
+ unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
+
+ /* Loading from any position returns the same value */
+ xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+ rcu_read_unlock();
+
+ /* Storing adjacent to the value does not alter the value */
+ xa_store(xa, 3, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+ rcu_read_unlock();
+
+ /* Overwriting multiple indexes works */
+ xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
+ rcu_read_unlock();
+
+ /* We can erase multiple values with a single store */
+ xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Even when the first slot is empty but the others aren't */
+ xa_store_index(xa, 1, GFP_KERNEL);
+ xa_store_index(xa, 2, GFP_KERNEL);
+ xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0; i < max_order; i++) {
+ for (j = 0; j < max_order; j++) {
+ xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
+ xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+
+ for (k = 0; k < max_order; k++) {
+ void *entry = xa_load(xa, (1UL << k) - 1);
+ if ((i < k) && (j < k))
+ XA_BUG_ON(xa, entry != NULL);
+ else
+ XA_BUG_ON(xa, entry != xa_mk_value(j));
+ }
+
+ xa_erase(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ check_multi_store_1(xa, 200, i);
+ check_multi_store_1(xa, 0, i);
+ check_multi_store_1(xa, (1UL << i) + 1, i);
+ }
+ check_multi_store_2(xa, 4095, 9);
+#endif
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+
+static noinline void check_xa_alloc(void)
+{
+ int i;
+ u32 id;
+
+ /* An empty array should assign 0 to the first alloc */
+ xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+ /* Erasing it should make the array empty again */
+ xa_erase_index(&xa0, 0);
+ XA_BUG_ON(&xa0, !xa_empty(&xa0));
+
+ /* And it should assign 0 again */
+ xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+ /* The next assigned ID should be 1 */
+ xa_alloc_index(&xa0, 1, GFP_KERNEL);
+ xa_erase_index(&xa0, 1);
+
+ /* Storing a value should mark it used */
+ xa_store_index(&xa0, 1, GFP_KERNEL);
+ xa_alloc_index(&xa0, 2, GFP_KERNEL);
+
+ /* If we then erase 0, it should be free */
+ xa_erase_index(&xa0, 0);
+ xa_alloc_index(&xa0, 0, GFP_KERNEL);
+
+ xa_erase_index(&xa0, 1);
+ xa_erase_index(&xa0, 2);
+
+ for (i = 1; i < 5000; i++) {
+ xa_alloc_index(&xa0, i, GFP_KERNEL);
+ }
+
+ xa_destroy(&xa0);
+
+ id = 0xfffffffeU;
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(&xa0, id != 0xfffffffeU);
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(&xa0, id != 0xffffffffU);
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ GFP_KERNEL) != -ENOSPC);
+ XA_BUG_ON(&xa0, id != 0xffffffffU);
+ xa_destroy(&xa0);
+}
+
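+/*
+ * Store a multi-order entry over @present pre-existing entries and
+ * verify that xas_for_each_conflict() visited each of them once.
+ */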
+static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
+ unsigned int order, unsigned int present)
+{
+ XA_STATE_ORDER(xas, xa, start, order);
+ void *entry;
+ unsigned int count = 0;
+
+retry:
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, !xa_is_value(entry));
+ XA_BUG_ON(xa, entry < xa_mk_value(start));
+ XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+ count++;
+ }
+ xas_store(&xas, xa_mk_value(start));
+ xas_unlock(&xas);
+ if (xas_nomem(&xas, GFP_KERNEL)) {
+ count = 0;
+ goto retry;
+ }
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, count != present);
+ XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+ XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
+ xa_mk_value(start));
+ xa_erase_index(xa, start);
+}
+
+static noinline void check_store_iter(struct xarray *xa)
+{
+ unsigned int i, j;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+
+ for (i = 0; i < max_order; i++) {
+ unsigned int min = 1 << i;
+ unsigned int max = (2 << i) - 1;
+ __check_store_iter(xa, 0, i, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ __check_store_iter(xa, min, i, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ xa_store_index(xa, min, GFP_KERNEL);
+ __check_store_iter(xa, min, i, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_index(xa, max, GFP_KERNEL);
+ __check_store_iter(xa, min, i, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (j = 0; j < min; j++)
+ xa_store_index(xa, j, GFP_KERNEL);
+ __check_store_iter(xa, 0, i, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ for (j = 0; j < min; j++)
+ xa_store_index(xa, min + j, GFP_KERNEL);
+ __check_store_iter(xa, min, i, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_index(xa, 63, GFP_KERNEL);
+ xa_store_index(xa, 65, GFP_KERNEL);
+ __check_store_iter(xa, 64, 2, 1);
+ xa_erase_index(xa, 63);
+#endif
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_find(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned long index;
+
+ xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+
+ index = 0;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(12));
+ XA_BUG_ON(xa, index != 12);
+ index = 13;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(12));
+ XA_BUG_ON(xa, (index < 12) || (index >= 16));
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(16));
+ XA_BUG_ON(xa, index != 16);
+
+ xa_erase_index(xa, 12);
+ xa_erase_index(xa, 16);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
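+/* Check that iteration copes with a multi-order entry being erased under it. */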
+static noinline void check_multi_find_2(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
+ unsigned int i, j;
+ void *entry;
+
+ for (i = 0; i < max_order; i++) {
+ unsigned long index = 1UL << i;
+ for (j = 0; j < index; j++) {
+ XA_STATE(xas, xa, j + index);
+ xa_store_index(xa, index - 1, GFP_KERNEL);
+ xa_store_order(xa, index, i, xa_mk_value(index),
+ GFP_KERNEL);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xa_erase_index(xa, index);
+ }
+ rcu_read_unlock();
+ xa_erase_index(xa, index - 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ }
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+ unsigned long i, j, k;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /*
+ * Check xa_find with all pairs between 0 and 99 inclusive,
+ * starting at every index between 0 and 99
+ */
+ for (i = 0; i < 100; i++) {
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+ xa_set_mark(xa, i, XA_MARK_0);
+ for (j = 0; j < i; j++) {
+ XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
+ NULL);
+ xa_set_mark(xa, j, XA_MARK_0);
+ for (k = 0; k < 100; k++) {
+ unsigned long index = k;
+ void *entry = xa_find(xa, &index, ULONG_MAX,
+ XA_PRESENT);
+ if (k <= j)
+ XA_BUG_ON(xa, index != j);
+ else if (k <= i)
+ XA_BUG_ON(xa, index != i);
+ else
+ XA_BUG_ON(xa, entry != NULL);
+
+ index = k;
+ entry = xa_find(xa, &index, ULONG_MAX,
+ XA_MARK_0);
+ if (k <= j)
+ XA_BUG_ON(xa, index != j);
+ else if (k <= i)
+ XA_BUG_ON(xa, index != i);
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ }
+ xa_erase_index(xa, j);
+ XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+ }
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+ check_multi_find(xa);
+ check_multi_find_2(xa);
+}
+
+/* See find_swap_entry() in mm/shmem.c */
+static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned int checked = 0;
+ void *entry;
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ if (xas_retry(&xas, entry))
+ continue;
+ if (entry == item)
+ break;
+ checked++;
+ if ((checked % 4) != 0)
+ continue;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+
+ return entry ? xas.xa_index : -1;
+}
+
+static noinline void check_find_entry(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int order;
+ unsigned long offset, index;
+
+ for (order = 0; order < 20; order++) {
+ for (offset = 0; offset < (1UL << (order + 3));
+ offset += (1UL << order)) {
+ for (index = 0; index < (1UL << (order + 5));
+ index += (1UL << order)) {
+ xa_store_order(xa, index, order,
+ xa_mk_value(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, index) !=
+ xa_mk_value(index));
+ XA_BUG_ON(xa, xa_find_entry(xa,
+ xa_mk_value(index)) != index);
+ }
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ xa_destroy(xa);
+ }
+ }
+#endif
+
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move_small(struct xarray *xa, unsigned long idx)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long i;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_store_index(xa, idx, GFP_KERNEL);
+
+ rcu_read_lock();
+ for (i = 0; i < idx * 4; i++) {
+ void *entry = xas_next(&xas);
+ if (i <= idx)
+ XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
+ XA_BUG_ON(xa, xas.xa_index != i);
+ if (i == 0 || i == idx)
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ }
+ xas_next(&xas);
+ XA_BUG_ON(xa, xas.xa_index != i);
+
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ if (i <= idx)
+ XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
+ XA_BUG_ON(xa, xas.xa_index != i);
+ if (i == 0 || i == idx)
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ } while (i > 0);
+
+ xas_set(&xas, ULONG_MAX);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_index != 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, 0);
+ xa_erase_index(xa, idx);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move(struct xarray *xa)
+{
+ XA_STATE(xas, xa, (1 << 16) - 1);
+ unsigned long i;
+
+ for (i = 0; i < (1 << 16); i++)
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+
+ rcu_read_lock();
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, i != xas.xa_index);
+ } while (i != 0);
+
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+
+ do {
+ void *entry = xas_next(&xas);
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, i != xas.xa_index);
+ i++;
+ } while (i < (1 << 16));
+ rcu_read_unlock();
+
+ for (i = (1 << 8); i < (1 << 15); i++)
+ xa_erase_index(xa, i);
+
+ i = xas.xa_index;
+
+ rcu_read_lock();
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ if ((i < (1 << 8)) || (i >= (1 << 15)))
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ XA_BUG_ON(xa, i != xas.xa_index);
+ } while (i != 0);
+
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+
+ do {
+ void *entry = xas_next(&xas);
+ if ((i < (1 << 8)) || (i >= (1 << 15)))
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ XA_BUG_ON(xa, i != xas.xa_index);
+ i++;
+ } while (i < (1 << 16));
+ rcu_read_unlock();
+
+ xa_destroy(xa);
+
+ for (i = 0; i < 16; i++)
+ check_move_small(xa, 1UL << i);
+
+ for (i = 2; i < 16; i++)
+ check_move_small(xa, (1UL << i) - 1);
+}
+
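+/* Populate (1 << order) consecutive indices from @index via xas_create_range(). */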
+static noinline void xa_store_many_order(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i = 0;
+
+ do {
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_find_conflict(&xas));
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < (1U << order); i++) {
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+ xas_next(&xas);
+ }
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+}
+
+static noinline void check_create_range_1(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ unsigned long i;
+
+ xa_store_many_order(xa, index, order);
+ for (i = index; i < index + (1UL << order); i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_2(struct xarray *xa, unsigned order)
+{
+ unsigned long i;
+ unsigned long nr = 1UL << order;
+
+ for (i = 0; i < nr * nr; i += nr)
+ xa_store_many_order(xa, i, order);
+ for (i = 0; i < nr * nr; i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_3(void)
+{
+ XA_STATE(xas, NULL, 0);
+ xas_set_err(&xas, -EEXIST);
+ xas_create_range(&xas);
+ XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
+}
+
+static noinline void check_create_range_4(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned long base = xas.xa_index;
+ unsigned long i = 0;
+
+ xa_store_index(xa, index, GFP_KERNEL);
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < (1UL << order); i++) {
+ void *old = xas_store(&xas, xa_mk_value(base + i));
+ if (xas.xa_index == index)
+ XA_BUG_ON(xa, old != xa_mk_value(base + i));
+ else
+ XA_BUG_ON(xa, old != NULL);
+ xas_next(&xas);
+ }
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+
+ for (i = base; i < base + (1UL << order); i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range(struct xarray *xa)
+{
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
+
+ for (order = 0; order < max_order; order++) {
+ check_create_range_1(xa, 0, order);
+ check_create_range_1(xa, 1U << order, order);
+ check_create_range_1(xa, 2U << order, order);
+ check_create_range_1(xa, 3U << order, order);
+ check_create_range_1(xa, 1U << 24, order);
+ if (order < 10)
+ check_create_range_2(xa, order);
+
+ check_create_range_4(xa, 0, order);
+ check_create_range_4(xa, 1U << order, order);
+ check_create_range_4(xa, 2U << order, order);
+ check_create_range_4(xa, 3U << order, order);
+ check_create_range_4(xa, 1U << 24, order);
+
+ check_create_range_4(xa, 1, order);
+ check_create_range_4(xa, (1U << order) + 1, order);
+ check_create_range_4(xa, (2U << order) + 1, order);
+ check_create_range_4(xa, (2U << order) - 1, order);
+ check_create_range_4(xa, (3U << order) + 1, order);
+ check_create_range_4(xa, (3U << order) - 1, order);
+ check_create_range_4(xa, (1U << 24) + 1, order);
+ }
+
+ check_create_range_3();
+}
+
+static noinline void __check_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+
+ XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
+
+ xa_store_range(xa, first, last, NULL, GFP_KERNEL);
+#endif
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_store_range(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 128; i++) {
+ for (j = i; j < 128; j++) {
+ __check_store_range(xa, i, j);
+ __check_store_range(xa, 128 + i, 128 + j);
+ __check_store_range(xa, 4095 + i, 4095 + j);
+ __check_store_range(xa, 4096 + i, 4096 + j);
+ __check_store_range(xa, 123456 + i, 123456 + j);
+ __check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+ }
+ }
+}
+
+static LIST_HEAD(shadow_nodes);
+
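+/* Track nodes whose slots hold only value entries on the shadow_nodes list. */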
+static void test_update_node(struct xa_node *node)
+{
+ if (node->count && node->count == node->nr_values) {
+ if (list_empty(&node->private_list))
+ list_add(&shadow_nodes, &node->private_list);
+ } else {
+ if (!list_empty(&node->private_list))
+ list_del_init(&node->private_list);
+ }
+}
+
+static noinline void shadow_remove(struct xarray *xa)
+{
+ struct xa_node *node;
+
+ xa_lock(xa);
+ while ((node = list_first_entry_or_null(&shadow_nodes,
+ struct xa_node, private_list))) {
+ XA_STATE(xas, node->array, 0);
+ XA_BUG_ON(xa, node->array != xa);
+ list_del_init(&node->private_list);
+ xas.xa_node = xa_parent_locked(node->array, node);
+ xas.xa_offset = node->offset;
+ xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
+ xas_set_update(&xas, test_update_node);
+ xas_store(&xas, NULL);
+ }
+ xa_unlock(xa);
+}
+
+static noinline void check_workingset(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ xas_set_update(&xas, test_update_node);
+
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(0));
+ xas_next(&xas);
+ xas_store(&xas, xa_mk_value(1));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, list_empty(&shadow_nodes));
+
+ xas_lock(&xas);
+ xas_next(&xas);
+ xas_store(&xas, &xas);
+ XA_BUG_ON(xa, !list_empty(&shadow_nodes));
+
+ xas_store(&xas, xa_mk_value(2));
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, list_empty(&shadow_nodes));
+
+ shadow_remove(xa);
+ XA_BUG_ON(xa, !list_empty(&shadow_nodes));
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+/*
+ * Check that the pointer / value / sibling entries are accounted the
+ * way we expect them to be.
+ */
+static noinline void check_account(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int order;
+
+ for (order = 1; order < 12; order++) {
+ XA_STATE(xas, xa, 1 << order);
+
+ xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+ xas_load(&xas);
+ XA_BUG_ON(xa, xas.xa_node->count == 0);
+ XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
+ XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+
+ xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+ GFP_KERNEL);
+ XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
+
+ xa_erase(xa, 1 << order);
+ XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+
+ xa_erase(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+#endif
+}
+
+static noinline void check_destroy(struct xarray *xa)
+{
+ unsigned long index;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Destroying an empty array is a no-op */
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Destroying an array with a single entry */
+ for (index = 0; index < 1000; index++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+
+ /* Destroying an array with a single entry at ULONG_MAX */
+ xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+#ifdef CONFIG_XARRAY_MULTI
+ /* Destroying an array with a multi-index entry */
+ xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static DEFINE_XARRAY(array);
+
+static int xarray_checks(void)
+{
+ check_xa_err(&array);
+ check_xas_retry(&array);
+ check_xa_load(&array);
+ check_xa_mark(&array);
+ check_xa_shrink(&array);
+ check_xas_erase(&array);
+ check_cmpxchg(&array);
+ check_reserve(&array);
+ check_multi_store(&array);
+ check_xa_alloc();
+ check_find(&array);
+ check_find_entry(&array);
+ check_account(&array);
+ check_destroy(&array);
+ check_move(&array);
+ check_create_range(&array);
+ check_store_range(&array);
+ check_store_iter(&array);
+
+ check_workingset(&array, 0);
+ check_workingset(&array, 64);
+ check_workingset(&array, 4096);
+
+ printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run == tests_passed) ? 0 : -EINVAL;
+}
+
+static void xarray_exit(void)
+{
+}
+
+module_init(xarray_checks);
+module_exit(xarray_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 0b79908dfe89..5939549c0e7b 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -10,7 +10,10 @@
* Pablo Neira Ayuso <pablo@netfilter.org>
*
* ==========================================================================
- *
+ */
+
+/**
+ * DOC: ts_intro
* INTRODUCTION
*
* The textsearch infrastructure provides text searching facilities for
@@ -19,7 +22,9 @@
*
* ARCHITECTURE
*
- * User
+ * .. code-block:: none
+ *
+ * User
* +----------------+
* | finish()|<--------------(6)-----------------+
* |get_next_block()|<--------------(5)---------------+ |
@@ -33,21 +38,21 @@
* | (3)|----->| find()/next() |-----------+ |
* | (7)|----->| destroy() |----------------------+
* +----------------+ +---------------+
- *
- * (1) User configures a search by calling _prepare() specifying the
- * search parameters such as the pattern and algorithm name.
+ *
+ * (1) User configures a search by calling textsearch_prepare() specifying
+ * the search parameters such as the pattern and algorithm name.
* (2) Core requests the algorithm to allocate and initialize a search
* configuration according to the specified parameters.
- * (3) User starts the search(es) by calling _find() or _next() to
- * fetch subsequent occurrences. A state variable is provided
- * to the algorithm to store persistent variables.
+ * (3) User starts the search(es) by calling textsearch_find() or
+ * textsearch_next() to fetch subsequent occurrences. A state variable
+ * is provided to the algorithm to store persistent variables.
* (4) Core eventually resets the search offset and forwards the find()
* request to the algorithm.
* (5) Algorithm calls get_next_block() provided by the user continuously
* to fetch the data to be searched in block by block.
* (6) Algorithm invokes finish() after the last call to get_next_block
* to clean up any leftovers from get_next_block. (Optional)
- * (7) User destroys the configuration by calling _destroy().
+ * (7) User destroys the configuration by calling textsearch_destroy().
* (8) Core notifies the algorithm to destroy algorithm specific
* allocations. (Optional)
*
@@ -62,9 +67,10 @@
* amount of times and even in parallel as long as a separate struct
* ts_state variable is provided to every instance.
*
- * The actual search is performed by either calling textsearch_find_-
- * continuous() for linear data or by providing an own get_next_block()
- * implementation and calling textsearch_find(). Both functions return
+ * The actual search is performed by either calling
+ * textsearch_find_continuous() for linear data or by providing
+ * your own get_next_block() implementation and
+ * calling textsearch_find(). Both functions return
* the position of the first occurrence of the pattern or UINT_MAX if
* no match was found. Subsequent occurrences can be found by calling
* textsearch_next() regardless of the linearity of the data.
@@ -72,7 +78,7 @@
* Once you're done using a configuration it must be given back via
* textsearch_destroy.
*
- * EXAMPLE
+ * EXAMPLE::
*
* int pos;
* struct ts_config *conf;
@@ -87,13 +93,13 @@
* goto errout;
* }
*
- * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
+ * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
* if (pos != UINT_MAX)
- * panic("Oh my god, dancing chickens at %d\n", pos);
+ * panic("Oh my god, dancing chickens at \%d\n", pos);
*
* textsearch_destroy(conf);
- * ==========================================================================
*/
+/* ========================================================================== */
#include <linux/module.h>
#include <linux/types.h>
@@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
*
* Returns the position of first occurrence of the pattern or
* %UINT_MAX if no occurrence was found.
- */
+ */
unsigned int textsearch_find_continuous(struct ts_config *conf,
struct ts_state *state,
const void *data, unsigned int len)
diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c
index 25ca2d4c1e19..597998169a96 100644
--- a/lib/ucmpdi2.c
+++ b/lib/ucmpdi2.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/libgcc.h>
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
{
const DWunion au = {.ll = a};
const DWunion bu = {.ll = b};
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index d7e06b28de38..0a559a42359b 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -112,3 +112,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
return j;
}
EXPORT_SYMBOL(ucs2_as_utf8);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d7a708f82559..37a54a6dd594 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -336,7 +336,7 @@ char *put_dec(char *buf, unsigned long long n)
*
* If speed is not important, use snprintf(). It's easy to read the code.
*/
-int num_to_str(char *buf, int size, unsigned long long num)
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[sizeof(num) * 3] __aligned(2);
@@ -350,11 +350,21 @@ int num_to_str(char *buf, int size, unsigned long long num)
len = put_dec(tmp, num) - tmp;
}
- if (len > size)
+ if (len > size || width > size)
return 0;
+
+ if (width > len) {
+ width = width - len;
+ for (idx = 0; idx < width; idx++)
+ buf[idx] = ' ';
+ } else {
+ width = 0;
+ }
+
for (idx = 0; idx < len; ++idx)
- buf[idx] = tmp[len - idx - 1];
- return len;
+ buf[idx + width] = tmp[len - idx - 1];
+
+ return len + width;
}
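/*
 * Worked example (editorial sketch, not part of this patch) of the new
 * @width argument: the result is left-padded with spaces up to @width
 * columns.  Note that num_to_str() does not NUL-terminate the buffer.
 */
static void num_to_str_example(void)
{
	char buf[16];
	int n;

	n = num_to_str(buf, sizeof(buf), 42, 5);
	/* buf now holds "   42" (three leading spaces) and n == 5. */

	n = num_to_str(buf, sizeof(buf), 42, 0);
	/* No padding requested: buf holds "42" and n == 2. */
}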
#define SIGN 1 /* unsigned/signed, must be 1 */
@@ -603,6 +613,109 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
}
static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
+static siphash_key_t ptr_key __read_mostly;
+
+static void enable_ptr_key_workfn(struct work_struct *work)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ /* Needs to run from preemptible context */
+ static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ /* This may be in an interrupt handler. */
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
+}
+
+static struct random_ready_callback random_ready = {
+ .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+ int key_size = sizeof(ptr_key);
+ int ret;
+
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
+ if (!ret) {
+ return 0;
+ } else if (ret == -EALREADY) {
+ /* This is in preemptible context */
+ enable_ptr_key_workfn(&enable_ptr_key_work);
+ return 0;
+ }
+
+ return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32-bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
+ unsigned long hashval;
+
+ /* When debugging early boot, use a non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ spec.field_width = 2 * sizeof(ptr);
+ /* string length must be less than the field width set above */
+ return string(buf, end, str, spec);
+ }
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits; this makes it explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ return pointer_string(buf, end, (const void *)hashval, spec);
+}
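/*
 * Editorial illustration (not part of this patch): how the hashing is
 * seen by printk() users.  Until the siphash key is initialised, plain
 * %p prints "(ptrval)"/"(____ptrval____)"; afterwards it prints the
 * 32-bit hash computed above, which is stable for the lifetime of the
 * boot.  %px (handled further down in pointer()) still prints the raw
 * value.
 */
static void ptr_hash_example(const void *ptr)
{
	pr_info("as id: %p\n", ptr);	/* hashed identifier */
	pr_info("raw:   %px\n", ptr);	/* real address; use sparingly */
}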
+
+static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
@@ -693,6 +806,22 @@ char *symbol_string(char *buf, char *end, void *ptr,
#endif
}
+static const struct printf_spec default_str_spec = {
+ .field_width = -1,
+ .precision = -1,
+};
+
+static const struct printf_spec default_flag_spec = {
+ .base = 16,
+ .precision = -1,
+ .flags = SPECIAL | SMALL,
+};
+
+static const struct printf_spec default_dec_spec = {
+ .base = 10,
+ .precision = -1,
+};
+
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
@@ -722,21 +851,11 @@ char *resource_string(char *buf, char *end, struct resource *res,
.precision = -1,
.flags = SMALL | ZEROPAD,
};
- static const struct printf_spec dec_spec = {
- .base = 10,
- .precision = -1,
- .flags = 0,
- };
static const struct printf_spec str_spec = {
.field_width = -1,
.precision = 10,
.flags = LEFT,
};
- static const struct printf_spec flag_spec = {
- .base = 16,
- .precision = -1,
- .flags = SPECIAL | SMALL,
- };
/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
* 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
@@ -760,10 +879,10 @@ char *resource_string(char *buf, char *end, struct resource *res,
specp = &mem_spec;
} else if (res->flags & IORESOURCE_IRQ) {
p = string(p, pend, "irq ", str_spec);
- specp = &dec_spec;
+ specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_DMA) {
p = string(p, pend, "dma ", str_spec);
- specp = &dec_spec;
+ specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_BUS) {
p = string(p, pend, "bus ", str_spec);
specp = &bus_spec;
@@ -793,7 +912,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
p = string(p, pend, " disabled", str_spec);
} else {
p = string(p, pend, " flags ", str_spec);
- p = number(p, pend, res->flags, flag_spec);
+ p = number(p, pend, res->flags, default_flag_spec);
}
*p++ = ']';
*p = '\0';
@@ -903,9 +1022,6 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
int cur, rbot, rtop;
bool first = true;
- /* reused to print numbers */
- spec = (struct printf_spec){ .base = 10 };
-
rbot = cur = find_first_bit(bitmap, nr_bits);
while (cur < nr_bits) {
rtop = cur;
@@ -920,13 +1036,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
}
first = false;
- buf = number(buf, end, rbot, spec);
+ buf = number(buf, end, rbot, default_dec_spec);
if (rbot < rtop) {
if (buf < end)
*buf = '-';
buf++;
- buf = number(buf, end, rtop, spec);
+ buf = number(buf, end, rtop, default_dec_spec);
}
rbot = cur;
@@ -1350,13 +1466,6 @@ static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
- spec.base = 16;
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(ptr);
- spec.flags |= ZEROPAD;
- }
-
switch (kptr_restrict) {
case 0:
/* Always print %pK values */
@@ -1368,8 +1477,11 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
- if (in_irq() || in_serving_softirq() || in_nmi())
+ if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(ptr);
return string(buf, end, "pK-error", spec);
+ }
/*
* Only print the real pointer value if the current
@@ -1394,11 +1506,12 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
break;
}
- return number(buf, end, (unsigned long)ptr, spec);
+ return pointer_string(buf, end, ptr, spec);
}
static noinline_for_stack
-char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
+char *netdev_bits(char *buf, char *end, const void *addr,
+ struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
@@ -1409,9 +1522,7 @@ char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
size = sizeof(netdev_features_t);
break;
default:
- num = (unsigned long)addr;
- size = sizeof(unsigned long);
- break;
+ return ptr_to_id(buf, end, addr, spec);
}
return special_hex_number(buf, end, num, size);
@@ -1446,15 +1557,12 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
return string(buf, end, NULL, spec);
switch (fmt[1]) {
- case 'r':
- return number(buf, end, clk_get_rate(clk), spec);
-
case 'n':
default:
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return special_hex_number(buf, end, (unsigned long)clk, sizeof(unsigned long));
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
@@ -1464,23 +1572,13 @@ char *format_flags(char *buf, char *end, unsigned long flags,
const struct trace_print_flags *names)
{
unsigned long mask;
- const struct printf_spec strspec = {
- .field_width = -1,
- .precision = -1,
- };
- const struct printf_spec numspec = {
- .flags = SPECIAL|SMALL,
- .field_width = -1,
- .precision = -1,
- .base = 16,
- };
for ( ; flags && names->name; names++) {
mask = names->mask;
if ((flags & mask) != mask)
continue;
- buf = string(buf, end, names->name, strspec);
+ buf = string(buf, end, names->name, default_str_spec);
flags &= ~mask;
if (flags) {
@@ -1491,7 +1589,7 @@ char *format_flags(char *buf, char *end, unsigned long flags,
}
if (flags)
- buf = number(buf, end, flags, numspec);
+ buf = number(buf, end, flags, default_flag_spec);
return buf;
}
@@ -1538,22 +1636,18 @@ char *device_node_gen_full_name(const struct device_node *np, char *buf, char *e
{
int depth;
const struct device_node *parent = np->parent;
- static const struct printf_spec strspec = {
- .field_width = -1,
- .precision = -1,
- };
/* special case for root node */
if (!parent)
- return string(buf, end, "/", strspec);
+ return string(buf, end, "/", default_str_spec);
for (depth = 0; parent->parent; depth++)
parent = parent->parent;
for ( ; depth >= 0; depth--) {
- buf = string(buf, end, "/", strspec);
+ buf = string(buf, end, "/", default_str_spec);
buf = string(buf, end, device_node_name_for_depth(np, depth),
- strspec);
+ default_str_spec);
}
return buf;
}
@@ -1590,6 +1684,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
fmt = "f";
for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+ int precision;
if (pass) {
if (buf < end)
*buf = ':';
@@ -1601,7 +1696,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
buf = device_node_gen_full_name(dn, buf, end);
break;
case 'n': /* name */
- buf = string(buf, end, dn->name, str_spec);
+ p = kbasename(of_node_full_name(dn));
+ precision = str_spec.precision;
+ str_spec.precision = strchrnul(p, '@') - p;
+ buf = string(buf, end, p, str_spec);
+ str_spec.precision = precision;
break;
case 'p': /* phandle */
buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
@@ -1645,87 +1744,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-static noinline_for_stack
-char *pointer_string(char *buf, char *end, const void *ptr,
- struct printf_spec spec)
-{
- spec.base = 16;
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(ptr);
- spec.flags |= ZEROPAD;
- }
-
- return number(buf, end, (unsigned long int)ptr, spec);
-}
-
-static bool have_filled_random_ptr_key __read_mostly;
-static siphash_key_t ptr_key __read_mostly;
-
-static void fill_random_ptr_key(struct random_ready_callback *unused)
-{
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- /*
- * have_filled_random_ptr_key==true is dependent on get_random_bytes().
- * ptr_to_id() needs to see have_filled_random_ptr_key==true
- * after get_random_bytes() returns.
- */
- smp_mb();
- WRITE_ONCE(have_filled_random_ptr_key, true);
-}
-
-static struct random_ready_callback random_ready = {
- .func = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
-{
- int ret = add_random_ready_callback(&random_ready);
-
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- fill_random_ptr_key(&random_ready);
- return 0;
- }
-
- return ret;
-}
-early_initcall(initialize_ptr_random);
-
-/* Maps a pointer to a 32 bit unique identifier. */
-static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
-{
- unsigned long hashval;
- const int default_width = 2 * sizeof(ptr);
-
- if (unlikely(!have_filled_random_ptr_key)) {
- spec.field_width = default_width;
- /* string length must be less than default_width */
- return string(buf, end, "(ptrval)", spec);
- }
-
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
- /*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
- */
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
-
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = default_width;
- spec.flags |= ZEROPAD;
- }
- spec.base = 16;
-
- return number(buf, end, hashval, spec);
-}
-
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -1736,10 +1754,10 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
*
* Right now we handle:
*
- * - 'F' For symbolic function descriptor pointers with offset
- * - 'f' For simple symbolic function names without offset
- * - 'S' For symbolic direct pointers with offset
- * - 's' For symbolic direct pointers without offset
+ * - 'S' For symbolic direct pointers (or function descriptors) with offset
+ * - 's' For symbolic direct pointers (or function descriptors) without offset
+ * - 'F' Same as 'S'
+ * - 'f' Same as 's'
* - '[FfSs]R' as above with __builtin_extract_return_addr() translation
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
@@ -1820,26 +1838,20 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
- * - 'O' For a kobject based struct. Must be one of the following:
- * - 'OF[fnpPcCF]' For a device tree object
- * Without any optional arguments prints the full_name
- * f device node full_name
- * n device node name
- * p device node phandle
- * P device node path spec (name + @unit)
- * F device node flags
- * c major compatible string
- * C full compatible string
- *
+ * - 'OF[fnpPcCF]' For a device tree object
+ * Without any optional arguments prints the full_name
+ * f device node full_name
+ * n device node name
+ * p device node phandle
+ * P device node path spec (name + @unit)
+ * F device node flags
+ * c major compatible string
+ * C full compatible string
* - 'x' For printing the address. Equivalent to "%lx".
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
*
- * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
- * function pointers are really function descriptors, which contain a
- * pointer to the real address.
- *
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
*/
@@ -1935,7 +1947,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
break;
return restricted_pointer(buf, end, ptr, spec);
case 'N':
- return netdev_bits(buf, end, ptr, fmt);
+ return netdev_bits(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, fmt);
case 'd':
@@ -1958,6 +1970,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
+ break;
case 'x':
return pointer_string(buf, end, ptr, spec);
}
@@ -2115,6 +2128,7 @@ qualifier:
case 'x':
spec->flags |= SMALL;
+ /* fall through */
case 'X':
spec->base = 16;
@@ -2591,6 +2605,8 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
case 's':
case 'F':
case 'f':
+ case 'x':
+ case 'K':
save_arg(void *);
break;
default:
@@ -2765,6 +2781,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
case 's':
case 'F':
case 'f':
+ case 'x':
+ case 'K':
process = true;
break;
default:
@@ -2779,7 +2797,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
copy = end - str;
memcpy(str, args, copy);
str += len;
- args += len;
+ args += len + 1;
}
}
if (process)
@@ -3069,8 +3087,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
case 'i':
base = 0;
+ /* fall through */
case 'd':
is_sign = true;
+ /* fall through */
case 'u':
break;
case '%':
diff --git a/lib/xarray.c b/lib/xarray.c
new file mode 100644
index 000000000000..8b176f009c08
--- /dev/null
+++ b/lib/xarray.c
@@ -0,0 +1,2036 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * XArray implementation
+ * Copyright (c) 2017 Microsoft Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+/*
+ * Coding conventions in this file:
+ *
+ * @xa is used to refer to the entire xarray.
+ * @xas is the 'xarray operation state'. It may be either a pointer to
+ * an xa_state, or an xa_state stored on the stack. This is an unfortunate
+ * ambiguity.
+ * @index is the index of the entry being operated on.
+ * @mark is an xa_mark_t; a small number indicating one of the mark bits.
+ * @node refers to an xa_node; usually the primary one being operated on by
+ * this function.
+ * @offset is the index into the slots array inside an xa_node.
+ * @parent refers to the @xa_node closer to the head than @node.
+ * @entry refers to something stored in a slot in the xarray.
+ */
+
+static inline unsigned int xa_lock_type(const struct xarray *xa)
+{
+ return (__force unsigned int)xa->xa_flags & 3;
+}
+
+static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
+{
+ if (lock_type == XA_LOCK_IRQ)
+ xas_lock_irq(xas);
+ else if (lock_type == XA_LOCK_BH)
+ xas_lock_bh(xas);
+ else
+ xas_lock(xas);
+}
+
+static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
+{
+ if (lock_type == XA_LOCK_IRQ)
+ xas_unlock_irq(xas);
+ else if (lock_type == XA_LOCK_BH)
+ xas_unlock_bh(xas);
+ else
+ xas_unlock(xas);
+}
+
+static inline bool xa_track_free(const struct xarray *xa)
+{
+ return xa->xa_flags & XA_FLAGS_TRACK_FREE;
+}
+
+static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
+{
+ if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
+ xa->xa_flags |= XA_FLAGS_MARK(mark);
+}
+
+static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
+{
+ if (xa->xa_flags & XA_FLAGS_MARK(mark))
+ xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
+}
+
+static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
+{
+ return node->marks[(__force unsigned)mark];
+}
+
+static inline bool node_get_mark(struct xa_node *node,
+ unsigned int offset, xa_mark_t mark)
+{
+ return test_bit(offset, node_marks(node, mark));
+}
+
+/* returns true if the bit was already set */
+static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
+ xa_mark_t mark)
+{
+ return __test_and_set_bit(offset, node_marks(node, mark));
+}
+
+/* returns true if the bit was set */
+static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
+ xa_mark_t mark)
+{
+ return __test_and_clear_bit(offset, node_marks(node, mark));
+}
+
+static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
+{
+ return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
+static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
+{
+ bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
+#define mark_inc(mark) do { \
+ mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
+} while (0)
+
+/*
+ * xas_squash_marks() - Merge all marks to the first entry
+ * @xas: Array operation state.
+ *
+ * Set a mark on the first entry if any entry has it set. Clear marks on
+ * all sibling entries.
+ */
+static void xas_squash_marks(const struct xa_state *xas)
+{
+ unsigned int mark = 0;
+ unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
+
+ if (!xas->xa_sibs)
+ return;
+
+ do {
+ unsigned long *marks = xas->xa_node->marks[mark];
+ if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
+ continue;
+ __set_bit(xas->xa_offset, marks);
+ bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+ } while (mark++ != (__force unsigned)XA_MARK_MAX);
+}
+
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+ return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+static void xas_set_offset(struct xa_state *xas)
+{
+ xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
+}
+
+/* move the index either forwards (find) or backwards (sibling slot) */
+static void xas_move_index(struct xa_state *xas, unsigned long offset)
+{
+ unsigned int shift = xas->xa_node->shift;
+ xas->xa_index &= ~XA_CHUNK_MASK << shift;
+ xas->xa_index += offset << shift;
+}
+
+static void xas_advance(struct xa_state *xas)
+{
+ xas->xa_offset++;
+ xas_move_index(xas, xas->xa_offset);
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+}
+
+/*
+ * Starts a walk. If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to. If we're in an
+ * error state, return NULL. If the index is outside the current scope
+ * of the xarray, return NULL without changing @xas->xa_node. Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+ void *entry;
+
+ if (xas_valid(xas))
+ return xas_reload(xas);
+ if (xas_error(xas))
+ return NULL;
+
+ entry = xa_head(xas->xa);
+ if (!xa_is_node(entry)) {
+ if (xas->xa_index)
+ return set_bounds(xas);
+ } else {
+ if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+ return set_bounds(xas);
+ }
+
+ xas->xa_node = NULL;
+ return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+ unsigned int offset = get_offset(xas->xa_index, node);
+ void *entry = xa_entry(xas->xa, node, offset);
+
+ xas->xa_node = node;
+ if (xa_is_sibling(entry)) {
+ offset = xa_to_sibling(entry);
+ entry = xa_entry(xas->xa, node, offset);
+ }
+
+ xas->xa_offset = offset;
+ return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry
+ * stored at xa_index. However, it will do nothing and return %NULL if
+ * @xas is in an error state. xas_load() will never expand the tree.
+ *
+ * If the xa_state is set up to operate on a multi-index entry, xas_load()
+ * may return %NULL or an internal entry, even if there are entries
+ * present within the range specified by @xas.
+ *
+ * Context: Any context. The caller should hold the xa_lock or the RCU lock.
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+ void *entry = xas_start(xas);
+
+ while (xa_is_node(entry)) {
+ struct xa_node *node = xa_to_node(entry);
+
+ if (xas->xa_shift > node->shift)
+ break;
+ entry = xas_descend(xas, node);
+ }
+ return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
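/*
 * Editorial sketch (not part of this patch): a minimal RCU-side lookup
 * built on xas_load(), following the same pattern xa_load() uses later
 * in this file.  The function name is illustrative only.
 */
static void *example_lookup(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}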
+
+/* Move the radix tree node cache here */
+extern struct kmem_cache *radix_tree_node_cachep;
+extern void radix_tree_node_rcu_free(struct rcu_head *head);
+
+#define XA_RCU_FREE ((struct xarray *)1)
+
+static void xa_node_free(struct xa_node *node)
+{
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ node->array = XA_RCU_FREE;
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+}
+
+/*
+ * xas_destroy() - Free any resources allocated during the XArray operation.
+ * @xas: XArray operation state.
+ *
+ * This function is internal-only.
+ */
+static void xas_destroy(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_alloc;
+
+ if (!node)
+ return;
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ kmem_cache_free(radix_tree_node_cachep, node);
+ xas->xa_alloc = NULL;
+}
+
+/**
+ * xas_nomem() - Allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * If we need to add new nodes to the XArray, we try to allocate memory
+ * with GFP_NOWAIT while holding the lock, which will usually succeed.
+ * If it fails, @xas is flagged as needing memory to continue. The caller
+ * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
+ * the caller should retry the operation.
+ *
+ * Forward progress is guaranteed as one node is allocated here and
+ * stored in the xa_state where it will be found by xas_alloc(). More
+ * nodes will likely be found in the slab allocator, but we do not tie
+ * them up here.
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+bool xas_nomem(struct xa_state *xas, gfp_t gfp)
+{
+ if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+ xas_destroy(xas);
+ return false;
+ }
+ xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ if (!xas->xa_alloc)
+ return false;
+ XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+ xas->xa_node = XAS_RESTART;
+ return true;
+}
+EXPORT_SYMBOL_GPL(xas_nomem);
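/*
 * Editorial sketch (not part of this patch): the drop-lock-and-retry
 * pattern described above, written as a caller would.  This is the same
 * shape xa_store() uses further down in this file.
 */
static void example_store(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));
}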
+
+/*
+ * __xas_nomem() - Drop locks and allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * Internal variant of xas_nomem().
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
+ __must_hold(xas->xa->xa_lock)
+{
+ unsigned int lock_type = xa_lock_type(xas->xa);
+
+ if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+ xas_destroy(xas);
+ return false;
+ }
+ if (gfpflags_allow_blocking(gfp)) {
+ xas_unlock_type(xas, lock_type);
+ xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas_lock_type(xas, lock_type);
+ } else {
+ xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ }
+ if (!xas->xa_alloc)
+ return false;
+ XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+ xas->xa_node = XAS_RESTART;
+ return true;
+}
+
+static void xas_update(struct xa_state *xas, struct xa_node *node)
+{
+ if (xas->xa_update)
+ xas->xa_update(node);
+ else
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+}
+
+static void *xas_alloc(struct xa_state *xas, unsigned int shift)
+{
+ struct xa_node *parent = xas->xa_node;
+ struct xa_node *node = xas->xa_alloc;
+
+ if (xas_invalid(xas))
+ return NULL;
+
+ if (node) {
+ xas->xa_alloc = NULL;
+ } else {
+ node = kmem_cache_alloc(radix_tree_node_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!node) {
+ xas_set_err(xas, -ENOMEM);
+ return NULL;
+ }
+ }
+
+ if (parent) {
+ node->offset = xas->xa_offset;
+ parent->count++;
+ XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
+ xas_update(xas, parent);
+ }
+ XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ node->shift = shift;
+ node->count = 0;
+ node->nr_values = 0;
+ RCU_INIT_POINTER(node->parent, xas->xa_node);
+ node->array = xas->xa;
+
+ return node;
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+/* Returns the number of indices covered by a given xa_state */
+static unsigned long xas_size(const struct xa_state *xas)
+{
+ return (xas->xa_sibs + 1UL) << xas->xa_shift;
+}
+#endif
+
+/*
+ * Use this to calculate the maximum index that will need to be created
+ * in order to add the entry described by @xas. Because we cannot store a
+ * multiple-index entry at index 0, the calculation is a little more complex
+ * than you might expect.
+ */
+static unsigned long xas_max(struct xa_state *xas)
+{
+ unsigned long max = xas->xa_index;
+
+#ifdef CONFIG_XARRAY_MULTI
+ if (xas->xa_shift || xas->xa_sibs) {
+ unsigned long mask = xas_size(xas) - 1;
+ max |= mask;
+ if (mask == max)
+ max++;
+ }
+#endif
+
+ return max;
+}
+
+/* The maximum index that can be contained in the array without expanding it */
+static unsigned long max_index(void *entry)
+{
+ if (!xa_is_node(entry))
+ return 0;
+ return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
+}
+
+static void xas_shrink(struct xa_state *xas)
+{
+ struct xarray *xa = xas->xa;
+ struct xa_node *node = xas->xa_node;
+
+ for (;;) {
+ void *entry;
+
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ if (node->count != 1)
+ break;
+ entry = xa_entry_locked(xa, node, 0);
+ if (!entry)
+ break;
+ if (!xa_is_node(entry) && node->shift)
+ break;
+ xas->xa_node = XAS_BOUNDS;
+
+ RCU_INIT_POINTER(xa->xa_head, entry);
+ if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
+ xa_mark_clear(xa, XA_FREE_MARK);
+
+ node->count = 0;
+ node->nr_values = 0;
+ if (!xa_is_node(entry))
+ RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
+ xas_update(xas, node);
+ xa_node_free(node);
+ if (!xa_is_node(entry))
+ break;
+ node = xa_to_node(entry);
+ node->parent = NULL;
+ }
+}
+
+/*
+ * xas_delete_node() - Attempt to delete an xa_node
+ * @xas: Array operation state.
+ *
+ * Attempts to delete the @xas->xa_node. This will fail if @xas->xa_node has
+ * a non-zero reference count.
+ */
+static void xas_delete_node(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ for (;;) {
+ struct xa_node *parent;
+
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ if (node->count)
+ break;
+
+ parent = xa_parent_locked(xas->xa, node);
+ xas->xa_node = parent;
+ xas->xa_offset = node->offset;
+ xa_node_free(node);
+
+ if (!parent) {
+ xas->xa->xa_head = NULL;
+ xas->xa_node = XAS_BOUNDS;
+ return;
+ }
+
+ parent->slots[xas->xa_offset] = NULL;
+ parent->count--;
+ XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
+ node = parent;
+ xas_update(xas, node);
+ }
+
+ if (!node->parent)
+ xas_shrink(xas);
+}
+
+/**
+ * xas_free_nodes() - Free this node and all nodes that it references
+ * @xas: Array operation state.
+ * @top: Node to free
+ *
+ * This node has been removed from the tree. We must now free it and all
+ * of its subnodes. There may be RCU walkers with references into the tree,
+ * so we must replace all entries with retry markers.
+ */
+static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
+{
+ unsigned int offset = 0;
+ struct xa_node *node = top;
+
+ for (;;) {
+ void *entry = xa_entry_locked(xas->xa, node, offset);
+
+ if (xa_is_node(entry)) {
+ node = xa_to_node(entry);
+ offset = 0;
+ continue;
+ }
+ if (entry)
+ RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
+ offset++;
+ while (offset == XA_CHUNK_SIZE) {
+ struct xa_node *parent;
+
+ parent = xa_parent_locked(xas->xa, node);
+ offset = node->offset + 1;
+ node->count = 0;
+ node->nr_values = 0;
+ xas_update(xas, node);
+ xa_node_free(node);
+ if (node == top)
+ return;
+ node = parent;
+ }
+ }
+}
+
+/*
+ * xas_expand adds nodes to the head of the tree until it has reached
+ * sufficient height to be able to contain @xas->xa_index
+ */
+static int xas_expand(struct xa_state *xas, void *head)
+{
+ struct xarray *xa = xas->xa;
+ struct xa_node *node = NULL;
+ unsigned int shift = 0;
+ unsigned long max = xas_max(xas);
+
+ if (!head) {
+ if (max == 0)
+ return 0;
+ while ((max >> shift) >= XA_CHUNK_SIZE)
+ shift += XA_CHUNK_SHIFT;
+ return shift + XA_CHUNK_SHIFT;
+ } else if (xa_is_node(head)) {
+ node = xa_to_node(head);
+ shift = node->shift + XA_CHUNK_SHIFT;
+ }
+ xas->xa_node = NULL;
+
+ while (max > max_index(head)) {
+ xa_mark_t mark = 0;
+
+ XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+ node = xas_alloc(xas, shift);
+ if (!node)
+ return -ENOMEM;
+
+ node->count = 1;
+ if (xa_is_value(head))
+ node->nr_values = 1;
+ RCU_INIT_POINTER(node->slots[0], head);
+
+ /* Propagate the aggregated mark info to the new child */
+ for (;;) {
+ if (xa_track_free(xa) && mark == XA_FREE_MARK) {
+ node_mark_all(node, XA_FREE_MARK);
+ if (!xa_marked(xa, XA_FREE_MARK)) {
+ node_clear_mark(node, 0, XA_FREE_MARK);
+ xa_mark_set(xa, XA_FREE_MARK);
+ }
+ } else if (xa_marked(xa, mark)) {
+ node_set_mark(node, 0, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ /*
+ * Now that the new node is fully initialised, we can add
+ * it to the tree
+ */
+ if (xa_is_node(head)) {
+ xa_to_node(head)->offset = 0;
+ rcu_assign_pointer(xa_to_node(head)->parent, node);
+ }
+ head = xa_mk_node(node);
+ rcu_assign_pointer(xa->xa_head, head);
+ xas_update(xas, node);
+
+ shift += XA_CHUNK_SHIFT;
+ }
+
+ xas->xa_node = node;
+ return shift;
+}
+
+/*
+ * xas_create() - Create a slot to store an entry in.
+ * @xas: XArray operation state.
+ *
+ * Most users will not need to call this function directly, as it is called
+ * by xas_store(). It is useful for doing conditional store operations
+ * (see the xa_cmpxchg() implementation for an example).
+ *
+ * Return: If the slot already existed, returns the contents of this slot.
+ * If the slot was newly created, returns NULL. If it failed to create the
+ * slot, returns NULL and indicates the error in @xas.
+ */
+static void *xas_create(struct xa_state *xas)
+{
+ struct xarray *xa = xas->xa;
+ void *entry;
+ void __rcu **slot;
+ struct xa_node *node = xas->xa_node;
+ int shift;
+ unsigned int order = xas->xa_shift;
+
+ if (xas_top(node)) {
+ entry = xa_head_locked(xa);
+ xas->xa_node = NULL;
+ shift = xas_expand(xas, entry);
+ if (shift < 0)
+ return NULL;
+ entry = xa_head_locked(xa);
+ slot = &xa->xa_head;
+ } else if (xas_error(xas)) {
+ return NULL;
+ } else if (node) {
+ unsigned int offset = xas->xa_offset;
+
+ shift = node->shift;
+ entry = xa_entry_locked(xa, node, offset);
+ slot = &node->slots[offset];
+ } else {
+ shift = 0;
+ entry = xa_head_locked(xa);
+ slot = &xa->xa_head;
+ }
+
+ while (shift > order) {
+ shift -= XA_CHUNK_SHIFT;
+ if (!entry) {
+ node = xas_alloc(xas, shift);
+ if (!node)
+ break;
+ if (xa_track_free(xa))
+ node_mark_all(node, XA_FREE_MARK);
+ rcu_assign_pointer(*slot, xa_mk_node(node));
+ } else if (xa_is_node(entry)) {
+ node = xa_to_node(entry);
+ } else {
+ break;
+ }
+ entry = xas_descend(xas, node);
+ slot = &node->slots[xas->xa_offset];
+ }
+
+ return entry;
+}
+
+/**
+ * xas_create_range() - Ensure that stores to this range will succeed
+ * @xas: XArray operation state.
+ *
+ * Creates all of the slots in the range covered by @xas. Sets @xas to
+ * create single-index entries and positions it at the beginning of the
+ * range. This is for the benefit of users that have not yet been
+ * converted to use multi-index entries.
+ */
+void xas_create_range(struct xa_state *xas)
+{
+ unsigned long index = xas->xa_index;
+ unsigned char shift = xas->xa_shift;
+ unsigned char sibs = xas->xa_sibs;
+
+ xas->xa_index |= ((sibs + 1) << shift) - 1;
+ if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
+ xas->xa_offset |= sibs;
+ xas->xa_shift = 0;
+ xas->xa_sibs = 0;
+
+ for (;;) {
+ xas_create(xas);
+ if (xas_error(xas))
+ goto restore;
+ if (xas->xa_index <= (index | XA_CHUNK_MASK))
+ goto success;
+ xas->xa_index -= XA_CHUNK_SIZE;
+
+ for (;;) {
+ struct xa_node *node = xas->xa_node;
+ xas->xa_node = xa_parent_locked(xas->xa, node);
+ xas->xa_offset = node->offset - 1;
+ if (node->offset != 0)
+ break;
+ }
+ }
+
+restore:
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+ xas->xa_index = index;
+ return;
+success:
+ xas->xa_index = index;
+ if (xas->xa_node)
+ xas_set_offset(xas);
+}
+EXPORT_SYMBOL_GPL(xas_create_range);
+
+static void update_node(struct xa_state *xas, struct xa_node *node,
+ int count, int values)
+{
+ if (!node || (!count && !values))
+ return;
+
+ node->count += count;
+ node->nr_values += values;
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
+ xas_update(xas, node);
+ if (count < 0)
+ xas_delete_node(xas);
+}
+
+/**
+ * xas_store() - Store this entry in the XArray.
+ * @xas: XArray operation state.
+ * @entry: New entry.
+ *
+ * If @xas is operating on a multi-index entry, the entry returned by this
+ * function is essentially meaningless (it may be an internal entry or it
+ * may be %NULL, even if there are non-NULL entries at some of the indices
+ * covered by the range). This is not a problem for any current users,
+ * and can be changed if needed.
+ *
+ * Return: The old entry at this index.
+ */
+void *xas_store(struct xa_state *xas, void *entry)
+{
+ struct xa_node *node;
+ void __rcu **slot = &xas->xa->xa_head;
+ unsigned int offset, max;
+ int count = 0;
+ int values = 0;
+ void *first, *next;
+ bool value = xa_is_value(entry);
+
+ if (entry)
+ first = xas_create(xas);
+ else
+ first = xas_load(xas);
+
+ if (xas_invalid(xas))
+ return first;
+ node = xas->xa_node;
+ if (node && (xas->xa_shift < node->shift))
+ xas->xa_sibs = 0;
+ if ((first == entry) && !xas->xa_sibs)
+ return first;
+
+ next = first;
+ offset = xas->xa_offset;
+ max = xas->xa_offset + xas->xa_sibs;
+ if (node) {
+ slot = &node->slots[offset];
+ if (xas->xa_sibs)
+ xas_squash_marks(xas);
+ }
+ if (!entry)
+ xas_init_marks(xas);
+
+ for (;;) {
+ /*
+ * Must clear the marks before setting the entry to NULL;
+ * otherwise xas_for_each_marked may find a NULL entry and
+ * stop early. rcu_assign_pointer contains a release barrier
+ * so the mark clearing will appear to happen before the
+ * entry is set to NULL.
+ */
+ rcu_assign_pointer(*slot, entry);
+ if (xa_is_node(next))
+ xas_free_nodes(xas, xa_to_node(next));
+ if (!node)
+ break;
+ count += !next - !entry;
+ values += !xa_is_value(first) - !value;
+ if (entry) {
+ if (offset == max)
+ break;
+ if (!xa_is_sibling(entry))
+ entry = xa_mk_sibling(xas->xa_offset);
+ } else {
+ if (offset == XA_CHUNK_MASK)
+ break;
+ }
+ next = xa_entry_locked(xas->xa, node, ++offset);
+ if (!xa_is_sibling(next)) {
+ if (!entry && (offset > max))
+ break;
+ first = next;
+ }
+ slot++;
+ }
+
+ update_node(xas, node, count, values);
+ return first;
+}
+EXPORT_SYMBOL_GPL(xas_store);
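/*
 * Editorial sketch (not part of this patch): a conditional
 * insert-if-absent built from xas_load() + xas_store() under a single
 * lock hold, mirroring the structure of xa_cmpxchg() below.  The
 * function name is illustrative only.
 */
static void *example_insert_if_absent(struct xarray *xa, unsigned long index,
		void *entry)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (!curr)
			xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return curr;	/* NULL if @entry was stored, else the existing entry */
}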
+
+/**
+ * xas_get_mark() - Returns the state of this mark.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Return: true if the mark is set, false if the mark is clear or @xas
+ * is in an error state.
+ */
+bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ if (xas_invalid(xas))
+ return false;
+ if (!xas->xa_node)
+ return xa_marked(xas->xa, mark);
+ return node_get_mark(xas->xa_node, xas->xa_offset, mark);
+}
+EXPORT_SYMBOL_GPL(xas_get_mark);
+
+/**
+ * xas_set_mark() - Sets the mark on this entry and its parents.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Sets the specified mark on this entry, and walks up the tree setting it
+ * on all the ancestor entries. Does nothing if @xas has not been walked to
+ * an entry, or is in an error state.
+ */
+void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset = xas->xa_offset;
+
+ if (xas_invalid(xas))
+ return;
+
+ while (node) {
+ if (node_set_mark(node, offset, mark))
+ return;
+ offset = node->offset;
+ node = xa_parent_locked(xas->xa, node);
+ }
+
+ if (!xa_marked(xas->xa, mark))
+ xa_mark_set(xas->xa, mark);
+}
+EXPORT_SYMBOL_GPL(xas_set_mark);
+
+/**
+ * xas_clear_mark() - Clears the mark on this entry and its parents.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Clears the specified mark on this entry, and walks back to the head
+ * attempting to clear it on all the ancestor entries. Does nothing if
+ * @xas has not been walked to an entry, or is in an error state.
+ */
+void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset = xas->xa_offset;
+
+ if (xas_invalid(xas))
+ return;
+
+ while (node) {
+ if (!node_clear_mark(node, offset, mark))
+ return;
+ if (node_any_mark(node, mark))
+ return;
+
+ offset = node->offset;
+ node = xa_parent_locked(xas->xa, node);
+ }
+
+ if (xa_marked(xas->xa, mark))
+ xa_mark_clear(xas->xa, mark);
+}
+EXPORT_SYMBOL_GPL(xas_clear_mark);
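/*
 * Editorial sketch (not part of this patch): setting a mark via the
 * advanced API.  As the comments above note, xas_set_mark() does
 * nothing until @xas has been walked to an entry, hence the xas_load().
 */
static void example_mark(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_lock(&xas);
	if (xas_load(&xas))
		xas_set_mark(&xas, XA_MARK_0);
	xas_unlock(&xas);
}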
+
+/**
+ * xas_init_marks() - Initialise all marks for the entry
+ * @xas: Array operations state.
+ *
+ * Initialise all marks for the entry specified by @xas. If we're tracking
+ * free entries with a mark, we need to set it on all entries. All other
+ * marks are cleared.
+ *
+ * This implementation is not as efficient as it could be; we may walk
+ * up the tree multiple times.
+ */
+void xas_init_marks(const struct xa_state *xas)
+{
+ xa_mark_t mark = 0;
+
+ for (;;) {
+ if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
+ xas_set_mark(xas, mark);
+ else
+ xas_clear_mark(xas, mark);
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+EXPORT_SYMBOL_GPL(xas_init_marks);
+
+/**
+ * xas_pause() - Pause a walk to drop a lock.
+ * @xas: XArray operation state.
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @xas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call xas_pause(), the xa_for_each()
+ * iterator may be more appropriate.
+ *
+ * Note that xas_pause() only works for forward iteration. If a user needs
+ * to pause a reverse iteration, we will need an xas_pause_rev().
+ */
+void xas_pause(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (xas_invalid(xas))
+ return;
+
+ if (node) {
+ unsigned int offset = xas->xa_offset;
+ while (++offset < XA_CHUNK_SIZE) {
+ if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
+ break;
+ }
+ xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ } else {
+ xas->xa_index++;
+ }
+ xas->xa_node = XAS_RESTART;
+}
+EXPORT_SYMBOL_GPL(xas_pause);
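/*
 * Editorial sketch (not part of this patch): using xas_pause() to drop
 * the lock in the middle of a walk.  Assumes the xas_for_each()
 * iterator provided by xarray.h in this series.
 */
static void example_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* ... process entry ... */
		if (need_resched()) {
			xas_pause(&xas);
			xas_unlock(&xas);
			cond_resched();
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
}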
+
+/*
+ * __xas_prev() - Find the previous entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_prev() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_prev(struct xa_state *xas)
+{
+ void *entry;
+
+ if (!xas_frozen(xas->xa_node))
+ xas->xa_index--;
+ if (xas_not_node(xas->xa_node))
+ return xas_load(xas);
+
+ if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+ xas->xa_offset--;
+
+ while (xas->xa_offset == 255) {
+ xas->xa_offset = xas->xa_node->offset - 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ }
+
+ for (;;) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+}
+EXPORT_SYMBOL_GPL(__xas_prev);
+
+/*
+ * __xas_next() - Find the next entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_next() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_next(struct xa_state *xas)
+{
+ void *entry;
+
+ if (!xas_frozen(xas->xa_node))
+ xas->xa_index++;
+ if (xas_not_node(xas->xa_node))
+ return xas_load(xas);
+
+ if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+ xas->xa_offset++;
+
+ while (xas->xa_offset == XA_CHUNK_SIZE) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ }
+
+ for (;;) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+}
+EXPORT_SYMBOL_GPL(__xas_next);
+
+/**
+ * xas_find() - Find the next present entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * If the @xas has not yet been walked to an entry, return the entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we move to the
+ * next entry.
+ *
+ * If no entry is found and the array is smaller than @max, the iterator
+ * is set to the smallest index not yet in the array. This allows @xas
+ * to be immediately passed to xas_store().
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find(struct xa_state *xas, unsigned long max)
+{
+ void *entry;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+ return set_bounds(xas);
+ } else if (xas_top(xas->xa_node)) {
+ entry = xas_load(xas);
+ if (entry || xas_not_node(xas->xa_node))
+ return entry;
+ } else if (!xas->xa_node->shift &&
+ xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
+ xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
+ }
+
+ xas_advance(xas);
+
+ while (xas->xa_node && (xas->xa_index <= max)) {
+ if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ continue;
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (xa_is_node(entry)) {
+ xas->xa_node = xa_to_node(entry);
+ xas->xa_offset = 0;
+ continue;
+ }
+ if (entry && !xa_is_sibling(entry))
+ return entry;
+
+ xas_advance(xas);
+ }
+
+ if (!xas->xa_node)
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find);
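/*
 * Editorial sketch (not part of this patch): iterating over present
 * entries with xas_find() directly under the RCU read lock.  Retry
 * entries are skipped with xas_retry(), which restarts the walk.
 */
static void example_find_all(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	for (entry = xas_find(&xas, ULONG_MAX); entry;
	     entry = xas_find(&xas, ULONG_MAX)) {
		if (xas_retry(&xas, entry))
			continue;
		/* ... use entry under RCU ... */
	}
	rcu_read_unlock();
}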
+
+/**
+ * xas_find_marked() - Find the next marked entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark number to search for.
+ *
+ * If the @xas has not yet been walked to an entry, return the marked entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we return the
+ * first marked entry with an index > xas.xa_index.
+ *
+ * If no marked entry is found and the array is smaller than @max, @xas is
+ * set to the bounds state and xas->xa_index is set to the smallest index
+ * not yet in the array. This allows @xas to be immediately passed to
+ * xas_store().
+ *
+ * If no entry is found before @max is reached, @xas is set to the restart
+ * state.
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
+{
+ bool advance = true;
+ unsigned int offset;
+ void *entry;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+ goto out;
+ } else if (xas_top(xas->xa_node)) {
+ advance = false;
+ entry = xa_head(xas->xa);
+ xas->xa_node = NULL;
+ if (xas->xa_index > max_index(entry))
+ goto bounds;
+ if (!xa_is_node(entry)) {
+ if (xa_marked(xas->xa, mark))
+ return entry;
+ xas->xa_index = 1;
+ goto out;
+ }
+ xas->xa_node = xa_to_node(entry);
+ xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
+ }
+
+ while (xas->xa_index <= max) {
+ if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ break;
+ advance = false;
+ continue;
+ }
+
+ if (!advance) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (xa_is_sibling(entry)) {
+ xas->xa_offset = xa_to_sibling(entry);
+ xas_move_index(xas, xas->xa_offset);
+ }
+ }
+
+ offset = xas_find_chunk(xas, advance, mark);
+ if (offset > xas->xa_offset) {
+ advance = false;
+ xas_move_index(xas, offset);
+ /* Mind the wrap */
+ if ((xas->xa_index - 1) >= max)
+ goto max;
+ xas->xa_offset = offset;
+ if (offset == XA_CHUNK_SIZE)
+ continue;
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+
+out:
+ if (!max)
+ goto max;
+bounds:
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+max:
+ xas->xa_node = XAS_RESTART;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_marked);
+
+/**
+ * xas_find_conflict() - Find the next present entry in a range.
+ * @xas: XArray operation state.
+ *
+ * The @xas describes both a range and a position within that range.
+ *
+ * Context: Any context. Expects xa_lock to be held.
+ * Return: The next entry in the range covered by @xas or %NULL.
+ */
+void *xas_find_conflict(struct xa_state *xas)
+{
+ void *curr;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node)
+ return NULL;
+
+ if (xas_top(xas->xa_node)) {
+ curr = xas_start(xas);
+ if (!curr)
+ return NULL;
+ while (xa_is_node(curr)) {
+ struct xa_node *node = xa_to_node(curr);
+ curr = xas_descend(xas, node);
+ }
+ if (curr)
+ return curr;
+ }
+
+ if (xas->xa_node->shift > xas->xa_shift)
+ return NULL;
+
+ for (;;) {
+ if (xas->xa_node->shift == xas->xa_shift) {
+ if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
+ break;
+ } else if (xas->xa_offset == XA_CHUNK_MASK) {
+ xas->xa_offset = xas->xa_node->offset;
+ xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ break;
+ continue;
+ }
+ curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
+ if (xa_is_sibling(curr))
+ continue;
+ while (xa_is_node(curr)) {
+ xas->xa_node = xa_to_node(curr);
+ xas->xa_offset = 0;
+ curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
+ }
+ if (curr)
+ return curr;
+ }
+ xas->xa_offset -= xas->xa_sibs;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_conflict);
+
+/**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (e.g. you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+ unsigned int lock_type;
+ static struct lock_class_key xa_lock_irq;
+ static struct lock_class_key xa_lock_bh;
+
+ spin_lock_init(&xa->xa_lock);
+ xa->xa_flags = flags;
+ xa->xa_head = NULL;
+
+ lock_type = xa_lock_type(xa);
+ if (lock_type == XA_LOCK_IRQ)
+ lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
+ else if (lock_type == XA_LOCK_BH)
+ lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
+}
+EXPORT_SYMBOL(xa_init_flags);
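/*
 * Editorial sketch (not part of this patch): initialising an XArray
 * whose lock will also be taken from interrupt context.  Assumes the
 * XA_FLAGS_LOCK_IRQ flag defined by xarray.h in this series.
 */
static struct xarray example_irq_xa;

static int __init example_irq_xa_init(void)
{
	xa_init_flags(&example_irq_xa, XA_FLAGS_LOCK_IRQ);
	return 0;
}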
+
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ entry = xas_load(&xas);
+ if (xa_is_zero(entry))
+ entry = NULL;
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
+static void *xas_result(struct xa_state *xas, void *curr)
+{
+ if (xa_is_zero(curr))
+ return NULL;
+ XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
+ if (xas_error(xas))
+ curr = xas->xa_node;
+ return curr;
+}
+
+/**
+ * __xa_erase() - Erase this entry from the XArray while locked.
+ * @xa: XArray.
+ * @index: Index into array.
+ *
+ * If the entry at this index is a multi-index entry then all indices will
+ * be erased, and the entry will no longer be a multi-index entry.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The old entry at this index.
+ */
+void *__xa_erase(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ return xas_result(&xas, xas_store(&xas, NULL));
+}
+EXPORT_SYMBOL_GPL(__xa_erase);
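A sketch of the locked erase, assuming the caller holds xa_lock for a wider critical section (the plain xa_erase() wrapper declared in <linux/xarray.h> is the usual entry point otherwise).

#include <linux/xarray.h>

/* Hypothetical: erase @index while holding the lock explicitly. */
static void *example_erase_locked(struct xarray *xa, unsigned long index)
{
        void *old;

        xa_lock(xa);
        old = __xa_erase(xa, index);
        xa_unlock(xa);
        return old;
}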
+
+/**
+ * xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multi-index entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
+ */
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ curr = xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(xa_store);
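A store sketch; xa_err() from <linux/xarray.h> is assumed here as the way to convert the returned entry into an errno when the store fails.

#include <linux/xarray.h>

/* Hypothetical insert: store @obj at @index and report allocation or
 * validity errors to the caller. */
static int example_insert(struct xarray *xa, unsigned long index, void *obj)
{
        void *old = xa_store(xa, index, obj, GFP_KERNEL);

        return xa_err(old);     /* 0 on success, -ENOMEM/-EINVAL on failure */
}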
+
+/**
+ * __xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ curr = xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_store);
+
+/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ curr = xas_load(&xas);
+ if (curr == XA_ZERO_ENTRY)
+ curr = NULL;
+ if (curr == old) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(xa_cmpxchg);
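An insert-if-empty sketch built on xa_cmpxchg(): passing %NULL as @old stores the entry only when the slot is vacant, and the return value distinguishes "already occupied" from an allocator error. xa_is_err() and xa_err() are assumed from <linux/xarray.h>.

#include <linux/xarray.h>

/* Hypothetical: insert @obj at @index only if nothing is stored there yet.
 * Returns 0 on success, -EBUSY if the slot was occupied, or a negative
 * errno from the allocator. */
static int example_insert_once(struct xarray *xa, unsigned long index, void *obj)
{
        void *curr = xa_cmpxchg(xa, index, NULL, obj, GFP_KERNEL);

        if (xa_is_err(curr))
                return xa_err(curr);
        return curr ? -EBUSY : 0;
}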
+
+/**
+ * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ curr = xas_load(&xas);
+ if (curr == XA_ZERO_ENTRY)
+ curr = NULL;
+ if (curr == old) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loads from @index will continue to see a %NULL pointer until a
+ * subsequent store to @index.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
+ * if specified in XArray flags. May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int lock_type = xa_lock_type(xa);
+ void *curr;
+
+ do {
+ xas_lock_type(&xas, lock_type);
+ curr = xas_load(&xas);
+ if (!curr)
+ xas_store(&xas, XA_ZERO_ENTRY);
+ xas_unlock_type(&xas, lock_type);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_error(&xas);
+}
+EXPORT_SYMBOL(xa_reserve);
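A two-phase sketch around xa_reserve(): memory is reserved up front (possibly sleeping), then the slot is either filled or given back with the xa_release() helper mentioned above, which is assumed to come from <linux/xarray.h>.

#include <linux/xarray.h>

/* Hypothetical two-phase insert: the slot for @index is reserved before
 * entering a section where allocation is not allowed. */
static int example_two_phase(struct xarray *xa, unsigned long index, void *obj)
{
        int err = xa_reserve(xa, index, GFP_KERNEL);

        if (err)
                return err;

        if (!obj) {                     /* caller changed its mind */
                xa_release(xa, index);
                return 0;
        }
        xa_store(xa, index, obj, GFP_KERNEL);   /* fills the reserved slot */
        return 0;
}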
+
+#ifdef CONFIG_XARRAY_MULTI
+static void xas_set_range(struct xa_state *xas, unsigned long first,
+ unsigned long last)
+{
+ unsigned int shift = 0;
+ unsigned long sibs = last - first;
+ unsigned int offset = XA_CHUNK_MASK;
+
+ xas_set(xas, first);
+
+ while ((first & XA_CHUNK_MASK) == 0) {
+ if (sibs < XA_CHUNK_MASK)
+ break;
+ if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
+ break;
+ shift += XA_CHUNK_SHIFT;
+ if (offset == XA_CHUNK_MASK)
+ offset = sibs & XA_CHUNK_MASK;
+ sibs >>= XA_CHUNK_SHIFT;
+ first >>= XA_CHUNK_SHIFT;
+ }
+
+ offset = first & XA_CHUNK_MASK;
+ if (offset + sibs > XA_CHUNK_MASK)
+ sibs = XA_CHUNK_MASK - offset;
+ if ((((first + sibs + 1) << shift) - 1) > last)
+ sibs -= 1;
+
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+}
+
+/**
+ * xa_store_range() - Store this entry at a range of indices in the XArray.
+ * @xa: XArray.
+ * @first: First index to affect.
+ * @last: Last index to affect.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from any index between @first and @last
+ * (inclusive) will return @entry.
+ * Storing into an existing multi-index entry updates the entry of every index.
+ * The marks associated with these indices are unaffected unless @entry is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
+ * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
+ */
+void *xa_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+ if (last < first)
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ if (entry) {
+ unsigned int order = (last == ~0UL) ? 64 :
+ ilog2(last + 1);
+ xas_set_order(&xas, last, order);
+ xas_create(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ }
+ do {
+ xas_set_range(&xas, first, last);
+ xas_store(&xas, entry);
+ if (xas_error(&xas))
+ goto unlock;
+ first += xas_size(&xas);
+ } while (first <= last);
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, NULL);
+}
+EXPORT_SYMBOL(xa_store_range);
+#endif /* CONFIG_XARRAY_MULTI */
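A range-store sketch (only available with CONFIG_XARRAY_MULTI): after the call, every index in the range resolves to the same pointer. The indices are hypothetical.

#include <linux/xarray.h>

/* Hypothetical: make indices 64-127 all resolve to @obj. */
static int example_store_range(struct xarray *xa, void *obj)
{
        return xa_err(xa_store_range(xa, 64, 127, obj, GFP_KERNEL));
}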
+
+/**
+ * __xa_alloc() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @max: Maximum ID to allocate (inclusive).
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @id and @max.
+ * Updates the @id pointer with the index, then stores the entry at that
+ * index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
+ * there is no more space in the XArray.
+ */
+int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+ int err;
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!xa_track_free(xa)))
+ return -EINVAL;
+
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ xas.xa_index = *id;
+ xas_find_marked(&xas, max, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ xas_set_err(&xas, -ENOSPC);
+ xas_store(&xas, entry);
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } while (__xas_nomem(&xas, gfp));
+
+ err = xas_error(&xas);
+ if (!err)
+ *id = xas.xa_index;
+ return err;
+}
+EXPORT_SYMBOL(__xa_alloc);
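An ID-allocation sketch: the array is assumed to have been initialised with XA_FLAGS_ALLOC so that free slots are tracked, and the caller holds xa_lock around __xa_alloc(); the function may drop and retake the lock if @gfp allows sleeping.

#include <linux/xarray.h>

static struct xarray ids;       /* xa_init_flags(&ids, XA_FLAGS_ALLOC) */

/* Hypothetical: allocate a free ID no larger than 1023 and bind it to @obj. */
static int example_alloc_id(void *obj, u32 *id)
{
        int err;

        *id = 0;                        /* start searching from index 0 */
        xa_lock(&ids);
        err = __xa_alloc(&ids, id, 1023, obj, GFP_KERNEL);
        xa_unlock(&ids);
        return err;
}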
+
+/**
+ * __xa_set_mark() - Set this mark on this entry while locked.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Attempting to set a mark on a NULL entry does not succeed.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ */
+void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry = xas_load(&xas);
+
+ if (entry)
+ xas_set_mark(&xas, mark);
+}
+EXPORT_SYMBOL_GPL(__xa_set_mark);
+
+/**
+ * __xa_clear_mark() - Clear this mark on this entry while locked.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ */
+void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry = xas_load(&xas);
+
+ if (entry)
+ xas_clear_mark(&xas, mark);
+}
+EXPORT_SYMBOL_GPL(__xa_clear_mark);
+
+/**
+ * xa_get_mark() - Inquire whether this mark is set on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * This function uses the RCU read lock, so the result may be out of date
+ * by the time it returns. If you need the result to be stable, use a lock.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: True if the entry at @index has this mark set, false if it doesn't.
+ */
+bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+
+ rcu_read_lock();
+ entry = xas_start(&xas);
+ while (xas_get_mark(&xas, mark)) {
+ if (!xa_is_node(entry))
+ goto found;
+ entry = xas_descend(&xas, xa_to_node(entry));
+ }
+ rcu_read_unlock();
+ return false;
+ found:
+ rcu_read_unlock();
+ return true;
+}
+EXPORT_SYMBOL(xa_get_mark);
+
+/**
+ * xa_set_mark() - Set this mark on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Attempting to set a mark on a NULL entry does not succeed.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ */
+void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ xa_lock(xa);
+ __xa_set_mark(xa, index, mark);
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL(xa_set_mark);
+
+/**
+ * xa_clear_mark() - Clear this mark on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Clearing a mark always succeeds.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ */
+void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ xa_lock(xa);
+ __xa_clear_mark(xa, index, mark);
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL(xa_clear_mark);
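A mark-handling sketch using XA_MARK_0 as a hypothetical "dirty" flag on a stored entry.

#include <linux/xarray.h>

/* Hypothetical: flag the entry at @index as dirty, then later test and
 * clear the flag. Setting a mark on a NULL entry is silently ignored. */
static void example_mark_dirty(struct xarray *xa, unsigned long index)
{
        xa_set_mark(xa, index, XA_MARK_0);
}

static bool example_test_and_clear_dirty(struct xarray *xa, unsigned long index)
{
        bool dirty = xa_get_mark(xa, index, XA_MARK_0);

        if (dirty)
                xa_clear_mark(xa, index, XA_MARK_0);
        return dirty;
}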
+
+/**
+ * xa_find() - Search the XArray for an entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter, and has the lowest
+ * index that is at least @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may not find
+ * entries which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find(struct xarray *xa, unsigned long *indexp,
+ unsigned long max, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ if (entry)
+ *indexp = xas.xa_index;
+ return entry;
+}
+EXPORT_SYMBOL(xa_find);
+
+/**
+ * xa_find_after() - Search the XArray for a present entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter and has the lowest
+ * index that is above @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may miss entries
+ * which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find_after(struct xarray *xa, unsigned long *indexp,
+ unsigned long max, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp + 1);
+ void *entry;
+
+ rcu_read_lock();
+ for (;;) {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+ if (xas.xa_shift) {
+ if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
+ continue;
+ } else {
+ if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
+ continue;
+ }
+ if (!xas_retry(&xas, entry))
+ break;
+ }
+ rcu_read_unlock();
+
+ if (entry)
+ *indexp = xas.xa_index;
+ return entry;
+}
+EXPORT_SYMBOL(xa_find_after);
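An iteration sketch combining xa_find() and xa_find_after(): the former positions on the first matching entry at or after *index, the latter advances strictly past it. XA_PRESENT is assumed to be the match-anything filter from <linux/xarray.h>.

#include <linux/xarray.h>

/* Hypothetical: count the present entries with indices in [0, max]. */
static unsigned long example_count(struct xarray *xa, unsigned long max)
{
        unsigned long index = 0, count = 0;
        void *entry;

        for (entry = xa_find(xa, &index, max, XA_PRESENT); entry;
             entry = xa_find_after(xa, &index, max, XA_PRESENT))
                count++;

        return count;
}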
+
+static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
+ unsigned long max, unsigned int n)
+{
+ void *entry;
+ unsigned int i = 0;
+
+ rcu_read_lock();
+ xas_for_each(xas, entry, max) {
+ if (xas_retry(xas, entry))
+ continue;
+ dst[i++] = entry;
+ if (i == n)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i;
+}
+
+static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
+ unsigned long max, unsigned int n, xa_mark_t mark)
+{
+ void *entry;
+ unsigned int i = 0;
+
+ rcu_read_lock();
+ xas_for_each_marked(xas, entry, max, mark) {
+ if (xas_retry(xas, entry))
+ continue;
+ dst[i++] = entry;
+ if (i == n)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i;
+}
+
+/**
+ * xa_extract() - Copy selected entries from the XArray into a normal array.
+ * @xa: The source XArray to copy from.
+ * @dst: The buffer to copy entries into.
+ * @start: The first index in the XArray eligible to be selected.
+ * @max: The last index in the XArray eligible to be selected.
+ * @n: The maximum number of entries to copy.
+ * @filter: Selection criterion.
+ *
+ * Copies up to @n entries that match @filter from the XArray. The
+ * copied entries will have indices between @start and @max, inclusive.
+ *
+ * The @filter may be an XArray mark value, in which case entries which are
+ * marked with that mark will be copied. It may also be %XA_PRESENT, in
+ * which case all entries which are not NULL will be copied.
+ *
+ * The entries returned may not represent a snapshot of the XArray at a
+ * moment in time. For example, if another thread stores to index 5, then
+ * index 10, calling xa_extract() may return the old contents of index 5
+ * and the new contents of index 10. Indices not modified while this
+ * function is running will not be skipped.
+ *
+ * If you need stronger guarantees, holding the xa_lock across calls to this
+ * function will prevent concurrent modification.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The number of entries copied.
+ */
+unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
+ unsigned long max, unsigned int n, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, start);
+
+ if (!n)
+ return 0;
+
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ return xas_extract_marked(&xas, dst, max, n, filter);
+ return xas_extract_present(&xas, dst, max, n);
+}
+EXPORT_SYMBOL(xa_extract);
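A batching sketch: copy up to 16 entries marked XA_MARK_0 into a caller-supplied array. As noted above, the copy is not an atomic snapshot unless xa_lock is held across the call.

#include <linux/xarray.h>

/* Hypothetical: gather a batch of entries marked XA_MARK_0 from the
 * first 1024 indices. Returns how many were copied into @batch[16]. */
static unsigned int example_gather(struct xarray *xa, void **batch)
{
        return xa_extract(xa, batch, 0, 1023, 16, XA_MARK_0);
}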
+
+/**
+ * xa_destroy() - Free all internal data structures.
+ * @xa: XArray.
+ *
+ * After calling this function, the XArray is empty and has freed all memory
+ * allocated for its internal data structures. You are responsible for
+ * freeing the objects referenced by the XArray.
+ *
+ * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
+ */
+void xa_destroy(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long flags;
+ void *entry;
+
+ xas.xa_node = NULL;
+ xas_lock_irqsave(&xas, flags);
+ entry = xa_head_locked(xa);
+ RCU_INIT_POINTER(xa->xa_head, NULL);
+ xas_init_marks(&xas);
+ /* lockdep checks we're still holding the lock in xas_free_nodes() */
+ if (xa_is_node(entry))
+ xas_free_nodes(&xas, xa_to_node(entry));
+ xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(xa_destroy);
+
+#ifdef XA_DEBUG
+void xa_dump_node(const struct xa_node *node)
+{
+ unsigned i, j;
+
+ if (!node)
+ return;
+ if ((unsigned long)node & 3) {
+ pr_cont("node %px\n", node);
+ return;
+ }
+
+ pr_cont("node %px %s %d parent %px shift %d count %d values %d "
+ "array %px list %px %px marks",
+ node, node->parent ? "offset" : "max", node->offset,
+ node->parent, node->shift, node->count, node->nr_values,
+ node->array, node->private_list.prev, node->private_list.next);
+ for (i = 0; i < XA_MAX_MARKS; i++)
+ for (j = 0; j < XA_MARK_LONGS; j++)
+ pr_cont(" %lx", node->marks[i][j]);
+ pr_cont("\n");
+}
+
+void xa_dump_index(unsigned long index, unsigned int shift)
+{
+ if (!shift)
+ pr_info("%lu: ", index);
+ else if (shift >= BITS_PER_LONG)
+ pr_info("0-%lu: ", ~0UL);
+ else
+ pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
+}
+
+void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
+{
+ if (!entry)
+ return;
+
+ xa_dump_index(index, shift);
+
+ if (xa_is_node(entry)) {
+ if (shift == 0) {
+ pr_cont("%px\n", entry);
+ } else {
+ unsigned long i;
+ struct xa_node *node = xa_to_node(entry);
+ xa_dump_node(node);
+ for (i = 0; i < XA_CHUNK_SIZE; i++)
+ xa_dump_entry(node->slots[i],
+ index + (i << node->shift), node->shift);
+ }
+ } else if (xa_is_value(entry))
+ pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
+ else if (!xa_is_internal(entry))
+ pr_cont("%px\n", entry);
+ else if (xa_is_retry(entry))
+ pr_cont("retry (%ld)\n", xa_to_internal(entry));
+ else if (xa_is_sibling(entry))
+ pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
+ else if (xa_is_zero(entry))
+ pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else
+ pr_cont("UNKNOWN ENTRY (%px)\n", entry);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+ void *entry = xa->xa_head;
+ unsigned int shift = 0;
+
+ pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
+ xa->xa_flags, xa_marked(xa, XA_MARK_0),
+ xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
+ if (xa_is_node(entry))
+ shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
+ xa_dump_entry(entry, 0, shift);
+}
+#endif
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 34532d14fd4c..912aae5fa09e 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void)
{
- const uint32_t poly = 0xEDB88320;
+ const uint32_t poly = CRC32_POLY_LE;
uint32_t i;
uint32_t j;
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 482b90f363fe..09360ebb510e 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -102,6 +102,10 @@
# endif
#endif
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 58a733b10387..48f14cd58c77 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
+ /* fall through */
case DICT:
if (state->havedict == 0) {
RESTORE();
@@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush)
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
+ /* fall through */
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
+ /* fall through */
case TYPEDO:
if (state->last) {
BYTEBITS();
@@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
+ /* fall through */
case COPY:
copy = state->length;
if (copy) {
@@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush)
#endif
state->have = 0;
state->mode = LENLENS;
+ /* fall through */
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
@@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->have = 0;
state->mode = CODELENS;
+ /* fall through */
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
@@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = LEN;
+ /* fall through */
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
@@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
+ /* fall through */
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush)
DROPBITS(state->extra);
}
state->mode = DIST;
+ /* fall through */
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
+ /* fall through */
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = MATCH;
+ /* fall through */
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
INITBITS();
}
state->mode = DONE;
+ /* fall through */
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
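The comments added in this hunk mark intentional case fall-through so that GCC's -Wimplicit-fallthrough (enabled at a comment-recognising level in kernel builds, as far as I know) does not warn. A standalone illustration of the same idiom, with hypothetical states:

/* Without the annotation, -Wimplicit-fallthrough would warn on "case 0". */
static int classify(int state)
{
        int ret = 0;

        switch (state) {
        case 0:
                ret += 1;
                /* fall through */
        case 1:
                ret += 1;
                break;
        default:
                ret = -1;
        }
        return ret;
}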
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index dd0a359c135b..7920cbbfeae9 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -3,16 +3,7 @@ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
ccflags-y += -O3
-# Object files unique to zstd_compress and zstd_decompress
-zstd_compress-y := fse_compress.o huf_compress.o compress.o
-zstd_decompress-y := huf_decompress.o decompress.o
-
-# These object files are shared between the modules.
-# Always add them to zstd_compress.
-# Unless both zstd_compress and zstd_decompress are built in
-# then also add them to zstd_decompress.
-zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o
-
-ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy)
- zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o
-endif
+zstd_compress-y := fse_compress.o huf_compress.o compress.o \
+ entropy_common.o fse_decompress.o zstd_common.o
+zstd_decompress-y := huf_decompress.o decompress.o \
+ entropy_common.o fse_decompress.o zstd_common.o