Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 22
-rw-r--r--  lib/Kconfig.debug | 73
-rw-r--r--  lib/Makefile | 23
-rw-r--r--  lib/atomic64_test.c | 10
-rw-r--r--  lib/crc32.c | 824
-rw-r--r--  lib/crc32test.c | 856
-rw-r--r--  lib/debugobjects.c | 58
-rw-r--r--  lib/decompress_unlz4.c | 13
-rw-r--r--  lib/digsig.c | 2
-rw-r--r--  lib/dma-debug.c | 5
-rw-r--r--  lib/dma-noop.c | 4
-rw-r--r--  lib/dma-virt.c | 72
-rw-r--r--  lib/find_bit.c | 4
-rw-r--r--  lib/fonts/Kconfig | 16
-rw-r--r--  lib/glob.c | 164
-rw-r--r--  lib/globtest.c | 167
-rw-r--r--  lib/halfmd4.c | 67
-rw-r--r--  lib/idr.c | 1242
-rw-r--r--  lib/ioremap.c | 1
-rw-r--r--  lib/iov_iter.c | 54
-rw-r--r--  lib/list_debug.c | 45
-rw-r--r--  lib/lz4/Makefile | 2
-rw-r--r--  lib/lz4/lz4_compress.c | 1141
-rw-r--r--  lib/lz4/lz4_decompress.c | 665
-rw-r--r--  lib/lz4/lz4defs.h | 338
-rw-r--r--  lib/lz4/lz4hc_compress.c | 846
-rw-r--r--  lib/nmi_backtrace.c | 2
-rw-r--r--  lib/parman.c | 376
-rw-r--r--  lib/percpu_counter.c | 5
-rw-r--r--  lib/prime_numbers.c | 315
-rw-r--r--  lib/radix-tree.c | 773
-rw-r--r--  lib/rbtree.c | 4
-rw-r--r--  lib/refcount.c | 267
-rw-r--r--  lib/rhashtable.c | 273
-rw-r--r--  lib/sbitmap.c | 139
-rw-r--r--  lib/scatterlist.c | 6
-rw-r--r--  lib/show_mem.c | 4
-rw-r--r--  lib/siphash.c | 551
-rw-r--r--  lib/sort.c | 41
-rw-r--r--  lib/swiotlb.c | 58
-rw-r--r--  lib/test_firmware.c | 92
-rw-r--r--  lib/test_kasan.c | 34
-rw-r--r--  lib/test_parman.c | 395
-rw-r--r--  lib/test_siphash.c | 223
-rw-r--r--  lib/test_sort.c | 44
-rw-r--r--  lib/test_user_copy.c | 118
-rw-r--r--  lib/timerqueue.c | 3
-rw-r--r--  lib/vsprintf.c | 8
48 files changed, 6910 insertions, 3535 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 260a80e313b9..0c8b78a9ae2e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -103,8 +103,7 @@ config CRC32
functions require M here.
config CRC32_SELFTEST
- bool "CRC32 perform self test on init"
- default n
+ tristate "CRC32 perform self test on init"
depends on CRC32
help
This option enables the CRC32 library functions to perform a
@@ -395,6 +394,16 @@ config HAS_DMA
depends on !NO_DMA
default y
+config DMA_NOOP_OPS
+ bool
+ depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
+ default n
+
+config DMA_VIRT_OPS
+ bool
+ depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
+ default n
+
config CHECK_SIGNATURE
bool
@@ -432,8 +441,7 @@ config GLOB
depends on this.
config GLOB_SELFTEST
- bool "glob self-test on init"
- default n
+ tristate "glob self-test on init"
depends on GLOB
help
This option enables a simple self-test of the glob_match
@@ -550,4 +558,10 @@ config STACKDEPOT
config SBITMAP
bool
+config PARMAN
+ tristate "parman" if COMPILE_TEST
+
+config PRIME_NUMBERS
+ tristate
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b06848a104e6..97d62c2da6c2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
+ depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -416,6 +416,16 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
This may be set to 1 or 0 to enable or disable them all, or
to a bitmask as described in Documentation/sysrq.txt.
+config MAGIC_SYSRQ_SERIAL
+ bool "Enable magic SysRq key over serial"
+ depends on MAGIC_SYSRQ
+ default y
+ help
+ Many embedded boards have a disconnected TTL level serial which can
+ generate some garbage that can lead to spurious false sysrq detects.
+ This option allows you to decide whether you want to enable the
+ magic SysRq key.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -622,9 +632,12 @@ config DEBUG_VM_PGFLAGS
If unsure, say N.
+config ARCH_HAS_DEBUG_VIRTUAL
+ bool
+
config DEBUG_VIRTUAL
bool "Debug VM translations"
- depends on DEBUG_KERNEL && X86
+ depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
help
Enable some costly sanity checks in virtual to page code. This can
catch mistakes with virt_to_page() and friends.
@@ -980,20 +993,6 @@ config DEBUG_TIMEKEEPING
If unsure, say N.
-config TIMER_STATS
- bool "Collect kernel timers statistics"
- depends on DEBUG_KERNEL && PROC_FS
- help
- If you say Y here, additional code will be inserted into the
- timer routines to collect statistics about kernel timers being
- reprogrammed. The statistics can be read from /proc/timer_stats.
- The statistics collection is started by writing 1 to /proc/timer_stats,
- writing 0 stops it. This feature is useful to collect information
- about timer usage patterns in kernel and userspace. This feature
- is lightweight if enabled in the kernel config but not activated
- (it defaults to deactivated on bootup and will only be activated
- if some application like powertop activates it explicitly).
-
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
@@ -1180,6 +1179,18 @@ config LOCK_TORTURE_TEST
Say M if you want these torture tests to build as a module.
Say N if you are unsure.
+config WW_MUTEX_SELFTEST
+ tristate "Wait/wound mutex selftests"
+ help
+ This option provides a kernel module that runs tests on the
+ struct ww_mutex locking API.
+
+ It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction
+ with this test harness.
+
+ Say M if you want these self tests to build as a module.
+ Say N if you are unsure.
+
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -1450,6 +1461,7 @@ config RCU_CPU_STALL_TIMEOUT
config RCU_TRACE
bool "Enable tracing for RCU"
depends on DEBUG_KERNEL
+ default y if TREE_RCU
select TRACE_CLOCK
help
This option provides tracing in RCU which presents stats
@@ -1714,6 +1726,14 @@ config TEST_LIST_SORT
If unsure, say N.
+config TEST_SORT
+ bool "Array-based sort test"
+ depends on DEBUG_KERNEL
+ help
+ This option enables the self-test function of 'sort()' at boot.
+
+ If unsure, say N.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -1765,9 +1785,10 @@ config PERCPU_TEST
If unsure, say N.
config ATOMIC64_SELFTEST
- bool "Perform an atomic64_t self-test at boot"
+ tristate "Perform an atomic64_t self-test"
help
- Enable this option to test the atomic64_t functions at boot.
+ Enable this option to test the atomic64_t functions at boot or
+ at module load time.
If unsure, say N.
@@ -1819,13 +1840,23 @@ config TEST_HASH
tristate "Perform selftest on hash functions"
default n
help
- Enable this option to test the kernel's integer (<linux/hash,h>)
- and string (<linux/stringhash.h>) hash functions on boot
- (or module load).
+ Enable this option to test the kernel's integer (<linux/hash.h>),
+ string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
+ hash functions on boot (or module load).
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config TEST_PARMAN
+ tristate "Perform selftest on priority array manager"
+ default n
+ depends on PARMAN
+ help
+ Enable this option to test priority array manager on boot
+ (or module load).
+
+ If unsure, say N.
+
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index bc4073a8cd08..320ac46a8725 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,21 +22,26 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
+ earlycpio.o seq_buf.o siphash.o \
+ nmi_backtrace.o nodemask.o win_minmax.o
+
+CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
+CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_HAS_DMA) += dma-noop.o
+lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
obj-y += lockref.o
-obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o
+ once.o refcount.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
@@ -44,17 +49,19 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_PARMAN) += test_parman.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -90,6 +97,7 @@ obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
@@ -159,6 +167,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
+obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_SIGNATURE) += digsig.o
@@ -196,6 +205,8 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
+obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
@@ -229,3 +240,5 @@ obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
obj-$(CONFIG_SBITMAP) += sbitmap.o
+
+obj-$(CONFIG_PARMAN) += parman.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 46042901130f..fd70c0e0e673 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
+#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h> /* for boot_cpu_has below */
@@ -241,7 +242,7 @@ static __init void test_atomic64(void)
BUG_ON(v.counter != r);
}
-static __init int test_atomics(void)
+static __init int test_atomics_init(void)
{
test_atomic();
test_atomic64();
@@ -264,4 +265,9 @@ static __init int test_atomics(void)
return 0;
}
-core_initcall(test_atomics);
+static __exit void test_atomics_exit(void) {}
+
+module_init(test_atomics_init);
+module_exit(test_atomics_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crc32.c b/lib/crc32.c
index 7fbd1a112b9d..6ddc92bc1460 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -340,827 +340,3 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#endif
EXPORT_SYMBOL(crc32_be);
-
-#ifdef CONFIG_CRC32_SELFTEST
-
-/* 4096 random bytes */
-static u8 const __aligned(8) test_buf[] __initconst =
-{
- 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
- 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
- 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
- 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
- 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
- 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
- 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
- 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
- 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
- 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
- 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
- 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
- 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
- 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
- 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
- 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
- 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
- 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
- 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
- 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
- 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
- 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
- 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
- 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
- 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
- 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
- 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
- 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
- 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
- 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
- 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
- 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
- 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
- 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
- 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
- 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
- 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
- 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
- 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
- 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
- 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
- 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
- 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
- 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
- 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
- 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
- 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
- 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
- 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
- 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
- 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
- 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
- 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
- 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
- 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
- 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
- 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
- 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
- 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
- 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
- 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
- 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
- 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
- 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
- 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
- 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
- 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
- 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
- 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
- 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
- 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
- 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
- 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
- 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
- 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
- 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
- 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
- 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
- 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
- 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
- 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
- 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
- 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
- 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
- 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
- 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
- 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
- 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
- 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
- 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
- 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
- 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
- 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
- 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
- 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
- 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
- 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
- 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
- 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
- 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
- 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
- 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
- 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
- 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
- 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
- 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
- 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
- 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
- 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
- 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
- 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
- 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
- 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
- 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
- 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
- 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
- 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
- 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
- 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
- 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
- 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
- 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
- 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
- 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
- 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
- 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
- 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
- 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
- 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
- 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
- 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
- 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
- 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
- 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
- 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
- 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
- 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
- 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
- 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
- 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
- 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
- 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
- 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
- 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
- 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
- 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
- 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
- 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
- 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
- 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
- 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
- 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
- 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
- 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
- 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
- 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
- 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
- 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
- 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
- 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
- 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
- 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
- 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
- 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
- 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
- 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
- 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
- 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
- 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
- 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
- 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
- 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
- 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
- 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
- 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
- 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
- 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
- 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
- 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
- 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
- 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
- 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
- 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
- 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
- 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
- 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
- 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
- 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
- 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
- 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
- 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
- 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
- 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
- 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
- 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
- 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
- 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
- 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
- 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
- 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
- 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
- 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
- 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
- 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
- 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
- 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
- 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
- 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
- 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
- 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
- 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
- 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
- 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
- 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
- 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
- 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
- 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
- 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
- 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
- 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
- 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
- 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
- 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
- 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
- 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
- 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
- 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
- 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
- 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
- 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
- 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
- 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
- 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
- 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
- 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
- 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
- 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
- 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
- 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
- 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
- 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
- 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
- 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
- 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
- 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
- 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
- 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
- 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
- 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
- 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
- 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
- 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
- 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
- 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
- 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
- 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
- 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
- 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
- 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
- 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
- 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
- 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
- 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
- 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
- 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
- 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
- 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
- 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
- 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
- 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
- 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
- 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
- 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
- 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
- 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
- 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
- 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
- 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
- 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
- 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
- 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
- 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
- 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
- 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
- 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
- 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
- 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
- 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
- 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
- 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
- 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
- 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
- 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
- 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
- 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
- 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
- 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
- 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
- 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
- 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
- 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
- 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
- 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
- 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
- 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
- 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
- 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
- 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
- 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
- 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
- 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
- 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
- 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
- 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
- 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
- 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
- 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
- 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
- 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
- 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
- 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
- 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
- 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
- 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
- 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
- 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
- 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
- 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
- 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
- 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
- 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
- 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
- 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
- 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
- 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
- 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
- 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
- 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
- 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
- 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
- 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
- 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
- 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
- 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
- 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
- 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
- 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
- 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
- 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
- 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
- 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
- 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
- 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
- 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
- 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
- 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
- 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
- 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
- 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
- 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
- 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
- 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
- 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
- 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
- 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
- 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
- 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
- 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
- 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
- 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
- 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
- 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
- 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
- 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
- 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
- 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
- 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
- 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
- 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
- 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
- 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
- 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
- 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
- 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
- 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
- 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
- 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
- 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
- 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
- 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
- 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
- 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
- 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
- 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
- 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
- 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
- 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
- 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
- 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
- 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
- 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
- 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
- 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
- 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
- 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
- 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
- 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
- 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
- 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
- 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
- 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
- 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
- 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
- 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
- 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
- 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
- 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
- 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
- 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
- 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
- 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
- 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
- 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
- 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
- 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
- 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
- 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
- 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
- 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
- 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
- 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
- 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
- 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
- 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
- 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
- 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
- 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
- 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
- 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
- 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
- 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
- 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
- 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
- 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
- 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
- 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
- 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
- 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
- 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
- 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
- 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
- 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
- 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
- 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
- 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
- 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
- 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
- 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
- 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
- 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
- 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
- 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
- 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
- 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
- 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
- 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
- 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
- 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
- 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
- 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
- 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
- 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
- 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
- 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
- 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
- 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
- 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
- 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
- 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
- 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
- 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
- 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
- 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
- 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
- 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
- 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
- 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
- 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
- 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
- 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
- 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
- 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
- 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
- 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
- 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
- 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
- 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
- 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
- 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
- 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
- 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
- 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
- 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
- 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
- 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
- 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
- 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
- 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
- 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
- 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
- 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
- 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
-};
-
-/* 100 test cases */
-static struct crc_test {
- u32 crc; /* random starting crc */
- u32 start; /* random 6 bit offset in buf */
- u32 length; /* random 11 bit length of test */
- u32 crc_le; /* expected crc32_le result */
- u32 crc_be; /* expected crc32_be result */
- u32 crc32c_le; /* expected crc32c_le result */
-} const test[] __initconst =
-{
- {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
- {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
- {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
- {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
- {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
- {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
- {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
- {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
- {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
- {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
- {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
- {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
- {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
- {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
- {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
- {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
- {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
- {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
- {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
- {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
- {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
- {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
- {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
- {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
- {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
- {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
- {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
- {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
- {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
- {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
- {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
- {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
- {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
- {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
- {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
- {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
- {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
- {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
- {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
- {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
- {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
- {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
- {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
- {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
- {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
- {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
- {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
- {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
- {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
- {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
- {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
- {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
- {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
- {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
- {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
- {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
- {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
- {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
- {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
- {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
- {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
- {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
- {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
- {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
- {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
- {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
- {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
- {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
- {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
- {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
- {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
- {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
- {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
- {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
- {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
- {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
- {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
- {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
- {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
- {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
- {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
- {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
- {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
- {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
- {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
- {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
- {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
- {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
- {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
- {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
- {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
- {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
- {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
- {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
- {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
- {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
- {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
- {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
- {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
- {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
-};
-
-#include <linux/time.h>
-
-static int __init crc32c_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
-
- crc ^= __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
- local_irq_disable();
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
- local_irq_enable();
-
- pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
-
- if (errors)
- pr_warn("crc32c: %d self tests failed\n", errors);
- else {
- pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32c_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = __crc32c_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = __crc32c_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc32c_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32c_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
-
- crc ^= crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
-
- crc ^= crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
- local_irq_disable();
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
-
- if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
- local_irq_enable();
-
- pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
- CRC_LE_BITS, CRC_BE_BITS);
-
- if (errors)
- pr_warn("crc32: %d self tests failed\n", errors);
- else {
- pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = crc32_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = crc32_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32test_init(void)
-{
- crc32_test();
- crc32c_test();
-
- crc32_combine_test();
- crc32c_combine_test();
-
- return 0;
-}
-
-static void __exit crc32_exit(void)
-{
-}
-
-module_init(crc32test_init);
-module_exit(crc32_exit);
-#endif /* CONFIG_CRC32_SELFTEST */
diff --git a/lib/crc32test.c b/lib/crc32test.c
new file mode 100644
index 000000000000..97d6a57cefcc
--- /dev/null
+++ b/lib/crc32test.c
@@ -0,0 +1,856 @@
+/*
+ * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
+ * cleaned up code to current version of sparse and added the slicing-by-8
+ * algorithm to the closely similar existing slicing-by-4 algorithm.
+ *
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ * Code was from the public domain, copyright abandoned. Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include "crc32defs.h"
+
+/* 4096 random bytes */
+static u8 const __aligned(8) test_buf[] __initconst =
+{
+ 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
+ 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
+ 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
+ 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
+ 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
+ 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
+ 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
+ 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
+ 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
+ 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
+ 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
+ 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
+ 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
+ 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
+ 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
+ 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
+ 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
+ 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
+ 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
+ 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
+ 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
+ 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
+ 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
+ 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
+ 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
+ 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
+ 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
+ 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
+ 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
+ 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
+ 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
+ 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
+ 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
+ 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
+ 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
+ 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
+ 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
+ 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
+ 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
+ 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
+ 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
+ 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
+ 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
+ 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
+ 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
+ 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
+ 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
+ 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
+ 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
+ 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
+ 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
+ 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
+ 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
+ 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
+ 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
+ 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
+ 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
+ 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
+ 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
+ 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
+ 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
+ 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
+ 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
+ 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
+ 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
+ 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
+ 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
+ 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
+ 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
+ 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
+ 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
+ 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
+ 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
+ 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
+ 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
+ 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
+ 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
+ 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
+ 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
+ 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
+ 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
+ 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
+ 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
+ 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
+ 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
+ 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
+ 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
+ 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
+ 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
+ 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
+ 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
+ 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
+ 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
+ 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
+ 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
+ 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
+ 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
+ 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
+ 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
+ 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
+ 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
+ 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
+ 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
+ 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
+ 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
+ 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
+ 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
+ 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
+ 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
+ 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
+ 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
+ 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
+ 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
+ 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
+ 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
+ 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
+ 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
+ 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
+ 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
+ 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
+ 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
+ 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
+ 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
+ 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
+ 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
+ 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
+ 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
+ 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
+ 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
+ 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
+ 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
+ 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
+ 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
+ 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
+ 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
+ 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
+ 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
+ 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
+ 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
+ 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
+ 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
+ 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
+ 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
+ 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
+ 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
+ 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
+ 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
+ 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
+ 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
+ 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
+ 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
+ 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
+ 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
+ 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
+ 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
+ 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
+ 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
+ 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
+ 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
+ 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
+ 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
+ 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
+ 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
+ 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
+ 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
+ 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
+ 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
+ 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
+ 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
+ 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
+ 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
+ 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
+ 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
+ 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
+ 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
+ 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
+ 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
+ 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
+ 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
+ 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
+ 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
+ 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
+ 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
+ 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
+ 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
+ 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
+ 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
+ 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
+ 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
+ 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
+ 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
+ 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
+ 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
+ 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
+ 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
+ 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
+ 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
+ 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
+ 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
+ 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
+ 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
+ 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
+ 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
+ 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
+ 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
+ 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
+ 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
+ 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
+ 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
+ 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
+ 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
+ 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
+ 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
+ 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
+ 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
+ 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
+ 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
+ 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
+ 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
+ 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
+ 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
+ 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
+ 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
+ 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
+ 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
+ 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
+ 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
+ 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
+ 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
+ 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
+ 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
+ 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
+ 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
+ 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
+ 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
+ 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
+ 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
+ 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
+ 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
+ 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
+ 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
+ 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
+ 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
+ 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
+ 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
+ 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
+ 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
+ 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
+ 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
+ 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
+ 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
+ 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
+ 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
+ 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
+ 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
+ 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
+ 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
+ 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
+ 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
+ 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
+ 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
+ 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
+ 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
+ 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
+ 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
+ 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
+ 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
+ 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
+ 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
+ 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
+ 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
+ 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
+ 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
+ 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
+ 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
+ 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
+ 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
+ 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
+ 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
+ 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
+ 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
+ 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
+ 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
+ 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
+ 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
+ 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
+ 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
+ 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
+ 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
+ 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
+ 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
+ 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
+ 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
+ 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
+ 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
+ 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
+ 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
+ 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
+ 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
+ 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
+ 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
+ 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
+ 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
+ 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
+ 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
+ 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
+ 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
+ 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
+ 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
+ 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
+ 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
+ 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
+ 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
+ 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
+ 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
+ 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
+ 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
+ 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
+ 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
+ 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
+ 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
+ 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
+ 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
+ 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
+ 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
+ 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
+ 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
+ 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
+ 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
+ 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
+ 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
+ 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
+ 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
+ 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
+ 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
+ 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
+ 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
+ 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
+ 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
+ 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
+ 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
+ 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
+ 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
+ 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
+ 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
+ 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
+ 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
+ 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
+ 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
+ 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
+ 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
+ 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
+ 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
+ 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
+ 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
+ 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
+ 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
+ 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
+ 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
+ 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
+ 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
+ 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
+ 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
+ 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
+ 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
+ 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
+ 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
+ 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
+ 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
+ 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
+ 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
+ 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
+ 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
+ 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
+ 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
+ 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
+ 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
+ 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
+ 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
+ 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
+ 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
+ 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
+ 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
+ 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
+ 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
+ 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
+ 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
+ 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
+ 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
+ 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
+ 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
+ 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
+ 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
+ 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
+ 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
+ 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
+ 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
+ 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
+ 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
+ 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
+ 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
+ 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
+ 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
+ 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
+ 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
+ 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
+ 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
+ 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
+ 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
+ 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
+ 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
+ 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
+ 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
+ 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
+ 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
+ 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
+ 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
+ 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
+ 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
+ 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
+ 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
+ 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
+ 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
+ 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
+ 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
+ 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
+ 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
+ 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
+ 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
+ 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
+ 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
+ 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
+ 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
+ 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
+ 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
+ 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
+ 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
+ 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
+ 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
+ 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
+ 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
+ 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
+ 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
+ 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
+ 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
+ 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
+ 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
+ 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
+ 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
+ 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
+ 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
+ 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
+ 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
+ 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
+ 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
+ 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
+ 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
+ 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
+ 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
+ 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
+ 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
+ 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
+ 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
+ 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
+ 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
+ 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
+ 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
+ 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
+ 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
+ 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
+ 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
+ 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
+ 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
+ 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
+ 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
+ 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
+ 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
+ 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
+ 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
+ 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
+ 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
+ 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
+ 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
+ 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
+ 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
+ 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
+ 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
+ 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
+ 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
+ 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
+ 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
+ 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
+ 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
+ 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
+ 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
+ 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
+ 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
+ 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
+ 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
+ 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
+ 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
+ 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
+ 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
+ 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
+ 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
+ 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
+ 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
+ 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
+ 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
+ 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
+ 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
+ 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
+};
+
+/* 100 test cases */
+static struct crc_test {
+ u32 crc; /* random starting crc */
+ u32 start; /* random 6 bit offset in buf */
+ u32 length; /* random 11 bit length of test */
+ u32 crc_le; /* expected crc32_le result */
+ u32 crc_be; /* expected crc32_be result */
+ u32 crc32c_le; /* expected crc32c_le result */
+} const test[] __initconst =
+{
+ {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
+ {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
+ {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
+ {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
+ {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
+ {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
+ {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
+ {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
+ {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
+ {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
+ {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
+ {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
+ {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
+ {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
+ {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
+ {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
+ {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
+ {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
+ {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
+ {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
+ {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
+ {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
+ {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
+ {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
+ {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
+ {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
+ {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
+ {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
+ {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
+ {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
+ {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
+ {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
+ {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
+ {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
+ {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
+ {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
+ {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
+ {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
+ {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
+ {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
+ {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
+ {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
+ {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
+ {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
+ {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
+ {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
+ {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
+ {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
+ {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
+ {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
+ {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
+ {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
+ {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
+ {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
+ {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
+ {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
+ {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
+ {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
+ {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
+ {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
+ {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
+ {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
+ {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
+ {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
+ {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
+ {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
+ {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
+ {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
+ {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
+ {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
+ {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
+ {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
+ {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
+ {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
+ {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
+ {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
+ {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
+ {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
+ {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
+ {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
+ {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
+ {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
+ {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
+ {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
+ {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
+ {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
+ {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
+ {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
+ {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
+ {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
+ {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
+ {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
+ {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
+ {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
+ {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
+ {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
+ {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
+ {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
+ {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
+ {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
+};
+
+#include <linux/time.h>
+
+static int __init crc32c_test(void)
+{
+ int i;
+ int errors = 0;
+ int bytes = 0;
+ u64 nsec;
+ unsigned long flags;
+
+ /* keep static to prevent cache warming code from
+ * getting eliminated by the compiler */
+ static u32 crc;
+
+ /* pre-warm the cache */
+ for (i = 0; i < 100; i++) {
+ bytes += 2*test[i].length;
+
+ crc ^= __crc32c_le(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+ }
+
+ /* reduce OS noise */
+ local_irq_save(flags);
+ local_irq_disable();
+
+ nsec = ktime_get_ns();
+ for (i = 0; i < 100; i++) {
+ if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+ }
+ nsec = ktime_get_ns() - nsec;
+
+ local_irq_restore(flags);
+ local_irq_enable();
+
+ pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
+
+ if (errors)
+ pr_warn("crc32c: %d self tests failed\n", errors);
+ else {
+ pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
+ bytes, nsec);
+ }
+
+ return 0;
+}
+
+static int __init crc32c_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = __crc32c_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = __crc32c_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc32c_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32c_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
+
+static int __init crc32_test(void)
+{
+ int i;
+ int errors = 0;
+ int bytes = 0;
+ u64 nsec;
+ unsigned long flags;
+
+ /* keep static to prevent cache warming code from
+ * getting eliminated by the compiler */
+ static u32 crc;
+
+ /* pre-warm the cache */
+ for (i = 0; i < 100; i++) {
+ bytes += 2*test[i].length;
+
+ crc ^= crc32_le(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+
+ crc ^= crc32_be(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+ }
+
+ /* reduce OS noise */
+ local_irq_save(flags);
+ local_irq_disable();
+
+ nsec = ktime_get_ns();
+ for (i = 0; i < 100; i++) {
+ if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+
+ if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+ }
+ nsec = ktime_get_ns() - nsec;
+
+ local_irq_restore(flags);
+ local_irq_enable();
+
+ pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
+ CRC_LE_BITS, CRC_BE_BITS);
+
+ if (errors)
+ pr_warn("crc32: %d self tests failed\n", errors);
+ else {
+ pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
+ bytes, nsec);
+ }
+
+ return 0;
+}
+
+static int __init crc32_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = crc32_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = crc32_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
+
+static int __init crc32test_init(void)
+{
+ crc32_test();
+ crc32c_test();
+
+ crc32_combine_test();
+ crc32c_combine_test();
+
+ return 0;
+}
+
+static void __exit crc32_exit(void)
+{
+}
+
+module_init(crc32test_init);
+module_exit(crc32_exit);
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("CRC32 selftest");
+MODULE_LICENSE("GPL");
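
The combine self-tests above verify the identity that lets a CRC be computed over two fragments and then stitched together. A minimal sketch of that use, assuming the usual ~0 seed (the helper name is hypothetical, not part of the patch):

	#include <linux/crc32.h>

	/* Hypothetical helper: CRC two fragments separately, then combine.
	 * The result equals crc32_le(~0, <p1 followed by p2>, l1 + l2),
	 * which is exactly the property crc32_combine_test() checks. */
	static u32 crc_of_two_parts(const u8 *p1, size_t l1,
				    const u8 *p2, size_t l2)
	{
		u32 c1 = crc32_le(~0, p1, l1);	/* seeded first fragment */
		u32 c2 = crc32_le(0, p2, l2);	/* second fragment from 0 */

		return crc32_le_combine(c1, c2, l2);
	}
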
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 04c1ef717fe0..8c28cbd7e104 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -52,9 +52,18 @@ static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-
+static int debug_objects_pool_size __read_mostly
+ = ODEBUG_POOL_SIZE;
+static int debug_objects_pool_min_level __read_mostly
+ = ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr *descr_test __read_mostly;
+/*
+ * Track the number of kmem_cache_alloc()/free() calls done.
+ */
+static int debug_objects_allocated;
+static int debug_objects_freed;
+
static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
@@ -88,13 +97,13 @@ static void fill_pool(void)
struct debug_obj *new;
unsigned long flags;
- if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+ if (likely(obj_pool_free >= debug_objects_pool_min_level))
return;
if (unlikely(!obj_cache))
return;
- while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+ while (obj_pool_free < debug_objects_pool_min_level) {
new = kmem_cache_zalloc(obj_cache, gfp);
if (!new)
@@ -102,6 +111,7 @@ static void fill_pool(void)
raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
+ debug_objects_allocated++;
obj_pool_free++;
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
@@ -162,24 +172,39 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
/*
* workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy. We also free
+ * the objects in a batch of 4 for each lock/unlock cycle.
*/
+#define ODEBUG_FREE_BATCH 4
+
static void free_obj_work(struct work_struct *work)
{
- struct debug_obj *obj;
+ struct debug_obj *objs[ODEBUG_FREE_BATCH];
unsigned long flags;
+ int i;
- raw_spin_lock_irqsave(&pool_lock, flags);
- while (obj_pool_free > ODEBUG_POOL_SIZE) {
- obj = hlist_entry(obj_pool.first, typeof(*obj), node);
- hlist_del(&obj->node);
- obj_pool_free--;
+ if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+ return;
+ while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
+ for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
+ objs[i] = hlist_entry(obj_pool.first,
+ typeof(*objs[0]), node);
+ hlist_del(&objs[i]->node);
+ }
+
+ obj_pool_free -= ODEBUG_FREE_BATCH;
+ debug_objects_freed += ODEBUG_FREE_BATCH;
/*
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
raw_spin_unlock_irqrestore(&pool_lock, flags);
- kmem_cache_free(obj_cache, obj);
- raw_spin_lock_irqsave(&pool_lock, flags);
+ for (i = 0; i < ODEBUG_FREE_BATCH; i++)
+ kmem_cache_free(obj_cache, objs[i]);
+ if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+ return;
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
@@ -198,7 +223,7 @@ static void free_object(struct debug_obj *obj)
* schedule work when the pool is filled and the cache is
* initialized:
*/
- if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+ if (obj_pool_free > debug_objects_pool_size && obj_cache)
sched = 1;
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
@@ -758,6 +783,8 @@ static int debug_stats_show(struct seq_file *m, void *v)
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
seq_printf(m, "pool_used :%d\n", obj_pool_used);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+ seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+ seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
return 0;
}
@@ -1116,4 +1143,11 @@ void __init debug_objects_mem_init(void)
pr_warn("out of memory.\n");
} else
debug_objects_selftest();
+
+ /*
+ * Increase the thresholds for allocating and freeing objects
+ * according to the number of possible CPUs available in the system.
+ */
+ debug_objects_pool_size += num_possible_cpus() * 32;
+ debug_objects_pool_min_level += num_possible_cpus() * 4;
}
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 036fc882cd72..1b0baf3008ea 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -72,7 +72,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
- inp = large_malloc(lz4_compressbound(uncomp_chunksize));
+ inp = large_malloc(LZ4_compressBound(uncomp_chunksize));
if (!inp) {
error("Could not allocate input buffer");
goto exit_1;
@@ -136,7 +136,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
inp += 4;
size -= 4;
} else {
- if (chunksize > lz4_compressbound(uncomp_chunksize)) {
+ if (chunksize > LZ4_compressBound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
@@ -152,11 +152,14 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
out_len -= dest_len;
} else
dest_len = out_len;
- ret = lz4_decompress(inp, &chunksize, outp, dest_len);
+
+ ret = LZ4_decompress_fast(inp, outp, dest_len);
+ chunksize = ret;
#else
dest_len = uncomp_chunksize;
- ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp,
- &dest_len);
+
+ ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len);
+ dest_len = ret;
#endif
if (ret < 0) {
error("Decoding failed");
diff --git a/lib/digsig.c b/lib/digsig.c
index 55b8b2f41a9e..03d7c63837ae 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -85,7 +85,7 @@ static int digsig_verify_rsa(struct key *key,
struct pubkey_hdr *pkh;
down_read(&key->sem);
- ukp = user_key_payload(key);
+ ukp = user_key_payload_locked(key);
if (ukp->datalen < sizeof(*pkh))
goto err1;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8971370bfb16..60c57ec936db 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1155,6 +1155,11 @@ static void check_unmap(struct dma_debug_entry *ref)
dir2name[ref->direction]);
}
+ /*
+ * Drivers should use dma_mapping_error() to check the returned
+ * addresses of dma_map_single() and dma_map_page().
+ * If not, print this warning message. See Documentation/DMA-API.txt.
+ */
if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
err_printk(ref->dev, entry,
"DMA-API: device driver failed to check map error"
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index 3d766e78fbe2..de26c8b68f34 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -1,7 +1,7 @@
/*
* lib/dma-noop.c
*
- * Simple DMA noop-ops that map 1:1 with memory
+ * DMA operations that map to physical addresses without flushing memory.
*/
#include <linux/export.h>
#include <linux/mm.h>
@@ -64,7 +64,7 @@ static int dma_noop_supported(struct device *dev, u64 mask)
return 1;
}
-struct dma_map_ops dma_noop_ops = {
+const struct dma_map_ops dma_noop_ops = {
.alloc = dma_noop_alloc,
.free = dma_noop_free,
.map_page = dma_noop_map_page,
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
new file mode 100644
index 000000000000..dcd4df1f7174
--- /dev/null
+++ b/lib/dma-virt.c
@@ -0,0 +1,72 @@
+/*
+ * lib/dma-virt.c
+ *
+ * DMA operations that map to virtual addresses without flushing memory.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+static void *dma_virt_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ void *ret;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+ if (ret)
+ *dma_handle = (uintptr_t)ret;
+ return ret;
+}
+
+static void dma_virt_free(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return (uintptr_t)(page_address(page) + offset);
+}
+
+static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+ sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return false;
+}
+
+static int dma_virt_supported(struct device *dev, u64 mask)
+{
+ return true;
+}
+
+const struct dma_map_ops dma_virt_ops = {
+ .alloc = dma_virt_alloc,
+ .free = dma_virt_free,
+ .map_page = dma_virt_map_page,
+ .map_sg = dma_virt_map_sg,
+ .mapping_error = dma_virt_mapping_error,
+ .dma_supported = dma_virt_supported,
+};
+EXPORT_SYMBOL(dma_virt_ops);
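
With these ops installed on a device, the "DMA address" handed back is simply the kernel virtual address of the buffer, so a software-only consumer can use it directly. A minimal sketch, assuming dev already uses dma_virt_ops and buf is directly mapped kernel memory:

	dma_addr_t a = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	void *cpu = (void *)(uintptr_t)a;	/* same bytes that buf points to */
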
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 18072ea9c20e..6ed74f78380c 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -33,7 +33,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
{
unsigned long tmp;
- if (!nbits || start >= nbits)
+ if (unlikely(start >= nbits))
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
@@ -151,7 +151,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
{
unsigned long tmp;
- if (!nbits || start >= nbits)
+ if (unlikely(start >= nbits))
return nbits;
tmp = addr[start / BITS_PER_LONG] ^ invert;
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index e77dfe00de36..8fa0791e8a1e 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -87,6 +87,14 @@ config FONT_6x10
embedded devices with a 320x240 screen, to get a reasonable number
of characters (53x24) that are still at a readable size.
+config FONT_10x18
+ bool "console 10x18 font (not supported by all drivers)" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ help
+ This is a high resolution console font for machines with very
+ big letters. It fits between the sun 12x22 and the normal 8x16 font.
+ If other fonts are too big or too small for you, say Y, otherwise say N.
+
config FONT_SUN8x16
bool "Sparc console 8x16 font"
depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
@@ -101,14 +109,6 @@ config FONT_SUN12x22
big letters (like the letters used in the SPARC PROM). If the
standard font is unreadable for you, say Y, otherwise say N.
-config FONT_10x18
- bool "console 10x18 font (not supported by all drivers)" if FONTS
- depends on FRAMEBUFFER_CONSOLE
- help
- This is a high resolution console font for machines with very
- big letters. It fits between the sun 12x22 and the normal 8x16 font.
- If other fonts are too big or too small for you, say Y, otherwise say N.
-
config FONT_AUTOSELECT
def_bool y
depends on !FONT_8x8
diff --git a/lib/glob.c b/lib/glob.c
index 500fc80d23e1..0ba3ea86b546 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -121,167 +121,3 @@ backtrack:
}
}
EXPORT_SYMBOL(glob_match);
-
-
-#ifdef CONFIG_GLOB_SELFTEST
-
-#include <linux/printk.h>
-#include <linux/moduleparam.h>
-
-/* Boot with "glob.verbose=1" to show successful tests, too */
-static bool verbose = false;
-module_param(verbose, bool, 0);
-
-struct glob_test {
- char const *pat, *str;
- bool expected;
-};
-
-static bool __pure __init test(char const *pat, char const *str, bool expected)
-{
- bool match = glob_match(pat, str);
- bool success = match == expected;
-
- /* Can't get string literals into a particular section, so... */
- static char const msg_error[] __initconst =
- KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
- static char const msg_ok[] __initconst =
- KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
- static char const mismatch[] __initconst = "mismatch";
- char const *message;
-
- if (!success)
- message = msg_error;
- else if (verbose)
- message = msg_ok;
- else
- return success;
-
- printk(message, pat, str, mismatch + 3*match);
- return success;
-}
-
-/*
- * The tests are all jammed together in one array to make it simpler
- * to place that array in the .init.rodata section. The obvious
- * "array of structures containing char *" has no way to force the
- * pointed-to strings to be in a particular section.
- *
- * Anyway, a test consists of:
- * 1. Expected glob_match result: '1' or '0'.
- * 2. Pattern to match: null-terminated string
- * 3. String to match against: null-terminated string
- *
- * The list of tests is terminated with a final '\0' instead of
- * a glob_match result character.
- */
-static char const glob_tests[] __initconst =
- /* Some basic tests */
- "1" "a\0" "a\0"
- "0" "a\0" "b\0"
- "0" "a\0" "aa\0"
- "0" "a\0" "\0"
- "1" "\0" "\0"
- "0" "\0" "a\0"
- /* Simple character class tests */
- "1" "[a]\0" "a\0"
- "0" "[a]\0" "b\0"
- "0" "[!a]\0" "a\0"
- "1" "[!a]\0" "b\0"
- "1" "[ab]\0" "a\0"
- "1" "[ab]\0" "b\0"
- "0" "[ab]\0" "c\0"
- "1" "[!ab]\0" "c\0"
- "1" "[a-c]\0" "b\0"
- "0" "[a-c]\0" "d\0"
- /* Corner cases in character class parsing */
- "1" "[a-c-e-g]\0" "-\0"
- "0" "[a-c-e-g]\0" "d\0"
- "1" "[a-c-e-g]\0" "f\0"
- "1" "[]a-ceg-ik[]\0" "a\0"
- "1" "[]a-ceg-ik[]\0" "]\0"
- "1" "[]a-ceg-ik[]\0" "[\0"
- "1" "[]a-ceg-ik[]\0" "h\0"
- "0" "[]a-ceg-ik[]\0" "f\0"
- "0" "[!]a-ceg-ik[]\0" "h\0"
- "0" "[!]a-ceg-ik[]\0" "]\0"
- "1" "[!]a-ceg-ik[]\0" "f\0"
- /* Simple wild cards */
- "1" "?\0" "a\0"
- "0" "?\0" "aa\0"
- "0" "??\0" "a\0"
- "1" "?x?\0" "axb\0"
- "0" "?x?\0" "abx\0"
- "0" "?x?\0" "xab\0"
- /* Asterisk wild cards (backtracking) */
- "0" "*??\0" "a\0"
- "1" "*??\0" "ab\0"
- "1" "*??\0" "abc\0"
- "1" "*??\0" "abcd\0"
- "0" "??*\0" "a\0"
- "1" "??*\0" "ab\0"
- "1" "??*\0" "abc\0"
- "1" "??*\0" "abcd\0"
- "0" "?*?\0" "a\0"
- "1" "?*?\0" "ab\0"
- "1" "?*?\0" "abc\0"
- "1" "?*?\0" "abcd\0"
- "1" "*b\0" "b\0"
- "1" "*b\0" "ab\0"
- "0" "*b\0" "ba\0"
- "1" "*b\0" "bb\0"
- "1" "*b\0" "abb\0"
- "1" "*b\0" "bab\0"
- "1" "*bc\0" "abbc\0"
- "1" "*bc\0" "bc\0"
- "1" "*bc\0" "bbc\0"
- "1" "*bc\0" "bcbc\0"
- /* Multiple asterisks (complex backtracking) */
- "1" "*ac*\0" "abacadaeafag\0"
- "1" "*ac*ae*ag*\0" "abacadaeafag\0"
- "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
- "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
- "1" "*abcd*\0" "abcabcabcabcdefg\0"
- "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
- "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
- "0" "*abcd*\0" "abcabcabcabcefg\0"
- "0" "*ab*cd*\0" "abcabcabcabcefg\0";
-
-static int __init glob_init(void)
-{
- unsigned successes = 0;
- unsigned n = 0;
- char const *p = glob_tests;
- static char const message[] __initconst =
- KERN_INFO "glob: %u self-tests passed, %u failed\n";
-
- /*
- * Tests are jammed together in a string. The first byte is '1'
- * or '0' to indicate the expected outcome, or '\0' to indicate the
- * end of the tests. Then come two null-terminated strings: the
- * pattern and the string to match it against.
- */
- while (*p) {
- bool expected = *p++ & 1;
- char const *pat = p;
-
- p += strlen(p) + 1;
- successes += test(pat, p, expected);
- p += strlen(p) + 1;
- n++;
- }
-
- n -= successes;
- printk(message, successes, n);
-
- /* What's the errno for "kernel bug detected"? Guess... */
- return n ? -ECANCELED : 0;
-}
-
-/* We need a dummy exit function to allow unload */
-static void __exit glob_fini(void) { }
-
-module_init(glob_init);
-module_exit(glob_fini);
-
-#endif /* CONFIG_GLOB_SELFTEST */
diff --git a/lib/globtest.c b/lib/globtest.c
new file mode 100644
index 000000000000..d8e97d43b905
--- /dev/null
+++ b/lib/globtest.c
@@ -0,0 +1,167 @@
+/*
+ * Extracted from glob.c
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/glob.h>
+#include <linux/printk.h>
+
+/* Boot with "glob.verbose=1" to show successful tests, too */
+static bool verbose = false;
+module_param(verbose, bool, 0);
+
+struct glob_test {
+ char const *pat, *str;
+ bool expected;
+};
+
+static bool __pure __init test(char const *pat, char const *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* Can't get string literals into a particular section, so... */
+ static char const msg_error[] __initconst =
+ KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
+ static char const msg_ok[] __initconst =
+ KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
+ static char const mismatch[] __initconst = "mismatch";
+ char const *message;
+
+ if (!success)
+ message = msg_error;
+ else if (verbose)
+ message = msg_ok;
+ else
+ return success;
+
+ printk(message, pat, str, mismatch + 3*match);
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to place that array in the .init.rodata section. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static char const glob_tests[] __initconst =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
+
+static int __init glob_init(void)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const *p = glob_tests;
+ static char const message[] __initconst =
+ KERN_INFO "glob: %u self-tests passed, %u failed\n";
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const *pat = p;
+
+ p += strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ printk(message, successes, n);
+
+ /* What's the errno for "kernel bug detected"? Guess... */
+ return n ? -ECANCELED : 0;
+}
+
+/* We need a dummy exit function to allow unload */
+static void __exit glob_fini(void) { }
+
+module_init(glob_init);
+module_exit(glob_fini);
+
+MODULE_DESCRIPTION("glob(7) matching tests");
+MODULE_LICENSE("Dual MIT/GPL");
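
The patterns exercised here are the same ones glob_match() accepts elsewhere in the kernel; a minimal caller sketch (the filter and names are hypothetical):

	#include <linux/glob.h>

	/* Hypothetical filter: match names such as "eth0", "eth10", ... */
	static bool want_netdev(const char *name)
	{
		return glob_match("eth*", name);
	}
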
diff --git a/lib/halfmd4.c b/lib/halfmd4.c
deleted file mode 100644
index 137e861d9690..000000000000
--- a/lib/halfmd4.c
+++ /dev/null
@@ -1,67 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/cryptohash.h>
-#include <linux/bitops.h>
-
-/* F, G and H are basic MD4 functions: selection, majority, parity */
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
-#define H(x, y, z) ((x) ^ (y) ^ (z))
-
-/*
- * The generic round function. The application is so specific that
- * we don't bother protecting all the arguments with parens, as is generally
- * good macro practice, in favor of extra legibility.
- * Rotation is separate from addition to prevent recomputation
- */
-#define ROUND(f, a, b, c, d, x, s) \
- (a += f(b, c, d) + x, a = rol32(a, s))
-#define K1 0
-#define K2 013240474631UL
-#define K3 015666365641UL
-
-/*
- * Basic cut-down MD4 transform. Returns only 32 bits of result.
- */
-__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
-{
- __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
-
- /* Round 1 */
- ROUND(F, a, b, c, d, in[0] + K1, 3);
- ROUND(F, d, a, b, c, in[1] + K1, 7);
- ROUND(F, c, d, a, b, in[2] + K1, 11);
- ROUND(F, b, c, d, a, in[3] + K1, 19);
- ROUND(F, a, b, c, d, in[4] + K1, 3);
- ROUND(F, d, a, b, c, in[5] + K1, 7);
- ROUND(F, c, d, a, b, in[6] + K1, 11);
- ROUND(F, b, c, d, a, in[7] + K1, 19);
-
- /* Round 2 */
- ROUND(G, a, b, c, d, in[1] + K2, 3);
- ROUND(G, d, a, b, c, in[3] + K2, 5);
- ROUND(G, c, d, a, b, in[5] + K2, 9);
- ROUND(G, b, c, d, a, in[7] + K2, 13);
- ROUND(G, a, b, c, d, in[0] + K2, 3);
- ROUND(G, d, a, b, c, in[2] + K2, 5);
- ROUND(G, c, d, a, b, in[4] + K2, 9);
- ROUND(G, b, c, d, a, in[6] + K2, 13);
-
- /* Round 3 */
- ROUND(H, a, b, c, d, in[3] + K3, 3);
- ROUND(H, d, a, b, c, in[7] + K3, 9);
- ROUND(H, c, d, a, b, in[2] + K3, 11);
- ROUND(H, b, c, d, a, in[6] + K3, 15);
- ROUND(H, a, b, c, d, in[1] + K3, 3);
- ROUND(H, d, a, b, c, in[5] + K3, 9);
- ROUND(H, c, d, a, b, in[0] + K3, 11);
- ROUND(H, b, c, d, a, in[4] + K3, 15);
-
- buf[0] += a;
- buf[1] += b;
- buf[2] += c;
- buf[3] += d;
-
- return buf[1]; /* "most hashed" word */
-}
-EXPORT_SYMBOL(half_md4_transform);
diff --git a/lib/idr.c b/lib/idr.c
index 52d2979a05e8..b13682bb0a1c 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1,1068 +1,409 @@
-/*
- * 2002-10-18 written by Jim Houston jim.houston@ccur.com
- * Copyright (C) 2002 by Concurrent Computer Corporation
- * Distributed under the GNU GPL license version 2.
- *
- * Modified by George Anzinger to reuse immediately and to use
- * find bit instructions. Also removed _irq on spinlocks.
- *
- * Modified by Nadia Derbey to make it RCU safe.
- *
- * Small id to pointer translation service.
- *
- * It uses a radix tree like structure as a sparse array indexed
- * by the id to obtain the pointer. The bitmap makes allocating
- * a new id quick.
- *
- * You call it to allocate an id (an int) an associate with that id a
- * pointer or what ever, we treat it as a (void *). You can pass this
- * id to a user for him to pass back at a later time. You then pass
- * that id to this code and it returns your pointer.
- */
-
-#ifndef TEST // to test in user space...
-#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/bitmap.h>
#include <linux/export.h>
-#endif
-#include <linux/err.h>
-#include <linux/string.h>
#include <linux/idr.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/percpu.h>
-
-#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
-#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-
-/* Leave the possibility of an incomplete final layer */
-#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
-/* Number of id_layer structs to leave in free list */
-#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
-
-static struct kmem_cache *idr_layer_cache;
-static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
-static DEFINE_PER_CPU(int, idr_preload_cnt);
+DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);
-/* the maximum ID which can be allocated given idr->layers */
-static int idr_max(int layers)
-{
- int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
-
- return (1 << bits) - 1;
-}
-
-/*
- * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
- * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
- * so on.
- */
-static int idr_layer_prefix_mask(int layer)
-{
- return ~idr_max(layer + 1);
-}
-
-static struct idr_layer *get_from_free_list(struct idr *idp)
-{
- struct idr_layer *p;
- unsigned long flags;
-
- spin_lock_irqsave(&idp->lock, flags);
- if ((p = idp->id_free)) {
- idp->id_free = p->ary[0];
- idp->id_free_cnt--;
- p->ary[0] = NULL;
- }
- spin_unlock_irqrestore(&idp->lock, flags);
- return(p);
-}
-
/**
- * idr_layer_alloc - allocate a new idr_layer
- * @gfp_mask: allocation mask
- * @layer_idr: optional idr to allocate from
- *
- * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
- * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
- * an idr_layer from @idr->id_free.
- *
- * @layer_idr is to maintain backward compatibility with the old alloc
- * interface - idr_pre_get() and idr_get_new*() - and will be removed
- * together with per-pool preload buffer.
- */
-static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
-{
- struct idr_layer *new;
-
- /* this is the old path, bypass to get_from_free_list() */
- if (layer_idr)
- return get_from_free_list(layer_idr);
-
- /*
- * Try to allocate directly from kmem_cache. We want to try this
- * before preload buffer; otherwise, non-preloading idr_alloc()
- * users will end up taking advantage of preloading ones. As the
- * following is allowed to fail for preloaded cases, suppress
- * warning this time.
- */
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
- if (new)
- return new;
-
- /*
- * Try to fetch one from the per-cpu preload buffer if in process
- * context. See idr_preload() for details.
- */
- if (!in_interrupt()) {
- preempt_disable();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
- }
- preempt_enable();
- if (new)
- return new;
- }
-
- /*
- * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
- * that memory allocation failure warning is printed as intended.
- */
- return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-}
-
-static void idr_layer_rcu_free(struct rcu_head *head)
-{
- struct idr_layer *layer;
-
- layer = container_of(head, struct idr_layer, rcu_head);
- kmem_cache_free(idr_layer_cache, layer);
-}
-
-static inline void free_layer(struct idr *idr, struct idr_layer *p)
-{
- if (idr->hint == p)
- RCU_INIT_POINTER(idr->hint, NULL);
- call_rcu(&p->rcu_head, idr_layer_rcu_free);
-}
-
-/* only called when idp->lock is held */
-static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
-{
- p->ary[0] = idp->id_free;
- idp->id_free = p;
- idp->id_free_cnt++;
-}
-
-static void move_to_free_list(struct idr *idp, struct idr_layer *p)
-{
- unsigned long flags;
-
- /*
- * Depends on the return element being zeroed.
- */
- spin_lock_irqsave(&idp->lock, flags);
- __move_to_free_list(idp, p);
- spin_unlock_irqrestore(&idp->lock, flags);
-}
-
-static void idr_mark_full(struct idr_layer **pa, int id)
-{
- struct idr_layer *p = pa[0];
- int l = 0;
-
- __set_bit(id & IDR_MASK, p->bitmap);
- /*
- * If this layer is full mark the bit in the layer above to
- * show that this part of the radix tree is full. This may
- * complete the layer above and require walking up the radix
- * tree.
- */
- while (bitmap_full(p->bitmap, IDR_SIZE)) {
- if (!(p = pa[++l]))
- break;
- id = id >> IDR_BITS;
- __set_bit((id & IDR_MASK), p->bitmap);
- }
-}
-
-static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
-{
- while (idp->id_free_cnt < MAX_IDR_FREE) {
- struct idr_layer *new;
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- if (new == NULL)
- return (0);
- move_to_free_list(idp, new);
- }
- return 1;
-}
-
-/**
- * sub_alloc - try to allocate an id without growing the tree depth
- * @idp: idr handle
- * @starting_id: id to start search at
- * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
- * @gfp_mask: allocation mask for idr_layer_alloc()
- * @layer_idr: optional idr passed to idr_layer_alloc()
- *
- * Allocate an id in range [@starting_id, INT_MAX] from @idp without
- * growing its depth. Returns
- *
- * the allocated id >= 0 if successful,
- * -EAGAIN if the tree needs to grow for allocation to succeed,
- * -ENOSPC if the id space is exhausted,
- * -ENOMEM if more idr_layers need to be allocated.
- */
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
- gfp_t gfp_mask, struct idr *layer_idr)
-{
- int n, m, sh;
- struct idr_layer *p, *new;
- int l, id, oid;
-
- id = *starting_id;
- restart:
- p = idp->top;
- l = idp->layers;
- pa[l--] = NULL;
- while (1) {
- /*
- * We run around this while until we reach the leaf node...
- */
- n = (id >> (IDR_BITS*l)) & IDR_MASK;
- m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
- if (m == IDR_SIZE) {
- /* no space available go back to previous layer. */
- l++;
- oid = id;
- id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
-
- /* if already at the top layer, we need to grow */
- if (id > idr_max(idp->layers)) {
- *starting_id = id;
- return -EAGAIN;
- }
- p = pa[l];
- BUG_ON(!p);
-
- /* If we need to go up one layer, continue the
- * loop; otherwise, restart from the top.
- */
- sh = IDR_BITS * (l + 1);
- if (oid >> sh == id >> sh)
- continue;
- else
- goto restart;
- }
- if (m != n) {
- sh = IDR_BITS*l;
- id = ((id >> sh) ^ n ^ m) << sh;
- }
- if ((id >= MAX_IDR_BIT) || (id < 0))
- return -ENOSPC;
- if (l == 0)
- break;
- /*
- * Create the layer below if it is missing.
- */
- if (!p->ary[m]) {
- new = idr_layer_alloc(gfp_mask, layer_idr);
- if (!new)
- return -ENOMEM;
- new->layer = l-1;
- new->prefix = id & idr_layer_prefix_mask(new->layer);
- rcu_assign_pointer(p->ary[m], new);
- p->count++;
- }
- pa[l--] = p;
- p = p->ary[m];
- }
-
- pa[l] = p;
- return id;
-}
-
-static int idr_get_empty_slot(struct idr *idp, int starting_id,
- struct idr_layer **pa, gfp_t gfp_mask,
- struct idr *layer_idr)
-{
- struct idr_layer *p, *new;
- int layers, v, id;
- unsigned long flags;
-
- id = starting_id;
-build_up:
- p = idp->top;
- layers = idp->layers;
- if (unlikely(!p)) {
- if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
- return -ENOMEM;
- p->layer = 0;
- layers = 1;
- }
- /*
- * Add a new layer to the top of the tree if the requested
- * id is larger than the currently allocated space.
- */
- while (id > idr_max(layers)) {
- layers++;
- if (!p->count) {
- /* special case: if the tree is currently empty,
- * then we grow the tree by moving the top node
- * upwards.
- */
- p->layer++;
- WARN_ON_ONCE(p->prefix);
- continue;
- }
- if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
- /*
- * The allocation failed. If we built part of
- * the structure tear it down.
- */
- spin_lock_irqsave(&idp->lock, flags);
- for (new = p; p && p != idp->top; new = p) {
- p = p->ary[0];
- new->ary[0] = NULL;
- new->count = 0;
- bitmap_clear(new->bitmap, 0, IDR_SIZE);
- __move_to_free_list(idp, new);
- }
- spin_unlock_irqrestore(&idp->lock, flags);
- return -ENOMEM;
- }
- new->ary[0] = p;
- new->count = 1;
- new->layer = layers-1;
- new->prefix = id & idr_layer_prefix_mask(new->layer);
- if (bitmap_full(p->bitmap, IDR_SIZE))
- __set_bit(0, new->bitmap);
- p = new;
- }
- rcu_assign_pointer(idp->top, p);
- idp->layers = layers;
- v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
- if (v == -EAGAIN)
- goto build_up;
- return(v);
-}
-
-/*
- * @id and @pa are from a successful allocation from idr_get_empty_slot().
- * Install the user pointer @ptr and mark the slot full.
- */
-static void idr_fill_slot(struct idr *idr, void *ptr, int id,
- struct idr_layer **pa)
-{
- /* update hint used for lookup, cleared from free_layer() */
- rcu_assign_pointer(idr->hint, pa[0]);
-
- rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
- pa[0]->count++;
- idr_mark_full(pa, id);
-}
-
-
-/**
- * idr_preload - preload for idr_alloc()
- * @gfp_mask: allocation mask to use for preloading
- *
- * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
- * process context and each idr_preload() invocation should be matched with
- * idr_preload_end(). Note that preemption is disabled while preloaded.
- *
- * The first idr_alloc() in the preloaded section can be treated as if it
- * were invoked with @gfp_mask used for preloading. This allows using more
- * permissive allocation masks for idrs protected by spinlocks.
- *
- * For example, if idr_alloc() below fails, the failure can be treated as
- * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
- *
- * idr_preload(GFP_KERNEL);
- * spin_lock(lock);
- *
- * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
- *
- * spin_unlock(lock);
- * idr_preload_end();
- * if (id < 0)
- * error;
- */
-void idr_preload(gfp_t gfp_mask)
-{
- /*
- * Consuming preload buffer from non-process context breaks preload
- * allocation guarantee. Disallow usage from those contexts.
- */
- WARN_ON_ONCE(in_interrupt());
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
-
- preempt_disable();
-
- /*
- * idr_alloc() is likely to succeed w/o full idr_layer buffer and
- * return value from idr_alloc() needs to be checked for failure
- * anyway. Silently give up if allocation fails. The caller can
- * treat failures from idr_alloc() as if idr_alloc() were called
- * with @gfp_mask which should be enough.
- */
- while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
- struct idr_layer *new;
-
- preempt_enable();
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- preempt_disable();
- if (!new)
- break;
-
- /* link the new one to per-cpu preload list */
- new->ary[0] = __this_cpu_read(idr_preload_head);
- __this_cpu_write(idr_preload_head, new);
- __this_cpu_inc(idr_preload_cnt);
- }
-}
-EXPORT_SYMBOL(idr_preload);
-
-/**
- * idr_alloc - allocate new idr entry
- * @idr: the (initialized) idr
+ * idr_alloc - allocate an id
+ * @idr: idr handle
* @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive, <= 0 for max)
- * @gfp_mask: memory allocation flags
+ * @end: the maximum id (exclusive)
+ * @gfp: memory allocation flags
*
- * Allocate an id in [start, end) and associate it with @ptr. If no ID is
- * available in the specified range, returns -ENOSPC. On memory allocation
- * failure, returns -ENOMEM.
+ * Allocates an unused ID in the range [start, end). Returns -ENOSPC
+ * if there are no unused IDs in that range.
*
* Note that @end is treated as max when <= 0. This is to always allow
* using @start + N as @end as long as N is inside integer range.
*
- * The user is responsible for exclusively synchronizing all operations
- * which may modify @idr. However, read-only accesses such as idr_find()
- * or iteration can be performed under RCU read lock provided the user
- * destroys @ptr in RCU-safe way after removal from idr.
+ * Simultaneous modifications to the @idr are not allowed and should be
+ * prevented by the user, usually with a lock. idr_alloc() may be called
+ * concurrently with read-only accesses to the @idr, such as idr_find() and
+ * idr_for_each_entry().
*/
-int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
- int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- int id;
+ void __rcu **slot;
+ struct radix_tree_iter iter;
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
-
- /* sanity checks */
if (WARN_ON_ONCE(start < 0))
return -EINVAL;
- if (unlikely(max < start))
- return -ENOSPC;
+ if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
+ return -EINVAL;
- /* allocate id */
- id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
- if (unlikely(id < 0))
- return id;
- if (unlikely(id > max))
- return -ENOSPC;
+ radix_tree_iter_init(&iter, start);
+ slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
- idr_fill_slot(idr, ptr, id, pa);
- return id;
+ radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
+ radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
+ return iter.index;
}
EXPORT_SYMBOL_GPL(idr_alloc);
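
/*
 * A minimal usage sketch for the interface documented above, assuming a
 * caller-provided lock and IDR ("my_lock", "my_idr" and "obj" are
 * illustrative names).  Passing 0 as @end asks for the full positive
 * range, and idr_preload()/idr_preload_end() let the GFP_NOWAIT
 * allocation under the spinlock fall back on preloaded memory.
 */
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_assign_id(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* the new ID, or -ENOMEM / -ENOSPC on failure */
}
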
/**
* idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
- * @idr: the (initialized) idr
+ * @idr: idr handle
* @ptr: pointer to be associated with the new id
* @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive, <= 0 for max)
- * @gfp_mask: memory allocation flags
- *
- * Essentially the same as idr_alloc, but prefers to allocate progressively
- * higher ids if it can. If the "cur" counter wraps, then it will start again
- * at the "start" end of the range and allocate one that has already been used.
- */
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
- gfp_t gfp_mask)
-{
- int id;
-
- id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
- if (id == -ENOSPC)
- id = idr_alloc(idr, ptr, start, end, gfp_mask);
-
- if (likely(id >= 0))
- idr->cur = id + 1;
- return id;
-}
-EXPORT_SYMBOL(idr_alloc_cyclic);
-
-static void idr_remove_warning(int id)
-{
- WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
-}
-
-static void sub_remove(struct idr *idp, int shift, int id)
-{
- struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_IDR_LEVEL + 1];
- struct idr_layer ***paa = &pa[0];
- struct idr_layer *to_free;
- int n;
-
- *paa = NULL;
- *++paa = &idp->top;
-
- while ((shift > 0) && p) {
- n = (id >> shift) & IDR_MASK;
- __clear_bit(n, p->bitmap);
- *++paa = &p->ary[n];
- p = p->ary[n];
- shift -= IDR_BITS;
- }
- n = id & IDR_MASK;
- if (likely(p != NULL && test_bit(n, p->bitmap))) {
- __clear_bit(n, p->bitmap);
- RCU_INIT_POINTER(p->ary[n], NULL);
- to_free = NULL;
- while(*paa && ! --((**paa)->count)){
- if (to_free)
- free_layer(idp, to_free);
- to_free = **paa;
- **paa-- = NULL;
- }
- if (!*paa)
- idp->layers = 0;
- if (to_free)
- free_layer(idp, to_free);
- } else
- idr_remove_warning(id);
-}
-
-/**
- * idr_remove - remove the given id and free its slot
- * @idp: idr handle
- * @id: unique key
- */
-void idr_remove(struct idr *idp, int id)
-{
- struct idr_layer *p;
- struct idr_layer *to_free;
-
- if (id < 0)
- return;
-
- if (id > idr_max(idp->layers)) {
- idr_remove_warning(id);
- return;
- }
-
- sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
- if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
- idp->top->ary[0]) {
- /*
- * Single child at leftmost slot: we can shrink the tree.
- * This level is not needed anymore since when layers are
- * inserted, they are inserted at the top of the existing
- * tree.
- */
- to_free = idp->top;
- p = idp->top->ary[0];
- rcu_assign_pointer(idp->top, p);
- --idp->layers;
- to_free->count = 0;
- bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
- free_layer(idp, to_free);
- }
-}
-EXPORT_SYMBOL(idr_remove);
-
-static void __idr_remove_all(struct idr *idp)
-{
- int n, id, max;
- int bt_mask;
- struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- struct idr_layer **paa = &pa[0];
-
- n = idp->layers * IDR_BITS;
- *paa = idp->top;
- RCU_INIT_POINTER(idp->top, NULL);
- max = idr_max(idp->layers);
-
- id = 0;
- while (id >= 0 && id <= max) {
- p = *paa;
- while (n > IDR_BITS && p) {
- n -= IDR_BITS;
- p = p->ary[(id >> n) & IDR_MASK];
- *++paa = p;
- }
-
- bt_mask = id;
- id += 1 << n;
- /* Get the highest bit that the above add changed from 0->1. */
- while (n < fls(id ^ bt_mask)) {
- if (*paa)
- free_layer(idp, *paa);
- n += IDR_BITS;
- --paa;
- }
- }
- idp->layers = 0;
-}
-
-/**
- * idr_destroy - release all cached layers within an idr tree
- * @idp: idr handle
- *
- * Free all id mappings and all idp_layers. After this function, @idp is
- * completely unused and can be freed / recycled. The caller is
- * responsible for ensuring that no one else accesses @idp during or after
- * idr_destroy().
+ * @end: the maximum id (exclusive)
+ * @gfp: memory allocation flags
*
- * A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessary, then idr_destroy() to
- * free up the id mappings and cached idr_layers.
+ * Allocates an ID larger than the last ID allocated if one is available.
+ * If not, it will attempt to allocate the smallest ID that is larger than
+ * or equal to @start.
*/
-void idr_destroy(struct idr *idp)
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
- __idr_remove_all(idp);
+ int id, curr = idr->idr_next;
- while (idp->id_free_cnt) {
- struct idr_layer *p = get_from_free_list(idp);
- kmem_cache_free(idr_layer_cache, p);
- }
-}
-EXPORT_SYMBOL(idr_destroy);
+ if (curr < start)
+ curr = start;
-void *idr_find_slowpath(struct idr *idp, int id)
-{
- int n;
- struct idr_layer *p;
-
- if (id < 0)
- return NULL;
-
- p = rcu_dereference_raw(idp->top);
- if (!p)
- return NULL;
- n = (p->layer+1) * IDR_BITS;
+ id = idr_alloc(idr, ptr, curr, end, gfp);
+ if ((id == -ENOSPC) && (curr > start))
+ id = idr_alloc(idr, ptr, start, curr, gfp);
- if (id > idr_max(p->layer + 1))
- return NULL;
- BUG_ON(n == 0);
+ if (id >= 0)
+ idr->idr_next = id + 1U;
- while (n > 0 && p) {
- n -= IDR_BITS;
- BUG_ON(n != p->layer*IDR_BITS);
- p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
- }
- return((void *)p);
+ return id;
}
-EXPORT_SYMBOL(idr_find_slowpath);
+EXPORT_SYMBOL(idr_alloc_cyclic);
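
/*
 * Sketch of cyclic allocation built on the function above: IDs walk upward
 * through [1, 1000) and wrap back to the low end once the top is reached.
 * "session_idr" and "session" are illustrative names.
 */
static DEFINE_IDR(session_idr);

static int new_session_id(void *session)
{
	return idr_alloc_cyclic(&session_idr, session, 1, 1000, GFP_KERNEL);
}
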
/**
* idr_for_each - iterate through all stored pointers
- * @idp: idr handle
+ * @idr: idr handle
* @fn: function to be called for each pointer
- * @data: data passed back to callback function
+ * @data: data passed to callback function
*
- * Iterate over the pointers registered with the given idr. The
- * callback function will be called for each pointer currently
- * registered, passing the id, the pointer and the data pointer passed
- * to this function. It is not safe to modify the idr tree while in
- * the callback, so functions such as idr_get_new and idr_remove are
- * not allowed.
+ * The callback function will be called for each entry in @idr, passing
+ * the id, the pointer and the data pointer passed to this function.
*
- * We check the return of @fn each time. If it returns anything other
- * than %0, we break out and return that value.
+ * If @fn returns anything other than %0, the iteration stops and that
+ * value is returned from this function.
*
- * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
+ * idr_for_each() can be called concurrently with idr_alloc() and
+ * idr_remove() if protected by RCU. Newly added entries may not be
+ * seen and deleted entries may be seen, but adding and removing entries
+ * will not cause other entries to be skipped, nor spurious ones to be seen.
*/
-int idr_for_each(struct idr *idp,
- int (*fn)(int id, void *p, void *data), void *data)
+int idr_for_each(const struct idr *idr,
+ int (*fn)(int id, void *p, void *data), void *data)
{
- int n, id, max, error = 0;
- struct idr_layer *p;
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- struct idr_layer **paa = &pa[0];
-
- n = idp->layers * IDR_BITS;
- *paa = rcu_dereference_raw(idp->top);
- max = idr_max(idp->layers);
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- id = 0;
- while (id >= 0 && id <= max) {
- p = *paa;
- while (n > 0 && p) {
- n -= IDR_BITS;
- p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
- *++paa = p;
- }
-
- if (p) {
- error = fn(id, (void *)p, data);
- if (error)
- break;
- }
-
- id += 1 << n;
- while (n < fls(id)) {
- n += IDR_BITS;
- --paa;
- }
+ radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
+ int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
+ if (ret)
+ return ret;
}
- return error;
+ return 0;
}
EXPORT_SYMBOL(idr_for_each);
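
/*
 * Sketch of an idr_for_each() callback matching the description above;
 * returning non-zero from the callback stops the walk and that value is
 * handed back to the caller.
 */
static int dump_entry(int id, void *p, void *data)
{
	pr_info("id %d -> entry %p\n", id, p);
	return 0;	/* non-zero here would terminate the iteration */
}

/* Typical call: idr_for_each(&my_idr, dump_entry, NULL); */
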
/**
- * idr_get_next - lookup next object of id to given id.
- * @idp: idr handle
- * @nextidp: pointer to lookup key
- *
- * Returns pointer to registered object with id, which is next number to
- * given id. After being looked up, *@nextidp will be updated for the next
- * iteration.
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
+ * idr_get_next - Find next populated entry
+ * @idr: idr handle
+ * @nextid: Pointer to lowest possible ID to return
+ *
+ * Returns the next populated entry in the tree with an ID greater than
+ * or equal to the value pointed to by @nextid. On exit, @nextid is updated
+ * to the ID of the found value. To use in a loop, the value pointed to by
+ * nextid must be incremented by the user.
*/
-void *idr_get_next(struct idr *idp, int *nextidp)
+void *idr_get_next(struct idr *idr, int *nextid)
{
- struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
- struct idr_layer **paa = &pa[0];
- int id = *nextidp;
- int n, max;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- /* find first ent */
- p = *paa = rcu_dereference_raw(idp->top);
- if (!p)
+ slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
+ if (!slot)
return NULL;
- n = (p->layer + 1) * IDR_BITS;
- max = idr_max(p->layer + 1);
-
- while (id >= 0 && id <= max) {
- p = *paa;
- while (n > 0 && p) {
- n -= IDR_BITS;
- p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
- *++paa = p;
- }
-
- if (p) {
- *nextidp = id;
- return p;
- }
- /*
- * Proceed to the next layer at the current level. Unlike
- * idr_for_each(), @id isn't guaranteed to be aligned to
- * layer boundary at this point and adding 1 << n may
- * incorrectly skip IDs. Make sure we jump to the
- * beginning of the next layer using round_up().
- */
- id = round_up(id + 1, 1 << n);
- while (n < fls(id)) {
- n += IDR_BITS;
- --paa;
- }
- }
- return NULL;
+ *nextid = iter.index;
+ return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);
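
/*
 * Sketch of a caller-driven walk using idr_get_next(); the caller advances
 * the index itself between calls, which is what the idr_for_each_entry()
 * helper in <linux/idr.h> does on its behalf.
 */
static int count_entries(struct idr *idr)
{
	void *entry;
	int id = 0;
	int count = 0;

	while ((entry = idr_get_next(idr, &id)) != NULL) {
		count++;
		id++;	/* step past the entry that was just returned */
	}
	return count;
}
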
-
/**
* idr_replace - replace pointer for given id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: lookup key
+ * @idr: idr handle
+ * @ptr: New pointer to associate with the ID
+ * @id: Lookup key
*
- * Replace the pointer registered with an id and return the old value.
- * A %-ENOENT return indicates that @id was not found.
- * A %-EINVAL return indicates that @id was not within valid constraints.
+ * Replace the pointer registered with an ID and return the old value.
+ * This function can be called under the RCU read lock concurrently with
+ * idr_alloc() and idr_remove() (as long as the ID being removed is not
+ * the one being replaced!).
*
- * The caller must serialize with writers.
+ * Returns: the old value on success. %-ENOENT indicates that @id was not found.
+ * %-EINVAL indicates that @id or @ptr were not valid.
*/
-void *idr_replace(struct idr *idp, void *ptr, int id)
+void *idr_replace(struct idr *idr, void *ptr, int id)
{
- int n;
- struct idr_layer *p, *old_p;
+ struct radix_tree_node *node;
+ void __rcu **slot = NULL;
+ void *entry;
- if (id < 0)
+ if (WARN_ON_ONCE(id < 0))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return ERR_PTR(-EINVAL);
- p = idp->top;
- if (!p)
- return ERR_PTR(-ENOENT);
-
- if (id > idr_max(p->layer + 1))
- return ERR_PTR(-ENOENT);
-
- n = p->layer * IDR_BITS;
- while ((n > 0) && p) {
- p = p->ary[(id >> n) & IDR_MASK];
- n -= IDR_BITS;
- }
-
- n = id & IDR_MASK;
- if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
+ entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
+ if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
- old_p = p->ary[n];
- rcu_assign_pointer(p->ary[n], ptr);
+ __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);
- return old_p;
+ return entry;
}
EXPORT_SYMBOL(idr_replace);
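
/*
 * Sketch of swapping the entry stored at an existing ID; the old pointer
 * is returned so the caller can release it (kfree() here assumes the old
 * entry was kmalloc()ed, which is an assumption of this example).
 */
static int swap_entry(struct idr *idr, int id, void *new)
{
	void *old = idr_replace(idr, new, id);

	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */
	kfree(old);
	return 0;
}
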
-void __init idr_init_cache(void)
-{
- idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
-}
-
-/**
- * idr_init - initialize idr handle
- * @idp: idr handle
- *
- * This function is use to set up the handle (@idp) that you will pass
- * to the rest of the functions.
- */
-void idr_init(struct idr *idp)
-{
- memset(idp, 0, sizeof(struct idr));
- spin_lock_init(&idp->lock);
-}
-EXPORT_SYMBOL(idr_init);
-
-static int idr_has_entry(int id, void *p, void *data)
-{
- return 1;
-}
-
-bool idr_is_empty(struct idr *idp)
-{
- return !idr_for_each(idp, idr_has_entry, NULL);
-}
-EXPORT_SYMBOL(idr_is_empty);
-
/**
* DOC: IDA description
- * IDA - IDR based ID allocator
*
- * This is id allocator without id -> pointer translation. Memory
- * usage is much lower than full blown idr because each id only
- * occupies a bit. ida uses a custom leaf node which contains
- * IDA_BITMAP_BITS slots.
- *
- * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
+ * The IDA is an ID allocator which does not provide the ability to
+ * associate an ID with a pointer. As such, it only needs to store one
+ * bit per ID, and so is more space efficient than an IDR. To use an IDA,
+ * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
+ * then initialise it using ida_init()). To allocate a new ID, call
+ * ida_simple_get(). To free an ID, call ida_simple_remove().
+ *
+ * If you have more complex locking requirements, use a loop around
+ * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
+ * ida_remove() to free an ID. You must make sure that ida_get_new() and
+ * ida_remove() cannot be called at the same time as each other for the
+ * same IDA.
+ *
+ * You can also use ida_get_new_above() if you need an ID to be allocated
+ * above a particular number. ida_destroy() can be used to dispose of an
+ * IDA without needing to free the individual IDs in it. You can use
+ * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
+ *
+ * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
+ * limitation, it should be quite straightforward to raise the maximum.
*/
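
/*
 * Minimal IDA usage sketch following the description above.  The simple
 * API takes care of locking and memory internally, so most callers need
 * nothing more than this ("my_ida" is an illustrative name).
 */
static DEFINE_IDA(my_ida);

static int get_minor(void)
{
	/* any free ID in [0, 256), or -ENOSPC / -ENOMEM on failure */
	return ida_simple_get(&my_ida, 0, 256, GFP_KERNEL);
}

static void put_minor(int minor)
{
	ida_simple_remove(&my_ida, minor);
}
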
-static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
-{
- unsigned long flags;
-
- if (!ida->free_bitmap) {
- spin_lock_irqsave(&ida->idr.lock, flags);
- if (!ida->free_bitmap) {
- ida->free_bitmap = bitmap;
- bitmap = NULL;
- }
- spin_unlock_irqrestore(&ida->idr.lock, flags);
- }
-
- kfree(bitmap);
-}
-
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp_mask: memory allocation flag
- *
- * This function should be called prior to locking and calling the
- * following function. It preallocates enough memory to satisfy the
- * worst possible allocation.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
+/*
+ * Developer's notes:
+ *
+ * The IDA uses the functionality provided by the IDR & radix tree to store
+ * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
+ * free, unlike the IDR where it means at least one entry is free.
+ *
+ * I considered telling the radix tree that each slot is an order-10 node
+ * and storing the bit numbers in the radix tree, but the radix tree can't
+ * allow a single multiorder entry at index 0, which would significantly
+ * increase memory consumption for the IDA. So instead we divide the index
+ * by the number of bits in the leaf bitmap before doing a radix tree lookup.
+ *
+ * As an optimisation, if there are only a few low bits set in any given
+ * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
+ * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
+ * directly in the entry. By being really tricksy, we could store
+ * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
+ * for 0-3 allocated IDs.
+ *
+ * We allow the radix tree 'exceptional' count to get out of date. Nothing
+ * in the IDA nor the radix tree code checks it. If it becomes important
+ * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
+ * calls to radix_tree_iter_replace() which will correct the exceptional
+ * count.
+ *
+ * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
+ * equivalent, it will still need locking. Going to RCU lookup would require
+ * using RCU to free bitmaps, and that's not trivial without embedding an
+ * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
+ * bitmap, which is excessive.
*/
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
-{
- /* allocate idr_layers */
- if (!__idr_pre_get(&ida->idr, gfp_mask))
- return 0;
- /* allocate free_bitmap */
- if (!ida->free_bitmap) {
- struct ida_bitmap *bitmap;
-
- bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
- if (!bitmap)
- return 0;
-
- free_bitmap(ida, bitmap);
- }
-
- return 1;
-}
-EXPORT_SYMBOL(ida_pre_get);
+#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
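
/*
 * Sketch of the ID -> (radix tree index, bit) split described in the notes
 * above; it mirrors the first lines of ida_get_new_above() and ida_remove()
 * below.
 */
static inline void ida_locate(int id, unsigned long *index, unsigned *bit)
{
	*index = id / IDA_BITMAP_BITS;	/* which leaf bitmap */
	*bit = id % IDA_BITMAP_BITS;	/* which bit within that leaf */
}
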
/**
* ida_get_new_above - allocate new ID above or equal to a start id
- * @ida: ida handle
- * @starting_id: id to start search at
- * @p_id: pointer to the allocated handle
+ * @ida: ida handle
+ * @start: id to start search at
+ * @id: pointer to the allocated handle
*
- * Allocate new ID above or equal to @starting_id. It should be called
- * with any required locks.
+ * Allocate a new ID above or equal to @start. It should be called
+ * with any required locks to ensure that concurrent calls to
+ * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
+ * Consider using ida_simple_get() if you do not have complex locking
+ * requirements.
*
 * If memory is required, it will return %-EAGAIN; you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
- * return %-ENOSPC.
- *
- * Note that callers must ensure that concurrent access to @ida is not possible.
- * See ida_simple_get() for a varaint which takes care of locking.
+ * return %-ENOSPC. On success, it will return 0.
*
- * @p_id returns a value in the range @starting_id ... %0x7fffffff.
+ * @id returns a value in the range @start ... %0x7fffffff.
*/
-int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+int ida_get_new_above(struct ida *ida, int start, int *id)
{
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
+ struct radix_tree_root *root = &ida->ida_rt;
+ void __rcu **slot;
+ struct radix_tree_iter iter;
struct ida_bitmap *bitmap;
- unsigned long flags;
- int idr_id = starting_id / IDA_BITMAP_BITS;
- int offset = starting_id % IDA_BITMAP_BITS;
- int t, id;
-
- restart:
- /* get vacant slot */
- t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
- if (t < 0)
- return t == -ENOMEM ? -EAGAIN : t;
-
- if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
- return -ENOSPC;
-
- if (t != idr_id)
- offset = 0;
- idr_id = t;
-
- /* if bitmap isn't there, create a new one */
- bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
- if (!bitmap) {
- spin_lock_irqsave(&ida->idr.lock, flags);
- bitmap = ida->free_bitmap;
- ida->free_bitmap = NULL;
- spin_unlock_irqrestore(&ida->idr.lock, flags);
-
- if (!bitmap)
- return -EAGAIN;
-
- memset(bitmap, 0, sizeof(struct ida_bitmap));
- rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
- (void *)bitmap);
- pa[0]->count++;
- }
-
- /* lookup for empty slot */
- t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
- if (t == IDA_BITMAP_BITS) {
- /* no empty slot after offset, continue to the next chunk */
- idr_id++;
- offset = 0;
- goto restart;
- }
-
- id = idr_id * IDA_BITMAP_BITS + t;
- if (id >= MAX_IDR_BIT)
- return -ENOSPC;
+ unsigned long index;
+ unsigned bit, ebit;
+ int new;
+
+ index = start / IDA_BITMAP_BITS;
+ bit = start % IDA_BITMAP_BITS;
+ ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ slot = radix_tree_iter_init(&iter, index);
+ for (;;) {
+ if (slot)
+ slot = radix_tree_next_slot(slot, &iter,
+ RADIX_TREE_ITER_TAGGED);
+ if (!slot) {
+ slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
+ if (IS_ERR(slot)) {
+ if (slot == ERR_PTR(-ENOMEM))
+ return -EAGAIN;
+ return PTR_ERR(slot);
+ }
+ }
+ if (iter.index > index) {
+ bit = 0;
+ ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
+ }
+ new = iter.index * IDA_BITMAP_BITS;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ unsigned long tmp = (unsigned long)bitmap;
+ ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
+ if (ebit < BITS_PER_LONG) {
+ tmp |= 1UL << ebit;
+ rcu_assign_pointer(*slot, (void *)tmp);
+ *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ rcu_assign_pointer(*slot, bitmap);
+ }
- __set_bit(t, bitmap->bitmap);
- if (++bitmap->nr_busy == IDA_BITMAP_BITS)
- idr_mark_full(pa, idr_id);
+ if (bitmap) {
+ bit = find_next_zero_bit(bitmap->bitmap,
+ IDA_BITMAP_BITS, bit);
+ new += bit;
+ if (new < 0)
+ return -ENOSPC;
+ if (bit == IDA_BITMAP_BITS)
+ continue;
- *p_id = id;
+ __set_bit(bit, bitmap->bitmap);
+ if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+ radix_tree_iter_tag_clear(root, &iter,
+ IDR_FREE);
+ } else {
+ new += bit;
+ if (new < 0)
+ return -ENOSPC;
+ if (ebit < BITS_PER_LONG) {
+ bitmap = (void *)((1UL << ebit) |
+ RADIX_TREE_EXCEPTIONAL_ENTRY);
+ radix_tree_iter_replace(root, &iter, slot,
+ bitmap);
+ *id = new;
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ __set_bit(bit, bitmap->bitmap);
+ radix_tree_iter_replace(root, &iter, slot, bitmap);
+ }
- /* Each leaf node can handle nearly a thousand slots and the
- * whole idea of ida is to have small memory foot print.
- * Throw away extra resources one by one after each successful
- * allocation.
- */
- if (ida->idr.id_free_cnt || ida->free_bitmap) {
- struct idr_layer *p = get_from_free_list(&ida->idr);
- if (p)
- kmem_cache_free(idr_layer_cache, p);
+ *id = new;
+ return 0;
}
-
- return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
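
/*
 * Sketch of the locked retry loop described in the comment above, for
 * callers that use the low-level API rather than ida_simple_get()
 * ("my_lock" is an illustrative caller-provided lock).
 */
static int alloc_id_above(struct ida *ida, spinlock_t *my_lock, int start)
{
	int id, err;

	do {
		if (!ida_pre_get(ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(my_lock);
		err = ida_get_new_above(ida, start, &id);
		spin_unlock(my_lock);
	} while (err == -EAGAIN);

	return err ? err : id;
}
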
/**
- * ida_remove - remove the given ID
- * @ida: ida handle
- * @id: ID to free
+ * ida_remove - Free the given ID
+ * @ida: ida handle
+ * @id: ID to free
+ *
+ * This function should not be called at the same time as ida_get_new_above().
*/
void ida_remove(struct ida *ida, int id)
{
- struct idr_layer *p = ida->idr.top;
- int shift = (ida->idr.layers - 1) * IDR_BITS;
- int idr_id = id / IDA_BITMAP_BITS;
- int offset = id % IDA_BITMAP_BITS;
- int n;
+ unsigned long index = id / IDA_BITMAP_BITS;
+ unsigned offset = id % IDA_BITMAP_BITS;
struct ida_bitmap *bitmap;
+ unsigned long *btmp;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- if (idr_id > idr_max(ida->idr.layers))
+ slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
+ if (!slot)
goto err;
- /* clear full bits while looking up the leaf idr_layer */
- while ((shift > 0) && p) {
- n = (idr_id >> shift) & IDR_MASK;
- __clear_bit(n, p->bitmap);
- p = p->ary[n];
- shift -= IDR_BITS;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ btmp = (unsigned long *)slot;
+ offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
+ if (offset >= BITS_PER_LONG)
+ goto err;
+ } else {
+ btmp = bitmap->bitmap;
}
-
- if (p == NULL)
- goto err;
-
- n = idr_id & IDR_MASK;
- __clear_bit(n, p->bitmap);
-
- bitmap = (void *)p->ary[n];
- if (!bitmap || !test_bit(offset, bitmap->bitmap))
+ if (!test_bit(offset, btmp))
goto err;
- /* update bitmap and remove it if empty */
- __clear_bit(offset, bitmap->bitmap);
- if (--bitmap->nr_busy == 0) {
- __set_bit(n, p->bitmap); /* to please idr_remove() */
- idr_remove(&ida->idr, idr_id);
- free_bitmap(ida, bitmap);
+ __clear_bit(offset, btmp);
+ radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
+ if (radix_tree_exception(bitmap)) {
+ if (rcu_dereference_raw(*slot) ==
+ (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
+ radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
+ kfree(bitmap);
+ radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
}
-
return;
-
err:
WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
/**
- * ida_destroy - release all cached layers within an ida tree
- * @ida: ida handle
+ * ida_destroy - Free the contents of an ida
+ * @ida: ida handle
+ *
+ * Calling this function releases all resources associated with an IDA. When
+ * this call returns, the IDA is empty and can be reused or freed. The caller
+ * should not allow ida_remove() or ida_get_new_above() to be called at the
+ * same time.
*/
void ida_destroy(struct ida *ida)
{
- idr_destroy(&ida->idr);
- kfree(ida->free_bitmap);
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
+ struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
+ if (!radix_tree_exception(bitmap))
+ kfree(bitmap);
+ radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+ }
}
EXPORT_SYMBOL(ida_destroy);
@@ -1141,18 +482,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
-
-/**
- * ida_init - initialize ida handle
- * @ida: ida handle
- *
- * This function is use to set up the handle (@ida) that you will pass
- * to the rest of the functions.
- */
-void ida_init(struct ida *ida)
-{
- memset(ida, 0, sizeof(struct ida));
- idr_init(&ida->idr);
-
-}
-EXPORT_SYMBOL(ida_init);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
return err;
}
-EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 25f572303801..e68604ae3ced 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+static inline void pipe_truncate(struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ if (pipe->nrbufs) {
+ size_t off = i->iov_offset;
+ int idx = i->idx;
+ int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+ if (off) {
+ pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+ idx = next_idx(idx, pipe);
+ nrbufs++;
+ }
+ while (pipe->nrbufs > nrbufs) {
+ pipe_buf_release(pipe, &pipe->bufs[idx]);
+ idx = next_idx(idx, pipe);
+ pipe->nrbufs--;
+ }
+ }
+}
+
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- struct pipe_buffer *buf;
- int idx = i->idx;
- size_t off = i->iov_offset, orig_sz;
-
if (unlikely(i->count < size))
size = i->count;
- orig_sz = size;
-
if (size) {
+ struct pipe_buffer *buf;
+ size_t off = i->iov_offset, left = size;
+ int idx = i->idx;
if (off) /* make it relative to the beginning of buffer */
- size += off - pipe->bufs[idx].offset;
+ left += off - pipe->bufs[idx].offset;
while (1) {
buf = &pipe->bufs[idx];
- if (size <= buf->len)
+ if (left <= buf->len)
break;
- size -= buf->len;
+ left -= buf->len;
idx = next_idx(idx, pipe);
}
- buf->len = size;
i->idx = idx;
- off = i->iov_offset = buf->offset + size;
- }
- if (off)
- idx = next_idx(idx, pipe);
- if (pipe->nrbufs) {
- int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- /* [curbuf,unused) is in use. Free [idx,unused) */
- while (idx != unused) {
- pipe_buf_release(pipe, &pipe->bufs[idx]);
- idx = next_idx(idx, pipe);
- pipe->nrbufs--;
- }
+ i->iov_offset = buf->offset + left;
}
- i->count -= orig_sz;
+ i->count -= size;
+ /* ... and discard everything past that point */
+ pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
size_t count)
{
BUG_ON(direction != ITER_PIPE);
+ WARN_ON(pipe->nrbufs == pipe->buffers);
i->type = direction;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 7f7bfa55eb6d..a34db8d27667 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,15 +20,16 @@
bool __list_add_valid(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- CHECK_DATA_CORRUPTION(next->prev != prev,
- "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
- prev, next->prev, next);
- CHECK_DATA_CORRUPTION(prev->next != next,
- "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
- next, prev->next, prev);
- CHECK_DATA_CORRUPTION(new == prev || new == next,
- "list_add double add: new=%p, prev=%p, next=%p.\n",
- new, prev, next);
+ if (CHECK_DATA_CORRUPTION(next->prev != prev,
+ "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+ prev, next->prev, next) ||
+ CHECK_DATA_CORRUPTION(prev->next != next,
+ "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+ next, prev->next, prev) ||
+ CHECK_DATA_CORRUPTION(new == prev || new == next,
+ "list_add double add: new=%p, prev=%p, next=%p.\n",
+ new, prev, next))
+ return false;
return true;
}
@@ -41,18 +42,20 @@ bool __list_del_entry_valid(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- CHECK_DATA_CORRUPTION(next == LIST_POISON1,
- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
- entry, LIST_POISON1);
- CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
- entry, LIST_POISON2);
- CHECK_DATA_CORRUPTION(prev->next != entry,
- "list_del corruption. prev->next should be %p, but was %p\n",
- entry, prev->next);
- CHECK_DATA_CORRUPTION(next->prev != entry,
- "list_del corruption. next->prev should be %p, but was %p\n",
- entry, next->prev);
+ if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+ entry, LIST_POISON1) ||
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+ "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+ entry, LIST_POISON2) ||
+ CHECK_DATA_CORRUPTION(prev->next != entry,
+ "list_del corruption. prev->next should be %p, but was %p\n",
+ entry, prev->next) ||
+ CHECK_DATA_CORRUPTION(next->prev != entry,
+ "list_del corruption. next->prev should be %p, but was %p\n",
+ entry, next->prev))
+ return false;
+
return true;
}
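
/*
 * For reference, the CONFIG_DEBUG_LIST consumer in include/linux/list.h
 * follows roughly this shape (a sketch, not a verbatim copy): when the
 * validation helper returns false, the corrupted operation is skipped
 * rather than performed.
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;		/* leave the list untouched on corruption */

	next->prev = new;
	new->next = next;
	new->prev = prev;
	WRITE_ONCE(prev->next, new);
}
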
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
index 8085d04e9309..f7b113271d13 100644
--- a/lib/lz4/Makefile
+++ b/lib/lz4/Makefile
@@ -1,3 +1,5 @@
+ccflags-y += -O3
+
obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index 28321d8f75ef..cc7b6d4cc7c7 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -1,19 +1,16 @@
/*
* LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,419 +22,919 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * Changed for kernel use by:
- * Chanho Min <chanho.min@lge.com>
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lz4.h>
#include <asm/unaligned.h>
-#include "lz4defs.h"
-/*
- * LZ4_compressCtx :
- * -----------------
- * Compress 'isize' bytes from 'source' into an output buffer 'dest' of
- * maximum size 'maxOutputSize'. * If it cannot achieve it, compression
- * will stop, and result of the function will be zero.
- * return : the number of bytes written in buffer 'dest', or 0 if the
- * compression fails
- */
-static inline int lz4_compressctx(void *ctx,
- const char *source,
- char *dest,
- int isize,
- int maxoutputsize)
+static const int LZ4_minLength = (MFLIMIT + 1);
+static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
+
+/*-******************************
+ * Compression functions
+ ********************************/
+static FORCE_INLINE U32 LZ4_hash4(
+ U32 sequence,
+ tableType_t const tableType)
{
- HTYPE *hashtable = (HTYPE *)ctx;
- const u8 *ip = (u8 *)source;
-#if LZ4_ARCH64
- const BYTE * const base = ip;
+ if (tableType == byU16)
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+ else
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - LZ4_HASHLOG));
+}
+
+static FORCE_INLINE U32 LZ4_hash5(
+ U64 sequence,
+ tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16)
+ ? LZ4_HASHLOG + 1
+ : LZ4_HASHLOG;
+
+#if LZ4_LITTLE_ENDIAN
+ static const U64 prime5bytes = 889523592379ULL;
+
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
#else
- const int base = 0;
+ static const U64 prime8bytes = 11400714785074694791ULL;
+
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
#endif
- const u8 *anchor = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- #define MATCHLIMIT (iend - LASTLITERALS)
-
- u8 *op = (u8 *) dest;
- u8 *const oend = op + maxoutputsize;
- int length;
- const int skipstrength = SKIPSTRENGTH;
- u32 forwardh;
- int lastrun;
-
- /* Init */
- if (isize < MINLENGTH)
- goto _last_literals;
+}
+
+static FORCE_INLINE U32 LZ4_hashPosition(
+ const void *p,
+ tableType_t const tableType)
+{
+#if LZ4_ARCH64
+ if (tableType == byU32)
+ return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+#endif
+
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
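
/*
 * The hash routines above are multiplicative (Fibonacci-style) hashes:
 * multiply the 4- or 5-byte input sequence by a large odd constant and
 * keep the top bits as the table index.  A standalone sketch of the
 * 32-bit variant, with hash_log chosen by the caller:
 */
static inline unsigned int hash32(unsigned int sequence, unsigned int hash_log)
{
	return (sequence * 2654435761U) >> (32 - hash_log);
}
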
+
+static void LZ4_putPositionOnHash(
+ const BYTE *p,
+ U32 h,
+ void *tableBase,
+ tableType_t const tableType,
+ const BYTE *srcBase)
+{
+ switch (tableType) {
+ case byPtr:
+ {
+ const BYTE **hashTable = (const BYTE **)tableBase;
+
+ hashTable[h] = p;
+ return;
+ }
+ case byU32:
+ {
+ U32 *hashTable = (U32 *) tableBase;
+
+ hashTable[h] = (U32)(p - srcBase);
+ return;
+ }
+ case byU16:
+ {
+ U16 *hashTable = (U16 *) tableBase;
+
+ hashTable[h] = (U16)(p - srcBase);
+ return;
+ }
+ }
+}
+
+static FORCE_INLINE void LZ4_putPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+static const BYTE *LZ4_getPositionOnHash(
+ U32 h,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ if (tableType == byPtr) {
+ const BYTE **hashTable = (const BYTE **) tableBase;
+
+ return hashTable[h];
+ }
+
+ if (tableType == byU32) {
+ const U32 * const hashTable = (U32 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+
+ {
+ /* default, to ensure a return */
+ const U16 * const hashTable = (U16 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+}
+
+static FORCE_INLINE const BYTE *LZ4_getPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
- memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
+
+/*
+ * LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time
+ */
+static FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal * const dictPtr,
+ const char * const source,
+ char * const dest,
+ const int inputSize,
+ const int maxOutputSize,
+ const limitedOutput_directive outputLimited,
+ const tableType_t tableType,
+ const dict_directive dict,
+ const dictIssue_directive dictIssue,
+ const U32 acceleration)
+{
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *base;
+ const BYTE *lowLimit;
+ const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
+ const BYTE * const dictionary = dictPtr->dictionary;
+ const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
+ const size_t dictDelta = dictEnd - (const BYTE *)source;
+ const BYTE *anchor = (const BYTE *) source;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dest;
+ BYTE * const olimit = op + maxOutputSize;
+
+ U32 forwardH;
+ size_t refDelta = 0;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
+ /* Unsupported inputSize, too large (or negative) */
+ return 0;
+ }
+
+ switch (dict) {
+ case noDict:
+ default:
+ base = (const BYTE *)source;
+ lowLimit = (const BYTE *)source;
+ break;
+ case withPrefix64k:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source - dictPtr->dictSize;
+ break;
+ case usingExtDict:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source;
+ break;
+ }
+
+ if ((tableType == byU16)
+ && (inputSize >= LZ4_64Klimit)) {
+ /* Size too large (not within 64K limit) */
+ return 0;
+ }
+
+ if (inputSize < LZ4_minLength) {
+ /* Input too small, no compression (all literals) */
+ goto _last_literals;
+ }
/* First Byte */
- hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
ip++;
- forwardh = LZ4_HASH_VALUE(ip);
+ forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
- for (;;) {
- int findmatchattempts = (1U << skipstrength) + 3;
- const u8 *forwardip = ip;
- const u8 *ref;
- u8 *token;
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
/* Find a match */
- do {
- u32 h = forwardh;
- int step = findmatchattempts++ >> skipstrength;
- ip = forwardip;
- forwardip = ip + step;
-
- if (unlikely(forwardip > mflimit))
- goto _last_literals;
-
- forwardh = LZ4_HASH_VALUE(forwardip);
- ref = base + hashtable[h];
- hashtable[h] = ip - base;
- } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 const h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h,
+ dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ } }
+
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+
+ LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
+ tableType, base);
+ } while (((dictIssue == dictSmall)
+ ? (match < lowRefLimit)
+ : 0)
+ || ((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match + refDelta)
+ != LZ4_read32(ip)));
+ }
/* Catch up */
- while ((ip > anchor) && (ref > (u8 *)source) &&
- unlikely(ip[-1] == ref[-1])) {
+ while (((ip > anchor) & (match + refDelta > lowLimit))
+ && (unlikely(ip[-1] == match[refDelta - 1]))) {
ip--;
- ref--;
+ match--;
}
- /* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- /* check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
- (length >> 8) > oend))
- return 0;
+ /* Encode Literals */
+ {
+ unsigned const int litLength = (unsigned int)(ip - anchor);
- if (length >= (int)RUN_MASK) {
- int len;
- *token = (RUN_MASK << ML_BITS);
- len = length - RUN_MASK;
- for (; len > 254 ; len -= 255)
- *op++ = 255;
- *op++ = (u8)len;
- } else
- *token = (length << ML_BITS);
+ token = op++;
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ (unlikely(op + litLength +
+ (2 + 1 + LASTLITERALS) +
+ (litLength / 255) > olimit)))
+ return 0;
+
+ if (litLength >= RUN_MASK) {
+ int len = (int)litLength - RUN_MASK;
+
+ *token = (RUN_MASK << ML_BITS);
+
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
- /* Copy Literals */
- LZ4_BLINDCOPY(anchor, op, length);
_next_match:
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+ LZ4_writeLE16(op, (U16)(ip - match));
+ op += 2;
- /* Start Counting */
- ip += MINMATCH;
- /* MinMatch verified */
- ref += MINMATCH;
- anchor = ip;
- while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
- #if LZ4_ARCH64
- u64 diff = A64(ref) ^ A64(ip);
- #else
- u32 diff = A32(ref) ^ A32(ip);
- #endif
- if (!diff) {
- ip += STEPSIZE;
- ref += STEPSIZE;
- continue;
- }
- ip += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
- ip += 4;
- ref += 4;
- }
- #endif
- if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
- ip += 2;
- ref += 2;
- }
- if ((ip < MATCHLIMIT) && (*ref == *ip))
- ip++;
-_endcount:
/* Encode MatchLength */
- length = (int)(ip - anchor);
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend))
- return 0;
- if (length >= (int)ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
- for (; length > 509 ; length -= 510) {
- *op++ = 255;
- *op++ = 255;
- }
- if (length > 254) {
- length -= 255;
- *op++ = 255;
+ {
+ unsigned int matchCode;
+
+ if ((dict == usingExtDict)
+ && (lowLimit == dictionary)) {
+ const BYTE *limit;
+
+ match += refDelta;
+ limit = ip + (dictEnd - match);
+
+ if (limit > matchlimit)
+ limit = matchlimit;
+
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, limit);
+
+ ip += MINMATCH + matchCode;
+
+ if (ip == limit) {
+ unsigned const int more = LZ4_count(ip,
+ (const BYTE *)source,
+ matchlimit);
+
+ matchCode += more;
+ ip += more;
+ }
+ } else {
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
+ ip += MINMATCH + matchCode;
}
- *op++ = (u8)length;
- } else
- *token += length;
+
+ if (outputLimited &&
+ /* Check output buffer overflow */
+ (unlikely(op +
+ (1 + LASTLITERALS) +
+ (matchCode >> 8) > olimit)))
+ return 0;
+
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+
+ while (matchCode >= 4 * 255) {
+ op += 4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4 * 255;
+ }
+
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+
+ anchor = ip;
/* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
+ if (ip > mflimit)
break;
- }
/* Fill table */
- hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
+ LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
/* Test next position */
- ref = base + hashtable[LZ4_HASH_VALUE(ip)];
- hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
- if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
+ match = LZ4_getPosition(ip, dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ }
+ }
+
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+
+ if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
+ && (match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
token = op++;
*token = 0;
goto _next_match;
}
/* Prepare next loop */
- anchor = ip++;
- forwardh = LZ4_HASH_VALUE(ip);
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (((char *)op - dest) + lastrun + 1
- + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
- return 0;
+ {
+ size_t const lastRun = (size_t)(iend - anchor);
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ ((op - (BYTE *)dest) + lastRun + 1 +
+ ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
+ return 0;
+
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun << ML_BITS);
+ }
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8)lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ memcpy(op, anchor, lastRun);
+
+ op += lastRun;
+ }
/* End */
- return (int)(((char *)op) - dest);
+ return (int) (((char *)op) - dest);
}
-static inline int lz4_compress64kctx(void *ctx,
- const char *source,
- char *dest,
- int isize,
- int maxoutputsize)
+static int LZ4_compress_fast_extState(
+ void *state,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ int acceleration)
{
- u16 *hashtable = (u16 *)ctx;
- const u8 *ip = (u8 *) source;
- const u8 *anchor = ip;
- const u8 *const base = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- #define MATCHLIMIT (iend - LASTLITERALS)
-
- u8 *op = (u8 *) dest;
- u8 *const oend = op + maxoutputsize;
- int len, length;
- const int skipstrength = SKIPSTRENGTH;
- u32 forwardh;
- int lastrun;
-
- /* Init */
- if (isize < MINLENGTH)
- goto _last_literals;
+ LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
+
+ LZ4_resetStream((LZ4_stream_t *)state);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, tableType, noDict,
+ noDictIssue, acceleration);
+ } else {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, tableType, noDict,
+ noDictIssue, acceleration);
+ }
+}
+
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem)
+{
+ return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
+ maxOutputSize, acceleration);
+}
+EXPORT_SYMBOL(LZ4_compress_fast);
- memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem)
+{
+ return LZ4_compress_fast(source, dest, inputSize,
+ maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
+}
+EXPORT_SYMBOL(LZ4_compress_default);
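Editorial note, not part of the patch: the one-shot entry points above mirror the upstream LZ4 API but take an external work buffer. A minimal, hedged usage sketch follows; the function and variable names are illustrative, and callers would need <linux/lz4.h>, <linux/vmalloc.h> and <linux/errno.h>.

/*
 * Editorial sketch only: compress one buffer with the new one-shot API.
 * LZ4_MEM_COMPRESS sizes the scratch area used for the hash table.
 */
static int example_compress_once(const char *src, int src_len,
				 char *dst, int dst_cap)
{
	void *wrkmem = vzalloc(LZ4_MEM_COMPRESS);
	int out_len;

	if (!wrkmem)
		return -ENOMEM;

	/* Returns the compressed size, or 0 if dst_cap is too small. */
	out_len = LZ4_compress_default(src, dst, src_len, dst_cap, wrkmem);

	vfree(wrkmem);
	return out_len ? out_len : -E2BIG;
}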
+
+/*-******************************
+ * *_destSize() variant
+ ********************************/
+static int LZ4_compress_destSize_generic(
+ LZ4_stream_t_internal * const ctx,
+ const char * const src,
+ char * const dst,
+ int * const srcSizePtr,
+ const int targetDstSize,
+ const tableType_t tableType)
+{
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE *base = (const BYTE *) src;
+ const BYTE *lowLimit = (const BYTE *) src;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + *srcSizePtr;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dst;
+ BYTE * const oend = op + targetDstSize;
+ BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
+ - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
+ BYTE * const oMaxMatch = op + targetDstSize
+ - (LASTLITERALS + 1 /* token */);
+ BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
+
+ U32 forwardH;
+
+ /* Init conditions */
+ /* Impossible to store anything */
+ if (targetDstSize < 1)
+ return 0;
+ /* Unsupported input size, too large (or negative) */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
+ return 0;
+ /* Size too large (not within 64K limit) */
+ if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
+ return 0;
+ /* Input too small, no compression (all literals) */
+ if (*srcSizePtr < LZ4_minLength)
+ goto _last_literals;
/* First Byte */
- ip++;
- forwardh = LZ4_HASH64K_VALUE(ip);
+ *srcSizePtr = 0;
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
- for (;;) {
- int findmatchattempts = (1U << skipstrength) + 3;
- const u8 *forwardip = ip;
- const u8 *ref;
- u8 *token;
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
/* Find a match */
- do {
- u32 h = forwardh;
- int step = findmatchattempts++ >> skipstrength;
- ip = forwardip;
- forwardip = ip + step;
-
- if (forwardip > mflimit)
- goto _last_literals;
-
- forwardh = LZ4_HASH64K_VALUE(forwardip);
- ref = base + hashtable[h];
- hashtable[h] = (u16)(ip - base);
- } while (A32(ref) != A32(ip));
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h, ctx->hashTable,
+ tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+ LZ4_putPositionOnHash(ip, h,
+ ctx->hashTable, tableType,
+ base);
+
+ } while (((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match) != LZ4_read32(ip)));
+ }
/* Catch up */
- while ((ip > anchor) && (ref > (u8 *)source)
- && (ip[-1] == ref[-1])) {
+ while ((ip > anchor)
+ && (match > lowLimit)
+ && (unlikely(ip[-1] == match[-1]))) {
ip--;
- ref--;
+ match--;
}
/* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- /* Check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS)
- + (length >> 8) > oend))
- return 0;
- if (length >= (int)RUN_MASK) {
- *token = (RUN_MASK << ML_BITS);
- len = length - RUN_MASK;
- for (; len > 254 ; len -= 255)
- *op++ = 255;
- *op++ = (u8)len;
- } else
- *token = (length << ML_BITS);
+ {
+ unsigned int litLength = (unsigned int)(ip - anchor);
- /* Copy Literals */
- LZ4_BLINDCOPY(anchor, op, length);
+ token = op++;
+ if (op + ((litLength + 240) / 255)
+ + litLength > oMaxLit) {
+ /* Not enough space for a last match */
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ unsigned int len = litLength - RUN_MASK;
+ *token = (RUN_MASK<<ML_BITS);
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
_next_match:
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
- /* Start Counting */
- ip += MINMATCH;
- /* MinMatch verified */
- ref += MINMATCH;
- anchor = ip;
+ /* Encode MatchLength */
+ {
+ size_t matchLength = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
- while (ip < MATCHLIMIT - (STEPSIZE - 1)) {
- #if LZ4_ARCH64
- u64 diff = A64(ref) ^ A64(ip);
- #else
- u32 diff = A32(ref) ^ A32(ip);
- #endif
-
- if (!diff) {
- ip += STEPSIZE;
- ref += STEPSIZE;
- continue;
+ if (op + ((matchLength + 240)/255) > oMaxMatch) {
+ /* Match description too long : reduce it */
+ matchLength = (15 - 1) + (oMaxMatch - op) * 255;
}
- ip += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
- ip += 4;
- ref += 4;
+ ip += MINMATCH + matchLength;
+
+ if (matchLength >= ML_MASK) {
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ while (matchLength >= 255) {
+ matchLength -= 255;
+ *op++ = 255;
+ }
+ *op++ = (BYTE)matchLength;
+ } else
+ *token += (BYTE)(matchLength);
}
- #endif
- if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
- ip += 2;
- ref += 2;
- }
- if ((ip < MATCHLIMIT) && (*ref == *ip))
- ip++;
-_endcount:
- /* Encode MatchLength */
- len = (int)(ip - anchor);
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
- return 0;
- if (len >= (int)ML_MASK) {
- *token += ML_MASK;
- len -= ML_MASK;
- for (; len > 509 ; len -= 510) {
- *op++ = 255;
- *op++ = 255;
- }
- if (len > 254) {
- len -= 255;
- *op++ = 255;
- }
- *op++ = (u8)len;
- } else
- *token += len;
+ anchor = ip;
- /* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
+ /* Test end of block */
+ if (ip > mflimit)
+ break;
+ if (op > oMaxSeq)
break;
- }
/* Fill table */
- hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base);
+ LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
/* Test next position */
- ref = base + hashtable[LZ4_HASH64K_VALUE(ip)];
- hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base);
- if (A32(ref) == A32(ip)) {
- token = op++;
- *token = 0;
+ match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+
+ if ((match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++; *token = 0;
goto _next_match;
}
/* Prepare next loop */
- anchor = ip++;
- forwardh = LZ4_HASH64K_VALUE(ip);
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend)
- return 0;
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8)lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ {
+ size_t lastRunSize = (size_t)(iend - anchor);
+
+ if (op + 1 /* token */
+ + ((lastRunSize + 240) / 255) /* litLength */
+ + lastRunSize /* literals */ > oend) {
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (oend - op) - 1;
+ lastRunSize -= (lastRunSize + 240) / 255;
+ }
+ ip = anchor + lastRunSize;
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize<<ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
/* End */
- return (int)(((char *)op) - dest);
+ *srcSizePtr = (int) (((const char *)ip) - src);
+ return (int) (((char *)op) - dst);
}
-int lz4_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem)
+static int LZ4_compress_destSize_extState(
+ LZ4_stream_t *state,
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize)
{
- int ret = -1;
- int out_len = 0;
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
- if (src_len < LZ4_64KLIMIT)
- out_len = lz4_compress64kctx(wrkmem, src, dst, src_len,
- lz4_compressbound(src_len));
- else
- out_len = lz4_compressctx(wrkmem, src, dst, src_len,
- lz4_compressbound(src_len));
+ LZ4_resetStream(state);
+
+ if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
+ /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(
+ state, src, dst, *srcSizePtr,
+ targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit)
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, byU16);
+ else
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, tableType);
+ }
+}
+
+
+int LZ4_compress_destSize(
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize,
+ void *wrkmem)
+{
+ return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
+ targetDstSize);
+}
+EXPORT_SYMBOL(LZ4_compress_destSize);
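A short, hedged illustration of the destSize variant introduced above: it fills a fixed-capacity destination and reports, through the pointer argument, how much input it consumed. Variable names here are assumptions, not part of the patch.

/* Editorial sketch: compress as much of 'src' as fits into 'dst'. */
int src_used = src_len;	/* in: available input, out: bytes consumed */
int written = LZ4_compress_destSize(src, dst, &src_used, dst_cap, wrkmem);
/* 'written' is the compressed size, or 0 on error. */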
+
+/*-******************************
+ * Streaming functions
+ ********************************/
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
+{
+ memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
+}
+
+int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
+ const char *dictionary, int dictSize)
+{
+ LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
+ const BYTE *p = (const BYTE *)dictionary;
+ const BYTE * const dictEnd = p + dictSize;
+ const BYTE *base;
+
+ if ((dict->initCheck)
+ || (dict->currentOffset > 1 * GB)) {
+ /* Uninitialized structure, or reuse overflow */
+ LZ4_resetStream(LZ4_dict);
+ }
+
+ if (dictSize < (int)HASH_UNIT) {
+ dict->dictionary = NULL;
+ dict->dictSize = 0;
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 * KB)
+ p = dictEnd - 64 * KB;
+ dict->currentOffset += 64 * KB;
+ base = p - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->currentOffset += dict->dictSize;
+
+ while (p <= dictEnd - HASH_UNIT) {
+ LZ4_putPosition(p, dict->hashTable, byU32, base);
+ p += 3;
+ }
+
+ return dict->dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDict);
+
+static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
+ const BYTE *src)
+{
+ if ((LZ4_dict->currentOffset > 0x80000000) ||
+ ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
+ /* address space overflow */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 * KB;
+ const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+
+ for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta)
+ LZ4_dict->hashTable[i] = 0;
+ else
+ LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 * KB;
+ if (LZ4_dict->dictSize > 64 * KB)
+ LZ4_dict->dictSize = 64 * KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
+ const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 * KB) {
+ /* useless to define a dictionary > 64 * KB */
+ dictSize = 64 * KB;
+ }
+ if ((U32)dictSize > dict->dictSize)
+ dictSize = dict->dictSize;
+
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE *)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDict);
+
+int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
+ char *dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
+ const BYTE * const dictEnd = streamPtr->dictionary
+ + streamPtr->dictSize;
- if (out_len < 0)
- goto exit;
+ const BYTE *smallest = (const BYTE *) source;
- *dst_len = out_len;
+ if (streamPtr->initCheck) {
+ /* Uninitialized structure detected */
+ return 0;
+ }
+
+ if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
+ smallest = dictEnd;
+
+ LZ4_renormDictT(streamPtr, smallest);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+
+ if ((sourceEnd > streamPtr->dictionary)
+ && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 * KB)
+ streamPtr->dictSize = 64 * KB;
+ if (streamPtr->dictSize < 4)
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ }
+ }
- return 0;
-exit:
- return ret;
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE *)source) {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, noDictIssue, acceleration);
+ }
+ streamPtr->dictSize += (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, noDictIssue, acceleration);
+ }
+ streamPtr->dictionary = (const BYTE *)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
}
-EXPORT_SYMBOL(lz4_compress);
+EXPORT_SYMBOL(LZ4_compress_fast_continue);
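The streaming compressor is meant to be driven in a fixed order: reset the stream, optionally load a dictionary, then feed consecutive blocks while the previously compressed data (or a buffer saved via LZ4_saveDict()) stays addressable. A hedged sketch with illustrative names follows; a real caller would check each return value for 0.

/*
 * Editorial sketch: stream-compress two adjacent blocks.
 * Acceleration 1 is equivalent to the default setting.
 */
static int example_stream_compress(const char *blk1, int len1,
				   const char *blk2, int len2,
				   char *out, int out_cap)
{
	LZ4_stream_t *stream = vzalloc(sizeof(*stream));
	int n1, n2;

	if (!stream)
		return -ENOMEM;

	LZ4_resetStream(stream);
	n1 = LZ4_compress_fast_continue(stream, blk1, out, len1, out_cap, 1);
	/* blk1 must remain readable: it serves as the dictionary for blk2 */
	n2 = LZ4_compress_fast_continue(stream, blk2, out + n1, len2,
					out_cap - n1, 1);
	vfree(stream);
	return (n1 > 0 && n2 > 0) ? n1 + n2 : -E2BIG;
}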
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 6d940c72b5fc..bd3574312b82 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -1,25 +1,16 @@
/*
- * LZ4 Decompressor for Linux kernel
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
- *
- * Based on LZ4 implementation by Yann Collet.
- *
* LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -31,313 +22,487 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-#ifndef STATIC
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#endif
-#include <linux/lz4.h>
-
#include <asm/unaligned.h>
-#include "lz4defs.h"
-
-static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
-#if LZ4_ARCH64
-static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
-#endif
-
-static int lz4_uncompress(const char *source, char *dest, int osize)
+/*-*****************************
+ * Decompression functions
+ *******************************/
+/* LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important that this generic function really is inlined,
+ * in order to remove useless branches during compiler optimization.
+ */
+static FORCE_INLINE int LZ4_decompress_generic(
+ const char * const source,
+ char * const dest,
+ int inputSize,
+ /*
+ * If endOnInput == endOnInputSize,
+	 * this value is the maximum size of the output buffer.
+ */
+ int outputSize,
+ /* endOnOutputSize, endOnInputSize */
+ int endOnInput,
+ /* full, partial */
+ int partialDecoding,
+ /* only used if partialDecoding == partial */
+ int targetOutputSize,
+ /* noDict, withPrefix64k, usingExtDict */
+ int dict,
+ /* == dest when no prefix */
+ const BYTE * const lowPrefix,
+ /* only if dict == usingExtDict */
+ const BYTE * const dictStart,
+ /* note : = 0 if noDict */
+ const size_t dictSize
+ )
{
+ /* Local Variables */
const BYTE *ip = (const BYTE *) source;
- const BYTE *ref;
+ const BYTE * const iend = ip + inputSize;
+
BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + osize;
+ BYTE * const oend = op + outputSize;
BYTE *cpy;
- unsigned token;
- size_t length;
+ BYTE *oexit = op + targetOutputSize;
+ const BYTE * const lowLimit = lowPrefix - dictSize;
+ const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
+ const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
+ const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+
+ const int safeDecode = (endOnInput == endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+
+ /* Special cases */
+ /* targetOutputSize too high => decode everything */
+ if ((partialDecoding) && (oexit > oend - MFLIMIT))
+ oexit = oend - MFLIMIT;
+
+ /* Empty output buffer */
+ if ((endOnInput) && (unlikely(outputSize == 0)))
+ return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+
+ if ((!endOnInput) && (unlikely(outputSize == 0)))
+ return (*ip == 0 ? 1 : -1);
+
+ /* Main Loop : decode sequences */
while (1) {
+ size_t length;
+ const BYTE *match;
+ size_t offset;
+
+ /* get literal length */
+ unsigned int const token = *ip++;
+
+ length = token>>ML_BITS;
- /* get runlength */
- token = *ip++;
- length = (token >> ML_BITS);
if (length == RUN_MASK) {
- size_t len;
+ unsigned int s;
- len = *ip++;
- for (; len == 255; length += 255)
- len = *ip++;
- if (unlikely(length > (size_t)(length + len)))
+ do {
+ s = *ip++;
+ length += s;
+ } while (likely(endOnInput
+ ? ip < iend - RUN_MASK
+ : 1) & (s == 255));
+
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(op + length) < (size_t)(op))) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(ip + length) < (size_t)(ip))) {
+ /* overflow detection */
goto _output_error;
- length += len;
+ }
}
/* copy literals */
cpy = op + length;
- if (unlikely(cpy > oend - COPYLENGTH)) {
- /*
- * Error: not enough place for another match
- * (min 4) + 5 literals
- */
- if (cpy != oend)
- goto _output_error;
+ if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ || (ip + length > iend - (2 + 1 + LASTLITERALS))))
+ || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
+ if (partialDecoding) {
+ if (cpy > oend) {
+ /*
+ * Error :
+ * write attempt beyond end of output buffer
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && (ip + length > iend)) {
+ /*
+ * Error :
+ * read attempt beyond
+ * end of input buffer
+ */
+ goto _output_error;
+ }
+ } else {
+ if ((!endOnInput)
+ && (cpy != oend)) {
+ /*
+ * Error :
+ * block decoding must
+ * stop exactly there
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && ((ip + length != iend)
+ || (cpy > oend))) {
+ /*
+ * Error :
+ * input must be consumed
+ */
+ goto _output_error;
+ }
+ }
memcpy(op, ip, length);
ip += length;
- break; /* EOF */
+ op += length;
+ /* Necessarily EOF, due to parsing restrictions */
+ break;
}
- LZ4_WILDCOPY(ip, op, cpy);
- ip -= (op - cpy);
+
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
op = cpy;
/* get offset */
- LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
+ offset = LZ4_readLE16(ip);
ip += 2;
+ match = op - offset;
- /* Error: offset create reference outside destination buffer */
- if (unlikely(ref < (BYTE *const) dest))
+ if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* Error : offset outside buffers */
goto _output_error;
+ }
+
+ /* costs ~1%; silence an msan warning when offset == 0 */
+ LZ4_write32(op, (U32)offset);
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
- for (; *ip == 255; length += 255)
- ip++;
- if (unlikely(length > (size_t)(length + *ip)))
+ unsigned int s;
+
+ do {
+ s = *ip++;
+
+ if ((endOnInput) && (ip > iend - LASTLITERALS))
+ goto _output_error;
+
+ length += s;
+ } while (s == 255);
+
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(op + length) < (size_t)op)) {
+ /* overflow detection */
goto _output_error;
- length += *ip++;
+ }
}
- /* copy repeated sequence */
- if (unlikely((op - ref) < STEPSIZE)) {
-#if LZ4_ARCH64
- int dec64 = dec64table[op - ref];
-#else
- const int dec64 = 0;
-#endif
- op[0] = ref[0];
- op[1] = ref[1];
- op[2] = ref[2];
- op[3] = ref[3];
- op += 4;
- ref += 4;
- ref -= dec32table[op-ref];
- PUT4(ref, op);
- op += STEPSIZE - 4;
- ref -= dec64;
+ length += MINMATCH;
+
+ /* check external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op + length > oend - LASTLITERALS)) {
+ /* doesn't respect parsing restriction */
+ goto _output_error;
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /*
+ * match can be copied as a single segment
+ * from external dictionary
+ */
+ memmove(op, dictEnd - (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /*
+ * match encompass external
+				 * match encompasses the external
+				 * dictionary and the current block
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+
+ memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+
+ if (restSize > (size_t)(op - lowPrefix)) {
+ /* overlap copy */
+ BYTE * const endOfMatch = op + restSize;
+ const BYTE *copyFrom = lowPrefix;
+
+ while (op < endOfMatch)
+ *op++ = *copyFrom++;
+ } else {
+ memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ }
+ }
+
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ if (unlikely(offset < 8)) {
+ const int dec64 = dec64table[offset];
+
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[offset];
+ memcpy(op + 4, match, 4);
+ match -= dec64;
} else {
- LZ4_COPYSTEP(ref, op);
+ LZ4_copy8(op, match);
+ match += 8;
}
- cpy = op + length - (STEPSIZE - 4);
- if (cpy > (oend - COPYLENGTH)) {
- /* Error: request to write beyond destination buffer */
- if (cpy > oend)
- goto _output_error;
-#if LZ4_ARCH64
- if ((ref + COPYLENGTH) > oend)
-#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-#endif
+ op += 8;
+
+ if (unlikely(cpy > oend - 12)) {
+ BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
+
+ if (cpy > oend - LASTLITERALS) {
+ /*
+ * Error : last LASTLITERALS bytes
+ * must be literals (uncompressed)
+ */
goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+ }
+
+ if (op < oCopyLimit) {
+ LZ4_wildCopy(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+
while (op < cpy)
- *op++ = *ref++;
- op = cpy;
- /*
- * Check EOF (should never happen, since last 5 bytes
- * are supposed to be literals)
- */
- if (op == oend)
- goto _output_error;
- continue;
+ *op++ = *match++;
+ } else {
+ LZ4_copy8(op, match);
+
+ if (length > 16)
+ LZ4_wildCopy(op + 8, match + 8, cpy);
}
- LZ4_SECURECOPY(ref, op, cpy);
+
op = cpy; /* correction */
}
+
/* end of decoding */
- return (int) (((char *)ip) - source);
+ if (endOnInput) {
+ /* Nb of output bytes decoded */
+ return (int) (((char *)op) - dest);
+ } else {
+ /* Nb of input bytes read */
+ return (int) (((const char *)ip) - source);
+ }
- /* write overflow error detected */
+ /* Overflow error detected */
_output_error:
return -1;
}
-static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
- int isize, size_t maxoutputsize)
+int LZ4_decompress_safe(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize)
{
- const BYTE *ip = (const BYTE *) source;
- const BYTE *const iend = ip + isize;
- const BYTE *ref;
-
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, endOnInputSize, full, 0,
+ noDict, (BYTE *)dest, NULL, 0);
+}
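A hedged usage note for the safe decoder wrapper above (buffer names are illustrative): the caller passes the exact compressed size plus an upper bound on the decompressed size, and treats a negative return value as corruption or an undersized output buffer.

/* Editorial sketch: decode one independently compressed block. */
int n = LZ4_decompress_safe(comp, dst, comp_len, dst_cap);
if (n < 0)
	return -EINVAL;	/* malformed input or dst_cap too small */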
- BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + maxoutputsize;
- BYTE *cpy;
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+ int compressedSize, int targetOutputSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, endOnInputSize, partial,
+ targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+}
- /* Main Loop */
- while (ip < iend) {
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0, withPrefix64k,
+ (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+}
- unsigned token;
- size_t length;
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
- /* get runlength */
- token = *ip++;
- length = (token >> ML_BITS);
- if (length == RUN_MASK) {
- int s = 255;
- while ((ip < iend) && (s == 255)) {
- s = *ip++;
- if (unlikely(length > (size_t)(length + s)))
- goto _output_error;
- length += s;
- }
- }
- /* copy literals */
- cpy = op + length;
- if ((cpy > oend - COPYLENGTH) ||
- (ip + length > iend - COPYLENGTH)) {
-
- if (cpy > oend)
- goto _output_error;/* writes beyond buffer */
-
- if (ip + length != iend)
- goto _output_error;/*
- * Error: LZ4 format requires
- * to consume all input
- * at this stage
- */
- memcpy(op, ip, length);
- op += length;
- break;/* Necessarily EOF, due to parsing restrictions */
- }
- LZ4_WILDCOPY(ip, op, cpy);
- ip -= (op - cpy);
- op = cpy;
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
- /* get offset */
- LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
- ip += 2;
- if (ref < (BYTE * const) dest)
- goto _output_error;
- /*
- * Error : offset creates reference
- * outside of destination buffer
- */
+/*
+ * *_continue() :
+ * These decoding functions allow decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks must still be available at the memory
+ * position where they were decoded.
+ *	If that is not possible, save the relevant part of the
+ *	decoded data into a safe buffer, and indicate its location
+ *	using LZ4_setStreamDecode().
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE *)dest) {
+ result = LZ4_decompress_generic(source, dest,
+ compressedSize,
+ maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
+ lz4sd->externalDict,
+ lz4sd->extDictSize);
+
+ if (result <= 0)
+ return result;
+
+ lz4sd->prefixSize += result;
+ lz4sd->prefixEnd += result;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, (BYTE *)dest,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ }
- /* get matchlength */
- length = (token & ML_MASK);
- if (length == ML_MASK) {
- while (ip < iend) {
- int s = *ip++;
- if (unlikely(length > (size_t)(length + s)))
- goto _output_error;
- length += s;
- if (s == 255)
- continue;
- break;
- }
- }
+ return result;
+}
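A hedged sketch of how the streaming decoder above is expected to be driven (illustrative names, minimal error handling): blocks are decoded in order, and already-decoded output must remain in place while the next block is processed.

/* Editorial sketch: decode two consecutive blocks into one buffer. */
LZ4_streamDecode_t sd;
int n1, n2;

LZ4_setStreamDecode(&sd, NULL, 0);	/* no initial dictionary */
n1 = LZ4_decompress_safe_continue(&sd, comp1, dst, comp1_len, dst_cap);
n2 = LZ4_decompress_safe_continue(&sd, comp2, dst + n1,
				  comp2_len, dst_cap - n1);
/* n1/n2 < 0 indicates a malformed block or insufficient space */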
- /* copy repeated sequence */
- if (unlikely((op - ref) < STEPSIZE)) {
-#if LZ4_ARCH64
- int dec64 = dec64table[op - ref];
-#else
- const int dec64 = 0;
-#endif
- op[0] = ref[0];
- op[1] = ref[1];
- op[2] = ref[2];
- op[3] = ref[3];
- op += 4;
- ref += 4;
- ref -= dec32table[op - ref];
- PUT4(ref, op);
- op += STEPSIZE - 4;
- ref -= dec64;
- } else {
- LZ4_COPYSTEP(ref, op);
- }
- cpy = op + length - (STEPSIZE-4);
- if (cpy > oend - COPYLENGTH) {
- if (cpy > oend)
- goto _output_error; /* write outside of buf */
-#if LZ4_ARCH64
- if ((ref + COPYLENGTH) > oend)
-#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-#endif
- goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
- while (op < cpy)
- *op++ = *ref++;
- op = cpy;
- /*
- * Check EOF (should never happen, since last 5 bytes
- * are supposed to be literals)
- */
- if (op == oend)
- goto _output_error;
- continue;
- }
- LZ4_SECURECOPY(ref, op, cpy);
- op = cpy; /* correction */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE *)dest) {
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict,
+ lz4sd->prefixEnd - lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+
+ if (result <= 0)
+ return result;
+
+ lz4sd->prefixSize += originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict, (BYTE *)dest,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
- /* end of decoding */
- return (int) (((char *) op) - dest);
- /* write overflow error detected */
-_output_error:
- return -1;
+ return result;
}
-int lz4_decompress(const unsigned char *src, size_t *src_len,
- unsigned char *dest, size_t actual_dest_len)
+/*
+ * Advanced decoding functions :
+ * *_usingDict() :
+ *	These decoding functions work the same as the "_continue" ones,
+ *	but the dictionary must be provided explicitly as parameters.
+ */
+static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
+ char *dest, int compressedSize, int maxOutputSize, int safe,
+ const char *dictStart, int dictSize)
{
- int ret = -1;
- int input_len = 0;
-
- input_len = lz4_uncompress(src, dest, actual_dest_len);
- if (input_len < 0)
- goto exit_0;
- *src_len = input_len;
+ if (dictSize == 0)
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize, safe, full, 0,
+ noDict, (BYTE *)dest, NULL, 0);
+ if (dictStart + dictSize == dest) {
+ if (dictSize >= (int)(64 * KB - 1))
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize, safe, full, 0,
+ withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, safe, full, 0, noDict,
+ (BYTE *)dest - dictSize, NULL, 0);
+ }
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, safe, full, 0, usingExtDict,
+ (BYTE *)dest, (const BYTE *)dictStart, dictSize);
+}
- return 0;
-exit_0:
- return ret;
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
+{
+ return LZ4_decompress_usingDict_generic(source, dest,
+ compressedSize, maxOutputSize, 1, dictStart, dictSize);
}
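For completeness, a hedged one-line illustration of the usingDict variant (names are assumptions): the dictionary is passed explicitly instead of being tracked in a stream context.

/* Editorial sketch: decode against an out-of-band dictionary. */
int n = LZ4_decompress_safe_usingDict(comp, dst, comp_len, dst_cap,
				      dict, dict_len);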
-#ifndef STATIC
-EXPORT_SYMBOL(lz4_decompress);
-#endif
-int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
- unsigned char *dest, size_t *dest_len)
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart, int dictSize)
{
- int ret = -1;
- int out_len = 0;
-
- out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
- *dest_len);
- if (out_len < 0)
- goto exit_0;
- *dest_len = out_len;
-
- return 0;
-exit_0:
- return ret;
+ return LZ4_decompress_usingDict_generic(source, dest, 0,
+ originalSize, 0, dictStart, dictSize);
}
+
#ifndef STATIC
-EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(LZ4_decompress_safe);
+EXPORT_SYMBOL(LZ4_decompress_safe_partial);
+EXPORT_SYMBOL(LZ4_decompress_fast);
+EXPORT_SYMBOL(LZ4_setStreamDecode);
+EXPORT_SYMBOL(LZ4_decompress_safe_continue);
+EXPORT_SYMBOL(LZ4_decompress_fast_continue);
+EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
+EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 Decompressor");
+MODULE_DESCRIPTION("LZ4 decompressor");
#endif
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index c79d7ea8a38e..00a0b58a0871 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -1,157 +1,227 @@
+#ifndef __LZ4DEFS_H__
+#define __LZ4DEFS_H__
+
/*
- * lz4defs.h -- architecture specific defines
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ * lz4defs.h -- common and architecture-specific defines for kernel use
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-/*
- * Detects 64 bits mode
- */
+#include <asm/unaligned.h>
+#include <linux/string.h> /* memset, memcpy */
+
+#define FORCE_INLINE __always_inline
+
+/*-************************************
+ * Basic Types
+ **************************************/
+#include <linux/types.h>
+
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+
+/*-************************************
+ * Architecture specifics
+ **************************************/
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
-/*
- * Architecture-specific macros
- */
-#define BYTE u8
-typedef struct _U16_S { u16 v; } U16_S;
-typedef struct _U32_S { u32 v; } U32_S;
-typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-
-#define A16(x) (((U16_S *)(x))->v)
-#define A32(x) (((U32_S *)(x))->v)
-#define A64(x) (((U64_S *)(x))->v)
-
-#define PUT4(s, d) (A32(d) = A32(s))
-#define PUT8(s, d) (A64(d) = A64(s))
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - A16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- A16(p) = v; \
- p += 2; \
- } while (0)
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-
-#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
-#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
-#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
-
-#define PUT4(s, d) \
- put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
-#define PUT8(s, d) \
- put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned_le16(v, (u16 *)(p)); \
- p += 2; \
- } while (0)
+#if defined(__LITTLE_ENDIAN)
+#define LZ4_LITTLE_ENDIAN 1
+#else
+#define LZ4_LITTLE_ENDIAN 0
#endif
-#define COPYLENGTH 8
-#define ML_BITS 4
-#define ML_MASK ((1U << ML_BITS) - 1)
+/*-************************************
+ * Constants
+ **************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+
+/* Increase this value ==> compression runs slower on incompressible data */
+#define LZ4_SKIPTRIGGER 6
+
+#define HASH_UNIT sizeof(size_t)
+
+#define KB (1 << 10)
+#define MB (1 << 20)
+#define GB (1U << 30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define STEPSIZE sizeof(size_t)
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
-#define MEMORY_USAGE 14
-#define MINMATCH 4
-#define SKIPSTRENGTH 6
-#define LASTLITERALS 5
-#define MFLIMIT (COPYLENGTH + MINMATCH)
-#define MINLENGTH (MFLIMIT + 1)
-#define MAXD_LOG 16
-#define MAXD (1 << MAXD_LOG)
-#define MAXD_MASK (u32)(MAXD - 1)
-#define MAX_DISTANCE (MAXD - 1)
-#define HASH_LOG (MAXD_LOG - 1)
-#define HASHTABLESIZE (1 << HASH_LOG)
-#define MAX_NB_ATTEMPTS 256
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
-#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1))
-#define HASHLOG64K ((MEMORY_USAGE - 2) + 1)
-#define HASH64KTABLESIZE (1U << HASHLOG64K)
-#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - (MEMORY_USAGE-2)))
-#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - HASHLOG64K))
-#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - HASH_LOG))
-
-#if LZ4_ARCH64/* 64-bit */
-#define STEPSIZE 8
-
-#define LZ4_COPYSTEP(s, d) \
- do { \
- PUT8(s, d); \
- d += 8; \
- s += 8; \
- } while (0)
-
-#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
-
-#define LZ4_SECURECOPY(s, d, e) \
- do { \
- if (d < e) { \
- LZ4_WILDCOPY(s, d, e); \
- } \
- } while (0)
-#define HTYPE u32
-
-#ifdef __BIG_ENDIAN
-#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
+
+/*-************************************
+ * Reading and writing into memory
+ **************************************/
+static FORCE_INLINE U16 LZ4_read16(const void *ptr)
+{
+ return get_unaligned((const U16 *)ptr);
+}
+
+static FORCE_INLINE U32 LZ4_read32(const void *ptr)
+{
+ return get_unaligned((const U32 *)ptr);
+}
+
+static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
+{
+ return get_unaligned((const size_t *)ptr);
+}
+
+static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+{
+ return put_unaligned_le16(value, memPtr);
+}
+
+static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+{
+#if LZ4_ARCH64
+ U64 a = get_unaligned((const U64 *)src);
+
+ put_unaligned(a, (U64 *)dst);
+#else
+ U32 a = get_unaligned((const U32 *)src);
+ U32 b = get_unaligned((const U32 *)src + 1);
+
+ put_unaligned(a, (U32 *)dst);
+ put_unaligned(b, (U32 *)dst + 1);
+#endif
+}
+
+/*
+ * customized variant of memcpy,
+ * which can overwrite up to 7 bytes beyond dstEnd
+ */
+static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
+ const void *srcPtr, void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_copy8(d, s);
+ d += 8;
+ s += 8;
+ } while (d < e);
+}
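A brief worked example (editorial, not part of the patch) of why callers keep slack after dstEnd when using the helper above:

/*
 * LZ4_wildCopy(d, s, d + 13) executes two 8-byte copies and therefore
 * writes 16 bytes, i.e. up to WILDCOPYLENGTH - 1 == 7 bytes beyond dstEnd.
 * The MFLIMIT/LASTLITERALS end-of-block margins exist to provide exactly
 * this slack in both the encoder and the decoder.
 */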
+
+static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
+{
+#if LZ4_LITTLE_ENDIAN
+ return __ffs(val) >> 3;
#else
-#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
+ return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
+#endif
+}
+
+static FORCE_INLINE unsigned int LZ4_count(
+ const BYTE *pIn,
+ const BYTE *pMatch,
+ const BYTE *pInLimit)
+{
+ const BYTE *const pStart = pIn;
+
+ while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ continue;
+ }
+
+ pIn += LZ4_NbCommonBytes(diff);
+
+ return (unsigned int)(pIn - pStart);
+ }
+
+#if LZ4_ARCH64
+ if ((pIn < (pInLimit - 3))
+ && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ pIn += 4;
+ pMatch += 4;
+ }
#endif
-#else /* 32-bit */
-#define STEPSIZE 4
+ if ((pIn < (pInLimit - 1))
+ && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ pIn += 2;
+ pMatch += 2;
+ }
-#define LZ4_COPYSTEP(s, d) \
- do { \
- PUT4(s, d); \
- d += 4; \
- s += 4; \
- } while (0)
+ if ((pIn < pInLimit) && (*pMatch == *pIn))
+ pIn++;
-#define LZ4_COPYPACKET(s, d) \
- do { \
- LZ4_COPYSTEP(s, d); \
- LZ4_COPYSTEP(s, d); \
- } while (0)
+ return (unsigned int)(pIn - pStart);
+}
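A short worked example for LZ4_count() (editorial note; the byte strings are illustrative and assume a 64-bit little-endian machine with pInLimit at least STEPSIZE bytes away):

/*
 * With pIn -> "abcdeXYZ" and pMatch -> "abcdeQRS", the first word-sized
 * XOR is non-zero; LZ4_NbCommonBytes() reports 5 matching low-order bytes,
 * so LZ4_count() returns 5. Reads never go past pInLimit.
 */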
-#define LZ4_SECURECOPY LZ4_WILDCOPY
-#define HTYPE const u8*
+typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
-#ifdef __BIG_ENDIAN
-#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
-#else
-#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
-#endif
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-#endif
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-#define LZ4_WILDCOPY(s, d, e) \
- do { \
- LZ4_COPYPACKET(s, d); \
- } while (d < e)
-
-#define LZ4_BLINDCOPY(s, d, l) \
- do { \
- u8 *e = (d) + l; \
- LZ4_WILDCOPY(s, d, e); \
- d = e; \
- } while (0)
+#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index f344f76b6559..176f03b83e56 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -1,19 +1,17 @@
/*
* LZ4 HC - High Compression Mode of LZ4
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Copyright (C) 2011-2015, Yann Collet.
*
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,323 +23,361 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * Changed for kernel use by:
- * Chanho Min <chanho.min@lge.com>
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+/*-************************************
+ * Dependencies
+ **************************************/
#include <linux/lz4.h>
-#include <asm/unaligned.h>
#include "lz4defs.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h> /* memset */
-struct lz4hc_data {
- const u8 *base;
- HTYPE hashtable[HASHTABLESIZE];
- u16 chaintable[MAXD];
- const u8 *nexttoupdate;
-} __attribute__((__packed__));
+/* *************************************
+ * Local Constants and types
+ ***************************************/
-static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base)
+#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
+
+#define HASH_FUNCTION(i) (((i) * 2654435761U) \
+ >> ((MINMATCH*8) - LZ4HC_HASH_LOG))
+#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
+
+static U32 LZ4HC_hashPtr(const void *ptr)
{
- memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable));
- memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable));
-
-#if LZ4_ARCH64
- hc4->nexttoupdate = base + 1;
-#else
- hc4->nexttoupdate = base;
-#endif
- hc4->base = base;
- return 1;
+ return HASH_FUNCTION(LZ4_read32(ptr));
+}
+
+/**************************************
+ * HC Compression
+ **************************************/
+static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
+{
+ memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
+ memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+ hc4->nextToUpdate = 64 * KB;
+ hc4->base = start - 64 * KB;
+ hc4->end = start;
+ hc4->dictBase = start - 64 * KB;
+ hc4->dictLimit = 64 * KB;
+ hc4->lowLimit = 64 * KB;
}
/* Update chains up to ip (excluded) */
-static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip)
+static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
+ const BYTE *ip)
{
- u16 *chaintable = hc4->chaintable;
- HTYPE *hashtable = hc4->hashtable;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const hashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base + idx);
+ size_t delta = idx - hashTable[h];
- while (hc4->nexttoupdate < ip) {
- const u8 *p = hc4->nexttoupdate;
- size_t delta = p - (hashtable[HASH_VALUE(p)] + base);
if (delta > MAX_DISTANCE)
delta = MAX_DISTANCE;
- chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta;
- hashtable[HASH_VALUE(p)] = (p) - base;
- hc4->nexttoupdate++;
- }
-}
-static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2,
- const u8 *const matchlimit)
-{
- const u8 *p1t = p1;
-
- while (p1t < matchlimit - (STEPSIZE - 1)) {
-#if LZ4_ARCH64
- u64 diff = A64(p2) ^ A64(p1t);
-#else
- u32 diff = A32(p2) ^ A32(p1t);
-#endif
- if (!diff) {
- p1t += STEPSIZE;
- p2 += STEPSIZE;
- continue;
- }
- p1t += LZ4_NBCOMMONBYTES(diff);
- return p1t - p1;
- }
-#if LZ4_ARCH64
- if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) {
- p1t += 4;
- p2 += 4;
- }
-#endif
+ DELTANEXTU16(idx) = (U16)delta;
- if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) {
- p1t += 2;
- p2 += 2;
+ hashTable[h] = idx;
+ idx++;
}
- if ((p1t < matchlimit) && (*p2 == *p1t))
- p1t++;
- return p1t - p1;
+
+ hc4->nextToUpdate = target;
}
-static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4,
- const u8 *ip, const u8 *const matchlimit, const u8 **matchpos)
+static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
+ LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
+ const BYTE *ip,
+ const BYTE * const iLimit,
+ const BYTE **matchpos,
+ const int maxNbAttempts)
{
- u16 *const chaintable = hc4->chaintable;
- HTYPE *const hashtable = hc4->hashtable;
- const u8 *ref;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
- int nbattempts = MAX_NB_ATTEMPTS;
- size_t repl = 0, ml = 0;
- u16 delta;
+ const BYTE * const dictBase = hc4->dictBase;
+ const U32 dictLimit = hc4->dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ size_t ml = 0;
/* HC4 match finder */
- lz4hc_insert(hc4, ip);
- ref = hashtable[HASH_VALUE(ip)] + base;
-
- /* potential repetition */
- if (ref >= ip-4) {
- /* confirmed */
- if (A32(ref) == A32(ip)) {
- delta = (u16)(ip-ref);
- repl = ml = lz4hc_commonlength(ip + MINMATCH,
- ref + MINMATCH, matchlimit) + MINMATCH;
- *matchpos = ref;
- }
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
- }
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE * const match = base + matchIndex;
+
+ if (*(match + ml) == *(ip + ml)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ size_t const mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, iLimit) + MINMATCH;
- while ((ref >= ip - MAX_DISTANCE) && nbattempts) {
- nbattempts--;
- if (*(ref + ml) == *(ip + ml)) {
- if (A32(ref) == A32(ip)) {
- size_t mlt =
- lz4hc_commonlength(ip + MINMATCH,
- ref + MINMATCH, matchlimit) + MINMATCH;
if (mlt > ml) {
ml = mlt;
- *matchpos = ref;
+ *matchpos = match;
+ }
+ }
+ } else {
+ const BYTE * const match = dictBase + matchIndex;
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ size_t mlt;
+ const BYTE *vLimit = ip
+ + (dictLimit - matchIndex);
+
+ if (vLimit > iLimit)
+ vLimit = iLimit;
+ mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, vLimit) + MINMATCH;
+ if ((ip + mlt == vLimit)
+ && (vLimit < iLimit))
+ mlt += LZ4_count(ip + mlt,
+ base + dictLimit,
+ iLimit);
+ if (mlt > ml) {
+ /* virtual matchpos */
+ ml = mlt;
+ *matchpos = base + matchIndex;
}
}
}
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
- }
-
- /* Complete table */
- if (repl) {
- const BYTE *ptr = ip;
- const BYTE *end;
- end = ip + repl - (MINMATCH-1);
- /* Pre-Load */
- while (ptr < end - delta) {
- chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
- ptr++;
- }
- do {
- chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
- /* Head of chain */
- hashtable[HASH_VALUE(ptr)] = (ptr) - base;
- ptr++;
- } while (ptr < end);
- hc4->nexttoupdate = end;
+ matchIndex -= DELTANEXTU16(matchIndex);
}
return (int)ml;
}
-static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4,
- const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest,
- const u8 **matchpos, const u8 **startpos)
+static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
+ LZ4HC_CCtx_internal *hc4,
+ const BYTE * const ip,
+ const BYTE * const iLowLimit,
+ const BYTE * const iHighLimit,
+ int longest,
+ const BYTE **matchpos,
+ const BYTE **startpos,
+ const int maxNbAttempts)
{
- u16 *const chaintable = hc4->chaintable;
- HTYPE *const hashtable = hc4->hashtable;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
- const u8 *ref;
- int nbattempts = MAX_NB_ATTEMPTS;
- int delta = (int)(ip - startlimit);
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE * const lowPrefixPtr = base + dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ const BYTE * const dictBase = hc4->dictBase;
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ int delta = (int)(ip - iLowLimit);
/* First Match */
- lz4hc_insert(hc4, ip);
- ref = hashtable[HASH_VALUE(ip)] + base;
-
- while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base)
- && (nbattempts)) {
- nbattempts--;
- if (*(startlimit + longest) == *(ref - delta + longest)) {
- if (A32(ref) == A32(ip)) {
- const u8 *reft = ref + MINMATCH;
- const u8 *ipt = ip + MINMATCH;
- const u8 *startt = ip;
-
- while (ipt < matchlimit-(STEPSIZE - 1)) {
- #if LZ4_ARCH64
- u64 diff = A64(reft) ^ A64(ipt);
- #else
- u32 diff = A32(reft) ^ A32(ipt);
- #endif
-
- if (!diff) {
- ipt += STEPSIZE;
- reft += STEPSIZE;
- continue;
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE *matchPtr = base + matchIndex;
+
+ if (*(iLowLimit + longest)
+ == *(matchPtr - delta + longest)) {
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ int mlt = MINMATCH + LZ4_count(
+ ip + MINMATCH,
+ matchPtr + MINMATCH,
+ iHighLimit);
+ int back = 0;
+
+ while ((ip + back > iLowLimit)
+ && (matchPtr + back > lowPrefixPtr)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if (mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
}
- ipt += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ipt < (matchlimit - 3))
- && (A32(reft) == A32(ipt))) {
- ipt += 4;
- reft += 4;
- }
- ipt += 2;
- #endif
- if ((ipt < (matchlimit - 1))
- && (A16(reft) == A16(ipt))) {
- reft += 2;
}
- if ((ipt < matchlimit) && (*reft == *ipt))
- ipt++;
-_endcount:
- reft = ref;
-
- while ((startt > startlimit)
- && (reft > hc4->base)
- && (startt[-1] == reft[-1])) {
- startt--;
- reft--;
- }
-
- if ((ipt - startt) > longest) {
- longest = (int)(ipt - startt);
- *matchpos = reft;
- *startpos = startt;
+ }
+ } else {
+ const BYTE * const matchPtr = dictBase + matchIndex;
+
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ size_t mlt;
+ int back = 0;
+ const BYTE *vLimit = ip + (dictLimit - matchIndex);
+
+ if (vLimit > iHighLimit)
+ vLimit = iHighLimit;
+
+ mlt = LZ4_count(ip + MINMATCH,
+ matchPtr + MINMATCH, vLimit) + MINMATCH;
+
+ if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
+ mlt += LZ4_count(ip + mlt, base + dictLimit,
+ iHighLimit);
+ while ((ip + back > iLowLimit)
+ && (matchIndex + back > lowLimit)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if ((int)mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
}
}
}
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
+
+ matchIndex -= DELTANEXTU16(matchIndex);
}
+
return longest;
}
-static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor,
- int ml, const u8 *ref)
+static FORCE_INLINE int LZ4HC_encodeSequence(
+ const BYTE **ip,
+ BYTE **op,
+ const BYTE **anchor,
+ int matchLength,
+ const BYTE * const match,
+ limitedOutput_directive limitedOutputBuffer,
+ BYTE *oend)
{
- int length, len;
- u8 *token;
+ int length;
+ BYTE *token;
/* Encode Literal length */
length = (int)(*ip - *anchor);
token = (*op)++;
+
+ if ((limitedOutputBuffer)
+ && ((*op + (length>>8)
+ + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
if (length >= (int)RUN_MASK) {
- *token = (RUN_MASK << ML_BITS);
+ int len;
+
+ *token = (RUN_MASK<<ML_BITS);
len = length - RUN_MASK;
for (; len > 254 ; len -= 255)
*(*op)++ = 255;
- *(*op)++ = (u8)len;
+ *(*op)++ = (BYTE)len;
} else
- *token = (length << ML_BITS);
+ *token = (BYTE)(length<<ML_BITS);
/* Copy Literals */
- LZ4_BLINDCOPY(*anchor, *op, length);
+ LZ4_wildCopy(*op, *anchor, (*op) + length);
+ *op += length;
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref));
+ LZ4_writeLE16(*op, (U16)(*ip - match));
+ *op += 2;
/* Encode MatchLength */
- len = (int)(ml - MINMATCH);
- if (len >= (int)ML_MASK) {
+ length = (int)(matchLength - MINMATCH);
+
+ if ((limitedOutputBuffer)
+ && (*op + (length>>8)
+ + (1 + LASTLITERALS) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
+
+ if (length >= (int)ML_MASK) {
*token += ML_MASK;
- len -= ML_MASK;
- for (; len > 509 ; len -= 510) {
+ length -= ML_MASK;
+
+ for (; length > 509 ; length -= 510) {
*(*op)++ = 255;
*(*op)++ = 255;
}
- if (len > 254) {
- len -= 255;
+
+ if (length > 254) {
+ length -= 255;
*(*op)++ = 255;
}
- *(*op)++ = (u8)len;
+
+ *(*op)++ = (BYTE)length;
} else
- *token += len;
+ *token += (BYTE)(length);
/* Prepare next loop */
- *ip += ml;
+ *ip += matchLength;
*anchor = *ip;
return 0;
}
-static int lz4_compresshcctx(struct lz4hc_data *ctx,
- const char *source,
- char *dest,
- int isize)
+static int LZ4HC_compress_generic(
+ LZ4HC_CCtx_internal *const ctx,
+ const char * const source,
+ char * const dest,
+ int const inputSize,
+ int const maxOutputSize,
+ int compressionLevel,
+ limitedOutput_directive limit
+ )
{
- const u8 *ip = (const u8 *)source;
- const u8 *anchor = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- const u8 *const matchlimit = (iend - LASTLITERALS);
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = (iend - LASTLITERALS);
- u8 *op = (u8 *)dest;
+ BYTE *op = (BYTE *) dest;
+ BYTE * const oend = op + maxOutputSize;
+ unsigned int maxNbAttempts;
int ml, ml2, ml3, ml0;
- const u8 *ref = NULL;
- const u8 *start2 = NULL;
- const u8 *ref2 = NULL;
- const u8 *start3 = NULL;
- const u8 *ref3 = NULL;
- const u8 *start0;
- const u8 *ref0;
- int lastrun;
+ const BYTE *ref = NULL;
+ const BYTE *start2 = NULL;
+ const BYTE *ref2 = NULL;
+ const BYTE *start3 = NULL;
+ const BYTE *ref3 = NULL;
+ const BYTE *start0;
+ const BYTE *ref0;
+
+ /* init */
+ if (compressionLevel > LZ4HC_MAX_CLEVEL)
+ compressionLevel = LZ4HC_MAX_CLEVEL;
+ if (compressionLevel < 1)
+ compressionLevel = LZ4HC_DEFAULT_CLEVEL;
+ maxNbAttempts = 1 << (compressionLevel - 1);
+ ctx->end += inputSize;
ip++;
/* Main Loop */
while (ip < mflimit) {
- ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref));
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
+ matchlimit, (&ref), maxNbAttempts);
if (!ml) {
ip++;
continue;
@@ -351,51 +387,59 @@ static int lz4_compresshcctx(struct lz4hc_data *ctx,
start0 = ip;
ref0 = ref;
ml0 = ml;
-_search2:
- if (ip+ml < mflimit)
- ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2,
- ip + 1, matchlimit, ml, &ref2, &start2);
+
+_Search2:
+ if (ip + ml < mflimit)
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0,
+ matchlimit, ml, &ref2,
+ &start2, maxNbAttempts);
else
ml2 = ml;
- /* No better match */
+
if (ml2 == ml) {
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ /* No better match */
+ if (LZ4HC_encodeSequence(&ip, &op,
+ &anchor, ml, ref, limit, oend))
+ return 0;
continue;
}
if (start0 < ip) {
- /* empirical */
if (start2 < ip + ml0) {
+ /* empirical */
ip = start0;
ref = ref0;
ml = ml0;
}
}
- /*
- * Here, start0==ip
- * First Match too small : removed
- */
+
+ /* Here, start0 == ip */
if ((start2 - ip) < 3) {
+ /* First Match too small : removed */
ml = ml2;
ip = start2;
ref = ref2;
- goto _search2;
+ goto _Search2;
}
-_search3:
+_Search3:
/*
- * Currently we have :
- * ml2 > ml1, and
- * ip1+3 <= ip2 (usually < ip1+ml1)
- */
+ * Currently we have :
+ * ml2 > ml1, and
+ * ip1 + 3 <= ip2 (usually < ip1 + ml1)
+ */
if ((start2 - ip) < OPTIMAL_ML) {
int correction;
int new_ml = ml;
+
if (new_ml > OPTIMAL_ML)
new_ml = OPTIMAL_ML;
if (ip + new_ml > start2 + ml2 - MINMATCH)
new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+
correction = new_ml - (int)(start2 - ip);
+
if (correction > 0) {
start2 += correction;
ref2 += correction;
@@ -403,39 +447,44 @@ _search3:
}
}
/*
- * Now, we have start2 = ip+new_ml,
- * with new_ml=min(ml, OPTIMAL_ML=18)
+ * Now, we have start2 = ip + new_ml,
+ * with new_ml = min(ml, OPTIMAL_ML = 18)
*/
+
if (start2 + ml2 < mflimit)
- ml3 = lz4hc_insertandgetwidermatch(ctx,
- start2 + ml2 - 3, start2, matchlimit,
- ml2, &ref3, &start3);
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2,
+ matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts);
else
ml3 = ml2;
- /* No better match : 2 sequences to encode */
if (ml3 == ml2) {
+ /* No better match : 2 sequences to encode */
/* ip & ref are known; Now for ml */
- if (start2 < ip+ml)
+ if (start2 < ip + ml)
ml = (int)(start2 - ip);
-
/* Now, encode 2 sequences */
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
ip = start2;
- lz4_encodesequence(&ip, &op, &anchor, ml2, ref2);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml2, ref2, limit, oend))
+ return 0;
continue;
}
- /* Not enough space for match 2 : remove it */
if (start3 < ip + ml + 3) {
- /*
- * can write Seq1 immediately ==> Seq2 is removed,
- * so Seq3 becomes Seq1
- */
+ /* Not enough space for match 2 : remove it */
if (start3 >= (ip + ml)) {
+ /* can write Seq1 immediately
+ * ==> Seq2 is removed,
+ * so Seq3 becomes Seq1
+ */
if (start2 < ip + ml) {
- int correction =
- (int)(ip + ml - start2);
+ int correction = (int)(ip + ml - start2);
+
start2 += correction;
ref2 += correction;
ml2 -= correction;
@@ -446,35 +495,38 @@ _search3:
}
}
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
- ip = start3;
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
+ ip = start3;
ref = ref3;
- ml = ml3;
+ ml = ml3;
start0 = start2;
ref0 = ref2;
ml0 = ml2;
- goto _search2;
+ goto _Search2;
}
start2 = start3;
ref2 = ref3;
ml2 = ml3;
- goto _search3;
+ goto _Search3;
}
/*
- * OK, now we have 3 ascending matches; let's write at least
- * the first one ip & ref are known; Now for ml
- */
+ * OK, now we have 3 ascending matches;
+ * let's write at least the first one
+ * ip & ref are known; Now for ml
+ */
if (start2 < ip + ml) {
if ((start2 - ip) < (int)ML_MASK) {
int correction;
+
if (ml > OPTIMAL_ML)
ml = OPTIMAL_ML;
if (ip + ml > start2 + ml2 - MINMATCH)
- ml = (int)(start2 - ip) + ml2
- - MINMATCH;
+ ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = ml - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
@@ -484,7 +536,9 @@ _search3:
} else
ml = (int)(start2 - ip);
}
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
+ ref, limit, oend))
+ return 0;
ip = start2;
ref = ref2;
@@ -494,46 +548,222 @@ _search3:
ref2 = ref3;
ml2 = ml3;
- goto _search3;
+ goto _Search3;
}
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8) lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ {
+ int lastRun = (int)(iend - anchor);
+
+ if ((limit)
+ && (((char *)op - dest) + lastRun + 1
+ + ((lastRun + 255 - RUN_MASK)/255)
+ > (U32)maxOutputSize)) {
+ /* Check output limit */
+ return 0;
+ }
+ if (lastRun >= (int)RUN_MASK) {
+ *op++ = (RUN_MASK<<ML_BITS);
+ lastRun -= RUN_MASK;
+ for (; lastRun > 254 ; lastRun -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) lastRun;
+ } else
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ memcpy(op, anchor, iend - anchor);
+ op += iend - anchor;
+ }
+
/* End */
return (int) (((char *)op) - dest);
}
-int lz4hc_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem)
+static int LZ4_compress_HC_extStateHC(
+ void *state,
+ const char *src,
+ char *dst,
+ int srcSize,
+ int maxDstSize,
+ int compressionLevel)
{
- int ret = -1;
- int out_len = 0;
+ LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
- struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem;
- lz4hc_init(hc4, (const u8 *)src);
- out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src,
- (char *)dst, (int)src_len);
+ if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
+ /* Error : state is not aligned
+ * for pointers (32 or 64 bits)
+ */
+ return 0;
+ }
- if (out_len < 0)
- goto exit;
+ LZ4HC_init(ctx, (const BYTE *)src);
- *dst_len = out_len;
- return 0;
+ if (maxDstSize < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, noLimit);
+}
+
+int LZ4_compress_HC(const char *src, char *dst, int srcSize,
+ int maxDstSize, int compressionLevel, void *wrkmem)
+{
+ return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
+ srcSize, maxDstSize, compressionLevel);
+}
+EXPORT_SYMBOL(LZ4_compress_HC);
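
For illustration, a minimal sketch of how a caller might drive the one-shot entry point above. It assumes the companion include/linux/lz4.h from this series supplies LZ4HC_MEM_COMPRESS for sizing the scratch area; LZ4_compressBound() and LZ4HC_DEFAULT_CLEVEL are the same symbols referenced by the code above.

#include <linux/lz4.h>
#include <linux/vmalloc.h>

/* Sketch only: compress src_len bytes into dst, returning the compressed
 * size or 0 on failure.  LZ4HC_MEM_COMPRESS (assumed to come from lz4.h)
 * is the required size of the caller-provided scratch area.
 */
static int example_lz4hc_oneshot(const char *src, int src_len,
				 char *dst, int dst_len)
{
	void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);
	int ret;

	if (!wrkmem)
		return 0;

	/* A dst_len of at least LZ4_compressBound(src_len) avoids the
	 * limitedOutput path; LZ4_compress_HC() returns 0 if dst is too small.
	 */
	ret = LZ4_compress_HC(src, dst, src_len, dst_len,
			      LZ4HC_DEFAULT_CLEVEL, wrkmem);

	vfree(wrkmem);
	return ret;
}
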
+
+/**************************************
+ * Streaming Functions
+ **************************************/
+void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
+}
+
+int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *dictionary,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+ if (dictSize > 64 * KB) {
+ dictionary += dictSize - 64 * KB;
+ dictSize = 64 * KB;
+ }
+ LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
+ if (dictSize >= 4)
+ LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
+ ctxPtr->end = (const BYTE *)dictionary + dictSize;
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDictHC);
-exit:
- return ret;
+/* compression */
+
+static void LZ4HC_setExternalDict(
+ LZ4HC_CCtx_internal *ctxPtr,
+ const BYTE *newBlock)
+{
+ if (ctxPtr->end >= ctxPtr->base + 4) {
+ /* Referencing remaining dictionary content */
+ LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
+ }
+
+ /*
+ * Only one memory segment for extDict,
+ * so any previous extDict is lost at this stage
+ */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ /* match referencing will resume from there */
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit;
+}
+
+static int LZ4_compressHC_continue_generic(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+ /* auto-init if forgotten */
+ if (ctxPtr->base == NULL)
+ LZ4HC_init(ctxPtr, (const BYTE *) source);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
+ - ctxPtr->dictLimit;
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr,
+ (const char *)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE *)source != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+ const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+
+ if ((sourceEnd > dictBegin)
+ && ((const BYTE *)source < dictEnd)) {
+ if (sourceEnd > dictEnd)
+ sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ }
+ }
+
+ return LZ4HC_compress_generic(ctxPtr, source, dest,
+ inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize)
+{
+ if (maxOutputSize < LZ4_compressBound(inputSize))
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, noLimit);
+}
+EXPORT_SYMBOL(LZ4_compress_HC_continue);
+
+/* dictionary saving */
+
+int LZ4_saveDictHC(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ char *safeBuffer,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end
+ - (streamPtr->base + streamPtr->dictLimit));
+
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ if (dictSize < 4)
+ dictSize = 0;
+ if (dictSize > prefixSize)
+ dictSize = prefixSize;
+
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+
+ {
+ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+
+ streamPtr->end = (const BYTE *)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->dictLimit = endIndex - dictSize;
+ streamPtr->lowLimit = endIndex - dictSize;
+
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
}
-EXPORT_SYMBOL(lz4hc_compress);
+EXPORT_SYMBOL(LZ4_saveDictHC);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4HC compressor");
+MODULE_DESCRIPTION("LZ4 HC compressor");
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 75554754eadf..5f7999eacad5 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -77,7 +77,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
* Force flush any remote buffers that might be stuck in IRQ context
* and therefore could not run their irq_work.
*/
- printk_nmi_flush();
+ printk_safe_flush();
clear_bit_unlock(0, &backtrace_flag);
put_cpu();
diff --git a/lib/parman.c b/lib/parman.c
new file mode 100644
index 000000000000..c6e42a8db824
--- /dev/null
+++ b/lib/parman.c
@@ -0,0 +1,376 @@
+/*
+ * lib/parman.c - Manager for linear priority array areas
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/parman.h>
+
+struct parman_algo {
+ int (*item_add)(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+ void (*item_remove)(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+};
+
+struct parman {
+ const struct parman_ops *ops;
+ void *priv;
+ const struct parman_algo *algo;
+ unsigned long count;
+ unsigned long limit_count;
+ struct list_head prio_list;
+};
+
+static int parman_enlarge(struct parman *parman)
+{
+ unsigned long new_count = parman->limit_count +
+ parman->ops->resize_step;
+ int err;
+
+ err = parman->ops->resize(parman->priv, new_count);
+ if (err)
+ return err;
+ parman->limit_count = new_count;
+ return 0;
+}
+
+static int parman_shrink(struct parman *parman)
+{
+ unsigned long new_count = parman->limit_count -
+ parman->ops->resize_step;
+ int err;
+
+ if (new_count < parman->ops->base_count)
+ return 0;
+ err = parman->ops->resize(parman->priv, new_count);
+ if (err)
+ return err;
+ parman->limit_count = new_count;
+ return 0;
+}
+
+static bool parman_prio_used(struct parman_prio *prio)
+{
+ return !list_empty(&prio->item_list);
+}
+
+static struct parman_item *parman_prio_first_item(struct parman_prio *prio)
+{
+ return list_first_entry(&prio->item_list,
+ typeof(struct parman_item), list);
+}
+
+static unsigned long parman_prio_first_index(struct parman_prio *prio)
+{
+ return parman_prio_first_item(prio)->index;
+}
+
+static struct parman_item *parman_prio_last_item(struct parman_prio *prio)
+{
+ return list_last_entry(&prio->item_list,
+ typeof(struct parman_item), list);
+}
+
+static unsigned long parman_prio_last_index(struct parman_prio *prio)
+{
+ return parman_prio_last_item(prio)->index;
+}
+
+static unsigned long parman_lsort_new_index_find(struct parman *parman,
+ struct parman_prio *prio)
+{
+ list_for_each_entry_from_reverse(prio, &parman->prio_list, list) {
+ if (!parman_prio_used(prio))
+ continue;
+ return parman_prio_last_index(prio) + 1;
+ }
+ return 0;
+}
+
+static void __parman_prio_move(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item, unsigned long to_index,
+ unsigned long count)
+{
+ parman->ops->move(parman->priv, item->index, to_index, count);
+}
+
+static void parman_prio_shift_down(struct parman *parman,
+ struct parman_prio *prio)
+{
+ struct parman_item *item;
+ unsigned long to_index;
+
+ if (!parman_prio_used(prio))
+ return;
+ item = parman_prio_first_item(prio);
+ to_index = parman_prio_last_index(prio) + 1;
+ __parman_prio_move(parman, prio, item, to_index, 1);
+ list_move_tail(&item->list, &prio->item_list);
+ item->index = to_index;
+}
+
+static void parman_prio_shift_up(struct parman *parman,
+ struct parman_prio *prio)
+{
+ struct parman_item *item;
+ unsigned long to_index;
+
+ if (!parman_prio_used(prio))
+ return;
+ item = parman_prio_last_item(prio);
+ to_index = parman_prio_first_index(prio) - 1;
+ __parman_prio_move(parman, prio, item, to_index, 1);
+ list_move(&item->list, &prio->item_list);
+ item->index = to_index;
+}
+
+static void parman_prio_item_remove(struct parman *parman,
+ struct parman_prio *prio,
+ struct parman_item *item)
+{
+ struct parman_item *last_item;
+ unsigned long to_index;
+
+ last_item = parman_prio_last_item(prio);
+ if (last_item == item) {
+ list_del(&item->list);
+ return;
+ }
+ to_index = item->index;
+ __parman_prio_move(parman, prio, last_item, to_index, 1);
+ list_del(&last_item->list);
+ list_replace(&item->list, &last_item->list);
+ last_item->index = to_index;
+}
+
+static int parman_lsort_item_add(struct parman *parman,
+ struct parman_prio *prio,
+ struct parman_item *item)
+{
+ struct parman_prio *prio2;
+ unsigned long new_index;
+ int err;
+
+ if (parman->count + 1 > parman->limit_count) {
+ err = parman_enlarge(parman);
+ if (err)
+ return err;
+ }
+
+ new_index = parman_lsort_new_index_find(parman, prio);
+ list_for_each_entry_reverse(prio2, &parman->prio_list, list) {
+ if (prio2 == prio)
+ break;
+ parman_prio_shift_down(parman, prio2);
+ }
+ item->index = new_index;
+ list_add_tail(&item->list, &prio->item_list);
+ parman->count++;
+ return 0;
+}
+
+static void parman_lsort_item_remove(struct parman *parman,
+ struct parman_prio *prio,
+ struct parman_item *item)
+{
+ parman_prio_item_remove(parman, prio, item);
+ list_for_each_entry_continue(prio, &parman->prio_list, list)
+ parman_prio_shift_up(parman, prio);
+ parman->count--;
+ if (parman->limit_count - parman->count >= parman->ops->resize_step)
+ parman_shrink(parman);
+}
+
+static const struct parman_algo parman_lsort = {
+ .item_add = parman_lsort_item_add,
+ .item_remove = parman_lsort_item_remove,
+};
+
+static const struct parman_algo *parman_algos[] = {
+ &parman_lsort,
+};
+
+/**
+ * parman_create - creates a new parman instance
+ * @ops: caller-specific callbacks
+ * @priv: pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Each parman instance manages an array area with chunks of entries
+ * with the same priority. Consider the following example:
+ *
+ * item 1 with prio 10
+ * item 2 with prio 10
+ * item 3 with prio 10
+ * item 4 with prio 20
+ * item 5 with prio 20
+ * item 6 with prio 30
+ * item 7 with prio 30
+ * item 8 with prio 30
+ *
+ * In this example, there are 3 priority chunks. The order of the priorities
+ * matters; however, the order of items within a single priority chunk does
+ * not matter. So the same array could equally well be ordered as follows:
+ *
+ * item 2 with prio 10
+ * item 3 with prio 10
+ * item 1 with prio 10
+ * item 5 with prio 20
+ * item 4 with prio 20
+ * item 7 with prio 30
+ * item 8 with prio 30
+ * item 6 with prio 30
+ *
+ * The goal of parman is to maintain the priority ordering. The caller
+ * provides @ops with callbacks parman uses to move the items
+ * and resize the array area (see the usage sketch after this file's diff).
+ *
+ * Returns a pointer to newly created parman instance in case of success,
+ * otherwise it returns NULL.
+ */
+struct parman *parman_create(const struct parman_ops *ops, void *priv)
+{
+ struct parman *parman;
+
+ parman = kzalloc(sizeof(*parman), GFP_KERNEL);
+ if (!parman)
+ return NULL;
+ INIT_LIST_HEAD(&parman->prio_list);
+ parman->ops = ops;
+ parman->priv = priv;
+ parman->limit_count = ops->base_count;
+ parman->algo = parman_algos[ops->algo];
+ return parman;
+}
+EXPORT_SYMBOL(parman_create);
+
+/**
+ * parman_destroy - destroys existing parman instance
+ * @parman: parman instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_destroy(struct parman *parman)
+{
+ WARN_ON(!list_empty(&parman->prio_list));
+ kfree(parman);
+}
+EXPORT_SYMBOL(parman_destroy);
+
+/**
+ * parman_prio_init - initializes a parman priority chunk
+ * @parman: parman instance
+ * @prio: parman prio structure to be initialized
+ * @priority: desired priority of the chunk
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Before the caller can add an item with a certain priority, a priority
+ * chunk has to be initialized for it using this function.
+ */
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+ unsigned long priority)
+{
+ struct parman_prio *prio2;
+ struct list_head *pos;
+
+ INIT_LIST_HEAD(&prio->item_list);
+ prio->priority = priority;
+
+ /* Position inside the list according to priority */
+ list_for_each(pos, &parman->prio_list) {
+ prio2 = list_entry(pos, typeof(*prio2), list);
+ if (prio2->priority > prio->priority)
+ break;
+ }
+ list_add_tail(&prio->list, pos);
+}
+EXPORT_SYMBOL(parman_prio_init);
+
+/**
+ * parman_prio_fini - finalizes use of parman priority chunk
+ * @prio: parman prio structure
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_prio_fini(struct parman_prio *prio)
+{
+ WARN_ON(parman_prio_used(prio));
+ list_del(&prio->list);
+}
+EXPORT_SYMBOL(parman_prio_fini);
+
+/**
+ * parman_item_add - adds a parman item under defined priority
+ * @parman: parman instance
+ * @prio: parman prio instance to add the item to
+ * @item: parman item instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Adds an item to the array managed by the parman instance, under the
+ * specified priority.
+ *
+ * Returns 0 in case of success, negative number to indicate an error.
+ */
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item)
+{
+ return parman->algo->item_add(parman, prio, item);
+}
+EXPORT_SYMBOL(parman_item_add);
+
+/**
+ * parman_item_remove - deletes parman item
+ * @parman: parman instance
+ * @prio: parman prio instance to delete the item from
+ * @item: parman item instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item)
+{
+ parman->algo->item_remove(parman, prio, item);
+}
+EXPORT_SYMBOL(parman_item_remove);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Priority-based array manager");
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c8cebb137076..9c21000df0b5 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu)
spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
- unsigned long flags;
- raw_spin_lock_irqsave(&fbc->lock, flags);
+ raw_spin_lock(&fbc->lock);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
- raw_spin_unlock_irqrestore(&fbc->lock, flags);
+ raw_spin_unlock(&fbc->lock);
}
spin_unlock_irq(&percpu_counters_lock);
#endif
diff --git a/lib/prime_numbers.c b/lib/prime_numbers.c
new file mode 100644
index 000000000000..550eec457c2e
--- /dev/null
+++ b/lib/prime_numbers.c
@@ -0,0 +1,315 @@
+#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+
+#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+
+struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+ unsigned long primes[];
+};
+
+#if BITS_PER_LONG == 64
+static const struct primes small_primes = {
+ .last = 61,
+ .sz = 64,
+ .primes = {
+ BIT(2) |
+ BIT(3) |
+ BIT(5) |
+ BIT(7) |
+ BIT(11) |
+ BIT(13) |
+ BIT(17) |
+ BIT(19) |
+ BIT(23) |
+ BIT(29) |
+ BIT(31) |
+ BIT(37) |
+ BIT(41) |
+ BIT(43) |
+ BIT(47) |
+ BIT(53) |
+ BIT(59) |
+ BIT(61)
+ }
+};
+#elif BITS_PER_LONG == 32
+static const struct primes small_primes = {
+ .last = 31,
+ .sz = 32,
+ .primes = {
+ BIT(2) |
+ BIT(3) |
+ BIT(5) |
+ BIT(7) |
+ BIT(11) |
+ BIT(13) |
+ BIT(17) |
+ BIT(19) |
+ BIT(23) |
+ BIT(29) |
+ BIT(31)
+ }
+};
+#else
+#error "unhandled BITS_PER_LONG"
+#endif
+
+static DEFINE_MUTEX(lock);
+static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
+
+static unsigned long selftest_max;
+
+static bool slow_is_prime_number(unsigned long x)
+{
+ unsigned long y = int_sqrt(x);
+
+ while (y > 1) {
+ if ((x % y) == 0)
+ break;
+ y--;
+ }
+
+ return y == 1;
+}
+
+static unsigned long slow_next_prime_number(unsigned long x)
+{
+ while (x < ULONG_MAX && !slow_is_prime_number(++x))
+ ;
+
+ return x;
+}
+
+static unsigned long clear_multiples(unsigned long x,
+ unsigned long *p,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long m;
+
+ m = 2 * x;
+ if (m < start)
+ m = roundup(start, x);
+
+ while (m < end) {
+ __clear_bit(m, p);
+ m += x;
+ }
+
+ return x;
+}
+
+static bool expand_to_next_prime(unsigned long x)
+{
+ const struct primes *p;
+ struct primes *new;
+ unsigned long sz, y;
+
+ /* Bertrand's Postulate (or Chebyshev's theorem) states that if n > 3,
+ * there is always at least one prime p between n and 2n - 2.
+ * Equivalently, if n > 1, then there is always at least one prime p
+ * such that n < p < 2n.
+ *
+ * http://mathworld.wolfram.com/BertrandsPostulate.html
+ * https://en.wikipedia.org/wiki/Bertrand's_postulate
+ */
+ sz = 2 * x;
+ if (sz < x)
+ return false;
+
+ sz = round_up(sz, BITS_PER_LONG);
+ new = kmalloc(sizeof(*new) + bitmap_size(sz),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new)
+ return false;
+
+ mutex_lock(&lock);
+ p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+ if (x < p->last) {
+ kfree(new);
+ goto unlock;
+ }
+
+ /* Where memory permits, track the primes using the
+ * Sieve of Eratosthenes. The sieve removes all multiples of known
+ * primes from the set; what remains in the set is therefore prime.
+ */
+ bitmap_fill(new->primes, sz);
+ bitmap_copy(new->primes, p->primes, p->sz);
+ for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
+ new->last = clear_multiples(y, new->primes, p->sz, sz);
+ new->sz = sz;
+
+ BUG_ON(new->last <= x);
+
+ rcu_assign_pointer(primes, new);
+ if (p != &small_primes)
+ kfree_rcu((struct primes *)p, rcu);
+
+unlock:
+ mutex_unlock(&lock);
+ return true;
+}
+
+static void free_primes(void)
+{
+ const struct primes *p;
+
+ mutex_lock(&lock);
+ p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+ if (p != &small_primes) {
+ rcu_assign_pointer(primes, &small_primes);
+ kfree_rcu((struct primes *)p, rcu);
+ }
+ mutex_unlock(&lock);
+}
+
+/**
+ * next_prime_number - return the next prime number
+ * @x: the starting point for the search
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. The set of prime numbers is computed using the Sieve of
+ * Eratosthenes (on finding a prime, all multiples of that prime are removed
+ * from the set), enabling a fast lookup of the next prime number larger than
+ * @x. If the sieve fails (memory limitation), the search falls back to using
+ * slow trial-division, up to the value of ULONG_MAX (which is reported as the
+ * final prime as a sentinel).
+ *
+ * Returns: the next prime number larger than @x
+ */
+unsigned long next_prime_number(unsigned long x)
+{
+ const struct primes *p;
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ while (x >= p->last) {
+ rcu_read_unlock();
+
+ if (!expand_to_next_prime(x))
+ return slow_next_prime_number(x);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ }
+ x = find_next_bit(p->primes, p->last, x + 1);
+ rcu_read_unlock();
+
+ return x;
+}
+EXPORT_SYMBOL(next_prime_number);
+
+/**
+ * is_prime_number - test whether the given number is prime
+ * @x: the number to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. Internally a cache of prime numbers is kept (to speed up
+ * searching for sequential primes, see next_prime_number()), but if the number
+ * falls outside of that cache, its primality is tested using trial-division.
+ *
+ * Returns: true if @x is prime, false for composite numbers.
+ */
+bool is_prime_number(unsigned long x)
+{
+ const struct primes *p;
+ bool result;
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ while (x >= p->sz) {
+ rcu_read_unlock();
+
+ if (!expand_to_next_prime(x))
+ return slow_is_prime_number(x);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ }
+ result = test_bit(x, p->primes);
+ rcu_read_unlock();
+
+ return result;
+}
+EXPORT_SYMBOL(is_prime_number);
+
+static void dump_primes(void)
+{
+ const struct primes *p;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+
+ if (buf)
+ bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+ pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+
+ rcu_read_unlock();
+
+ kfree(buf);
+}
+
+static int selftest(unsigned long max)
+{
+ unsigned long x, last;
+
+ if (!max)
+ return 0;
+
+ for (last = 0, x = 2; x < max; x++) {
+ bool slow = slow_is_prime_number(x);
+ bool fast = is_prime_number(x);
+
+ if (slow != fast) {
+ pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+ x, slow ? "yes" : "no", fast ? "yes" : "no");
+ goto err;
+ }
+
+ if (!slow)
+ continue;
+
+ if (next_prime_number(last) != x) {
+ pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+ last, x, next_prime_number(last));
+ goto err;
+ }
+ last = x;
+ }
+
+ pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+ return 0;
+
+err:
+ dump_primes();
+ return -EINVAL;
+}
+
+static int __init primes_init(void)
+{
+ return selftest(selftest_max);
+}
+
+static void __exit primes_exit(void)
+{
+ free_primes();
+}
+
+module_init(primes_init);
+module_exit(primes_exit);
+
+module_param_named(selftest, selftest_max, ulong, 0400);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f382e07de77..5ed506d648c4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -22,20 +22,21 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/radix-tree.h>
+#include <linux/kmemleak.h>
#include <linux/percpu.h>
+#include <linux/preempt.h> /* in_interrupt() */
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
#include <linux/slab.h>
-#include <linux/kmemleak.h>
-#include <linux/cpu.h>
#include <linux/string.h>
-#include <linux/bitops.h>
-#include <linux/rcupdate.h>
-#include <linux/preempt.h> /* in_interrupt() */
/* Number of nodes in fully populated tree of given height */
@@ -60,11 +61,28 @@ static struct kmem_cache *radix_tree_node_cachep;
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
/*
+ * The IDR does not have to be as high as the radix tree since it uses
+ * signed integers, not unsigned longs.
+ */
+#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
+#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
+
+/*
+ * The IDA is even shorter since it uses a bitmap at the last level.
+ */
+#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
+#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
+
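
As a worked example (assuming the common RADIX_TREE_MAP_SHIFT of 6 and a 32-bit int), the IDR preload depth introduced above comes out as:

/* Illustrative arithmetic only (assumes RADIX_TREE_MAP_SHIFT == 6):
 * IDR_INDEX_BITS   = 8 * sizeof(int) - 1 = 31
 * IDR_MAX_PATH     = DIV_ROUND_UP(31, 6) = 6
 * IDR_PRELOAD_SIZE = 6 * 2 - 1           = 11 preallocated nodes per CPU
 */
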
+/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
unsigned nr;
- /* nodes->private_data points to next preallocated node */
+ /* nodes->parent points to next preallocated node */
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
@@ -83,35 +101,38 @@ static inline void *node_to_entry(void *ptr)
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
-static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
+static inline
+bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
- void **ptr = node;
+ void __rcu **ptr = node;
return (parent->slots <= ptr) &&
(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
-static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
+static inline
+bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
return false;
}
#endif
-static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
- void **slot)
+static inline unsigned long
+get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
return slot - parent->slots;
}
-static unsigned int radix_tree_descend(struct radix_tree_node *parent,
+static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
struct radix_tree_node **nodep, unsigned long index)
{
unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
- void **entry = rcu_dereference_raw(parent->slots[offset]);
+ void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
#ifdef CONFIG_RADIX_TREE_MULTIORDER
if (radix_tree_is_internal_node(entry)) {
if (is_sibling_entry(parent, entry)) {
- void **sibentry = (void **) entry_to_node(entry);
+ void __rcu **sibentry;
+ sibentry = (void __rcu **) entry_to_node(entry);
offset = get_slot_offset(parent, sibentry);
entry = rcu_dereference_raw(*sibentry);
}
@@ -122,7 +143,7 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
return offset;
}
-static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
+static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
return root->gfp_mask & __GFP_BITS_MASK;
}
@@ -139,42 +160,48 @@ static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
__clear_bit(offset, node->tags[tag]);
}
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
int offset)
{
return test_bit(offset, node->tags[tag]);
}
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
- root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+ root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
- root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+ root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
{
- root->gfp_mask &= __GFP_BITS_MASK;
+ root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
+}
+
+static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
+{
+ return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
}
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
- return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+ return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}
-static inline unsigned root_tags_get(struct radix_tree_root *root)
+static inline bool is_idr(const struct radix_tree_root *root)
{
- return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
+ return !!(root->gfp_mask & ROOT_IS_IDR);
}
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
*/
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+static inline int any_tag_set(const struct radix_tree_node *node,
+ unsigned int tag)
{
unsigned idx;
for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
@@ -184,6 +211,11 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
return 0;
}
+static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+ bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
+}
+
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
@@ -232,11 +264,18 @@ static inline unsigned long shift_maxindex(unsigned int shift)
return (RADIX_TREE_MAP_SIZE << shift) - 1;
}
-static inline unsigned long node_maxindex(struct radix_tree_node *node)
+static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
return shift_maxindex(node->shift);
}
+static unsigned long next_index(unsigned long index,
+ const struct radix_tree_node *node,
+ unsigned long offset)
+{
+ return (index & ~node_maxindex(node)) + (offset << node->shift);
+}
+
#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
@@ -275,11 +314,59 @@ static void radix_tree_dump(struct radix_tree_root *root)
{
pr_debug("radix root: %p rnode %p tags %x\n",
root, root->rnode,
- root->gfp_mask >> __GFP_BITS_SHIFT);
+ root->gfp_mask >> ROOT_TAG_SHIFT);
if (!radix_tree_is_internal_node(root->rnode))
return;
dump_node(entry_to_node(root->rnode), 0);
}
+
+static void dump_ida_node(void *entry, unsigned long index)
+{
+ unsigned long i;
+
+ if (!entry)
+ return;
+
+ if (radix_tree_is_internal_node(entry)) {
+ struct radix_tree_node *node = entry_to_node(entry);
+
+ pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
+ node, node->offset, index * IDA_BITMAP_BITS,
+ ((index | node_maxindex(node)) + 1) *
+ IDA_BITMAP_BITS - 1,
+ node->parent, node->tags[0][0], node->shift,
+ node->count);
+ for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
+ dump_ida_node(node->slots[i],
+ index | (i << node->shift));
+ } else if (radix_tree_exceptional_entry(entry)) {
+ pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
+ entry, (int)(index & RADIX_TREE_MAP_MASK),
+ index * IDA_BITMAP_BITS,
+ index * IDA_BITMAP_BITS + BITS_PER_LONG -
+ RADIX_TREE_EXCEPTIONAL_SHIFT,
+ (unsigned long)entry >>
+ RADIX_TREE_EXCEPTIONAL_SHIFT);
+ } else {
+ struct ida_bitmap *bitmap = entry;
+
+ pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
+ (int)(index & RADIX_TREE_MAP_MASK),
+ index * IDA_BITMAP_BITS,
+ (index + 1) * IDA_BITMAP_BITS - 1);
+ for (i = 0; i < IDA_BITMAP_LONGS; i++)
+ pr_cont(" %lx", bitmap->bitmap[i]);
+ pr_cont("\n");
+ }
+}
+
+static void ida_dump(struct ida *ida)
+{
+ struct radix_tree_root *root = &ida->ida_rt;
+ pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
+ root->gfp_mask >> ROOT_TAG_SHIFT);
+ dump_ida_node(root->rnode, 0);
+}
#endif
/*
@@ -287,13 +374,12 @@ static void radix_tree_dump(struct radix_tree_root *root)
* that the caller has pinned this thread of control to the current CPU.
*/
static struct radix_tree_node *
-radix_tree_node_alloc(struct radix_tree_root *root,
- struct radix_tree_node *parent,
+radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
+ struct radix_tree_root *root,
unsigned int shift, unsigned int offset,
unsigned int count, unsigned int exceptional)
{
struct radix_tree_node *ret = NULL;
- gfp_t gfp_mask = root_gfp_mask(root);
/*
* Preload code isn't irq safe and it doesn't make sense to use
@@ -321,8 +407,7 @@ radix_tree_node_alloc(struct radix_tree_root *root,
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
- rtp->nodes = ret->private_data;
- ret->private_data = NULL;
+ rtp->nodes = ret->parent;
rtp->nr--;
}
/*
@@ -336,11 +421,12 @@ radix_tree_node_alloc(struct radix_tree_root *root,
out:
BUG_ON(radix_tree_is_internal_node(ret));
if (ret) {
- ret->parent = parent;
ret->shift = shift;
ret->offset = offset;
ret->count = count;
ret->exceptional = exceptional;
+ ret->parent = parent;
+ ret->root = root;
}
return ret;
}
@@ -399,7 +485,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
preempt_disable();
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
- node->private_data = rtp->nodes;
+ node->parent = rtp->nodes;
rtp->nodes = node;
rtp->nr++;
} else {
@@ -510,7 +596,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
return __radix_tree_preload(gfp_mask, nr_nodes);
}
-static unsigned radix_tree_load_root(struct radix_tree_root *root,
+static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
@@ -530,10 +616,10 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root,
/*
* Extend a radix tree so it can store key @index.
*/
-static int radix_tree_extend(struct radix_tree_root *root,
+static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
unsigned long index, unsigned int shift)
{
- struct radix_tree_node *slot;
+ void *entry;
unsigned int maxshift;
int tag;
@@ -542,32 +628,44 @@ static int radix_tree_extend(struct radix_tree_root *root,
while (index > shift_maxindex(maxshift))
maxshift += RADIX_TREE_MAP_SHIFT;
- slot = root->rnode;
- if (!slot)
+ entry = rcu_dereference_raw(root->rnode);
+ if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
goto out;
do {
- struct radix_tree_node *node = radix_tree_node_alloc(root,
- NULL, shift, 0, 1, 0);
+ struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
+ root, shift, 0, 1, 0);
if (!node)
return -ENOMEM;
- /* Propagate the aggregated tag info into the new root */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (root_tag_get(root, tag))
- tag_set(node, tag, 0);
+ if (is_idr(root)) {
+ all_tag_set(node, IDR_FREE);
+ if (!root_tag_get(root, IDR_FREE)) {
+ tag_clear(node, IDR_FREE, 0);
+ root_tag_set(root, IDR_FREE);
+ }
+ } else {
+ /* Propagate the aggregated tag info to the new child */
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
+ if (root_tag_get(root, tag))
+ tag_set(node, tag, 0);
+ }
}
BUG_ON(shift > BITS_PER_LONG);
- if (radix_tree_is_internal_node(slot)) {
- entry_to_node(slot)->parent = node;
- } else if (radix_tree_exceptional_entry(slot)) {
+ if (radix_tree_is_internal_node(entry)) {
+ entry_to_node(entry)->parent = node;
+ } else if (radix_tree_exceptional_entry(entry)) {
/* Moving an exceptional root->rnode to a node */
node->exceptional = 1;
}
- node->slots[0] = slot;
- slot = node_to_entry(node);
- rcu_assign_pointer(root->rnode, slot);
+ /*
+ * entry was already in the radix tree, so we do not need
+ * rcu_assign_pointer here
+ */
+ node->slots[0] = (void __rcu *)entry;
+ entry = node_to_entry(node);
+ rcu_assign_pointer(root->rnode, entry);
shift += RADIX_TREE_MAP_SHIFT;
} while (shift <= maxshift);
out:
@@ -578,12 +676,14 @@ out:
* radix_tree_shrink - shrink radix tree to minimum height
* @root radix tree root
*/
-static inline void radix_tree_shrink(struct radix_tree_root *root,
+static inline bool radix_tree_shrink(struct radix_tree_root *root,
radix_tree_update_node_t update_node,
void *private)
{
+ bool shrunk = false;
+
for (;;) {
- struct radix_tree_node *node = root->rnode;
+ struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
struct radix_tree_node *child;
if (!radix_tree_is_internal_node(node))
@@ -597,7 +697,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
*/
if (node->count != 1)
break;
- child = node->slots[0];
+ child = rcu_dereference_raw(node->slots[0]);
if (!child)
break;
if (!radix_tree_is_internal_node(child) && node->shift)
@@ -613,7 +713,9 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
* (node->slots[0]), it will be safe to dereference the new
* one (root->rnode) as far as dependent read barriers go.
*/
- root->rnode = child;
+ root->rnode = (void __rcu *)child;
+ if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
+ root_tag_clear(root, IDR_FREE);
/*
* We have a dilemma here. The node's slot[0] must not be
@@ -635,26 +737,34 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
*/
node->count = 0;
if (!radix_tree_is_internal_node(child)) {
- node->slots[0] = RADIX_TREE_RETRY;
+ node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
if (update_node)
update_node(node, private);
}
+ WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node);
+ shrunk = true;
}
+
+ return shrunk;
}
-static void delete_node(struct radix_tree_root *root,
+static bool delete_node(struct radix_tree_root *root,
struct radix_tree_node *node,
radix_tree_update_node_t update_node, void *private)
{
+ bool deleted = false;
+
do {
struct radix_tree_node *parent;
if (node->count) {
- if (node == entry_to_node(root->rnode))
- radix_tree_shrink(root, update_node, private);
- return;
+ if (node_to_entry(node) ==
+ rcu_dereference_raw(root->rnode))
+ deleted |= radix_tree_shrink(root, update_node,
+ private);
+ return deleted;
}
parent = node->parent;
@@ -662,14 +772,23 @@ static void delete_node(struct radix_tree_root *root,
parent->slots[node->offset] = NULL;
parent->count--;
} else {
- root_tag_clear_all(root);
+ /*
+ * Shouldn't the tags already have all been cleared
+ * by the caller?
+ */
+ if (!is_idr(root))
+ root_tag_clear_all(root);
root->rnode = NULL;
}
+ WARN_ON_ONCE(!list_empty(&node->private_list));
radix_tree_node_free(node);
+ deleted = true;
node = parent;
} while (node);
+
+ return deleted;
}
/**
@@ -691,13 +810,14 @@ static void delete_node(struct radix_tree_root *root,
*/
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
unsigned order, struct radix_tree_node **nodep,
- void ***slotp)
+ void __rcu ***slotp)
{
struct radix_tree_node *node = NULL, *child;
- void **slot = (void **)&root->rnode;
+ void __rcu **slot = (void __rcu **)&root->rnode;
unsigned long maxindex;
unsigned int shift, offset = 0;
unsigned long max = index | ((1UL << order) - 1);
+ gfp_t gfp = root_gfp_mask(root);
shift = radix_tree_load_root(root, &child, &maxindex);
@@ -705,18 +825,18 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
if (order > 0 && max == ((1UL << order) - 1))
max++;
if (max > maxindex) {
- int error = radix_tree_extend(root, max, shift);
+ int error = radix_tree_extend(root, gfp, max, shift);
if (error < 0)
return error;
shift = error;
- child = root->rnode;
+ child = rcu_dereference_raw(root->rnode);
}
while (shift > order) {
shift -= RADIX_TREE_MAP_SHIFT;
if (child == NULL) {
/* Have to add a child node. */
- child = radix_tree_node_alloc(root, node, shift,
+ child = radix_tree_node_alloc(gfp, node, root, shift,
offset, 0, 0);
if (!child)
return -ENOMEM;
@@ -739,7 +859,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
return 0;
}
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
* Free any nodes below this node. The tree is presumed to not need
* shrinking, and any user data in the tree is presumed to not need a
@@ -755,7 +874,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
struct radix_tree_node *child = entry_to_node(node);
for (;;) {
- void *entry = child->slots[offset];
+ void *entry = rcu_dereference_raw(child->slots[offset]);
if (radix_tree_is_internal_node(entry) &&
!is_sibling_entry(child, entry)) {
child = entry_to_node(entry);
@@ -767,6 +886,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
struct radix_tree_node *old = child;
offset = child->offset + 1;
child = child->parent;
+ WARN_ON_ONCE(!list_empty(&old->private_list));
radix_tree_node_free(old);
if (old == entry_to_node(node))
return;
@@ -774,8 +894,9 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
}
}
-static inline int insert_entries(struct radix_tree_node *node, void **slot,
- void *item, unsigned order, bool replace)
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+static inline int insert_entries(struct radix_tree_node *node,
+ void __rcu **slot, void *item, unsigned order, bool replace)
{
struct radix_tree_node *child;
unsigned i, n, tag, offset, tags = 0;
@@ -810,7 +931,7 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
}
for (i = 0; i < n; i++) {
- struct radix_tree_node *old = slot[i];
+ struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
if (i) {
rcu_assign_pointer(slot[i], child);
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -837,8 +958,8 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
return n;
}
#else
-static inline int insert_entries(struct radix_tree_node *node, void **slot,
- void *item, unsigned order, bool replace)
+static inline int insert_entries(struct radix_tree_node *node,
+ void __rcu **slot, void *item, unsigned order, bool replace)
{
if (*slot)
return -EEXIST;
@@ -865,7 +986,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
unsigned order, void *item)
{
struct radix_tree_node *node;
- void **slot;
+ void __rcu **slot;
int error;
BUG_ON(radix_tree_is_internal_node(item));
@@ -905,16 +1026,17 @@ EXPORT_SYMBOL(__radix_tree_insert);
* allocated and @root->rnode is used as a direct slot instead of
* pointing to a node, in which case *@nodep will be NULL.
*/
-void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
- struct radix_tree_node **nodep, void ***slotp)
+void *__radix_tree_lookup(const struct radix_tree_root *root,
+ unsigned long index, struct radix_tree_node **nodep,
+ void __rcu ***slotp)
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- void **slot;
+ void __rcu **slot;
restart:
parent = NULL;
- slot = (void **)&root->rnode;
+ slot = (void __rcu **)&root->rnode;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return NULL;
@@ -949,9 +1071,10 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
* exclusive from other writers. Any dereference of the slot must be done
* using radix_tree_deref_slot.
*/
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
+ unsigned long index)
{
- void **slot;
+ void __rcu **slot;
if (!__radix_tree_lookup(root, index, NULL, &slot))
return NULL;
@@ -971,75 +1094,76 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
* them safely). No RCU barriers are required to access or modify the
* returned item, however.
*/
-void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
-static inline int slot_count(struct radix_tree_node *node,
- void **slot)
+static inline void replace_sibling_entries(struct radix_tree_node *node,
+ void __rcu **slot, int count, int exceptional)
{
- int n = 1;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
void *ptr = node_to_entry(slot);
- unsigned offset = get_slot_offset(node, slot);
- int i;
+ unsigned offset = get_slot_offset(node, slot) + 1;
- for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
- if (node->slots[offset + i] != ptr)
+ while (offset < RADIX_TREE_MAP_SIZE) {
+ if (rcu_dereference_raw(node->slots[offset]) != ptr)
break;
- n++;
+ if (count < 0) {
+ node->slots[offset] = NULL;
+ node->count--;
+ }
+ node->exceptional += exceptional;
+ offset++;
}
#endif
- return n;
}
-static void replace_slot(struct radix_tree_root *root,
- struct radix_tree_node *node,
- void **slot, void *item,
- bool warn_typeswitch)
+static void replace_slot(void __rcu **slot, void *item,
+ struct radix_tree_node *node, int count, int exceptional)
{
- void *old = rcu_dereference_raw(*slot);
- int count, exceptional;
-
- WARN_ON_ONCE(radix_tree_is_internal_node(item));
-
- count = !!item - !!old;
- exceptional = !!radix_tree_exceptional_entry(item) -
- !!radix_tree_exceptional_entry(old);
-
- WARN_ON_ONCE(warn_typeswitch && (count || exceptional));
+ if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
+ return;
- if (node) {
+ if (node && (count || exceptional)) {
node->count += count;
- if (exceptional) {
- exceptional *= slot_count(node, slot);
- node->exceptional += exceptional;
- }
+ node->exceptional += exceptional;
+ replace_sibling_entries(node, slot, count, exceptional);
}
rcu_assign_pointer(*slot, item);
}
-static inline void delete_sibling_entries(struct radix_tree_node *node,
- void **slot)
+static bool node_tag_get(const struct radix_tree_root *root,
+ const struct radix_tree_node *node,
+ unsigned int tag, unsigned int offset)
{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- bool exceptional = radix_tree_exceptional_entry(*slot);
- void *ptr = node_to_entry(slot);
- unsigned offset = get_slot_offset(node, slot);
- int i;
+ if (node)
+ return tag_get(node, tag, offset);
+ return root_tag_get(root, tag);
+}
- for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
- if (node->slots[offset + i] != ptr)
- break;
- node->slots[offset + i] = NULL;
- node->count--;
- if (exceptional)
- node->exceptional--;
+/*
+ * IDR users want to be able to store NULL in the tree, so if the slot isn't
+ * free, don't adjust the count, even if it's transitioning between NULL and
+ * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
+ * have empty bits, but it only stores NULL in slots when they're being
+ * deleted.
+ */
+static int calculate_count(struct radix_tree_root *root,
+ struct radix_tree_node *node, void __rcu **slot,
+ void *item, void *old)
+{
+ if (is_idr(root)) {
+ unsigned offset = get_slot_offset(node, slot);
+ bool free = node_tag_get(root, node, IDR_FREE, offset);
+ if (!free)
+ return 0;
+ if (!old)
+ return 1;
}
-#endif
+ return !!item - !!old;
}
/**
@@ -1056,18 +1180,22 @@ static inline void delete_sibling_entries(struct radix_tree_node *node,
*/
void __radix_tree_replace(struct radix_tree_root *root,
struct radix_tree_node *node,
- void **slot, void *item,
+ void __rcu **slot, void *item,
radix_tree_update_node_t update_node, void *private)
{
- if (!item)
- delete_sibling_entries(node, slot);
+ void *old = rcu_dereference_raw(*slot);
+ int exceptional = !!radix_tree_exceptional_entry(item) -
+ !!radix_tree_exceptional_entry(old);
+ int count = calculate_count(root, node, slot, item, old);
+
/*
* This function supports replacing exceptional entries and
* deleting entries, but that needs accounting against the
* node unless the slot is root->rnode.
*/
- replace_slot(root, node, slot, item,
- !node && slot != (void **)&root->rnode);
+ WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
+ (count || exceptional));
+ replace_slot(slot, item, node, count, exceptional);
if (!node)
return;
@@ -1095,10 +1223,11 @@ void __radix_tree_replace(struct radix_tree_root *root,
* radix_tree_iter_replace().
*/
void radix_tree_replace_slot(struct radix_tree_root *root,
- void **slot, void *item)
+ void __rcu **slot, void *item)
{
- replace_slot(root, NULL, slot, item, true);
+ __radix_tree_replace(root, NULL, slot, item, NULL, NULL);
}
+EXPORT_SYMBOL(radix_tree_replace_slot);
/**
* radix_tree_iter_replace - replace item in a slot
@@ -1110,7 +1239,8 @@ void radix_tree_replace_slot(struct radix_tree_root *root,
* Caller must hold tree write locked across split and replacement.
*/
void radix_tree_iter_replace(struct radix_tree_root *root,
- const struct radix_tree_iter *iter, void **slot, void *item)
+ const struct radix_tree_iter *iter,
+ void __rcu **slot, void *item)
{
__radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
}
@@ -1134,7 +1264,7 @@ int radix_tree_join(struct radix_tree_root *root, unsigned long index,
unsigned order, void *item)
{
struct radix_tree_node *node;
- void **slot;
+ void __rcu **slot;
int error;
BUG_ON(radix_tree_is_internal_node(item));
@@ -1169,9 +1299,10 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
unsigned order)
{
struct radix_tree_node *parent, *node, *child;
- void **slot;
+ void __rcu **slot;
unsigned int offset, end;
unsigned n, tag, tags = 0;
+ gfp_t gfp = root_gfp_mask(root);
if (!__radix_tree_lookup(root, index, &parent, &slot))
return -ENOENT;
@@ -1185,7 +1316,8 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
tags |= 1 << tag;
for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
- if (!is_sibling_entry(parent, parent->slots[end]))
+ if (!is_sibling_entry(parent,
+ rcu_dereference_raw(parent->slots[end])))
break;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
if (tags & (1 << tag))
@@ -1209,14 +1341,15 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
for (;;) {
if (node->shift > order) {
- child = radix_tree_node_alloc(root, node,
+ child = radix_tree_node_alloc(gfp, node, root,
node->shift - RADIX_TREE_MAP_SHIFT,
offset, 0, 0);
if (!child)
goto nomem;
if (node != parent) {
node->count++;
- node->slots[offset] = node_to_entry(child);
+ rcu_assign_pointer(node->slots[offset],
+ node_to_entry(child));
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
if (tags & (1 << tag))
tag_set(node, tag, offset);
@@ -1258,6 +1391,22 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
}
#endif
+static void node_tag_set(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ unsigned int tag, unsigned int offset)
+{
+ while (node) {
+ if (tag_get(node, tag, offset))
+ return;
+ tag_set(node, tag, offset);
+ offset = node->offset;
+ node = node->parent;
+ }
+
+ if (!root_tag_get(root, tag))
+ root_tag_set(root, tag);
+}
+
/**
* radix_tree_tag_set - set a tag on a radix tree node
* @root: radix tree root
@@ -1299,6 +1448,18 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_tag_set);
+/**
+ * radix_tree_iter_tag_set - set a tag on the current iterator entry
+ * @root: radix tree root
+ * @iter: iterator state
+ * @tag: tag to set
+ */
+void radix_tree_iter_tag_set(struct radix_tree_root *root,
+ const struct radix_tree_iter *iter, unsigned int tag)
+{
+ node_tag_set(root, iter->node, tag, iter_offset(iter));
+}
+
static void node_tag_clear(struct radix_tree_root *root,
struct radix_tree_node *node,
unsigned int tag, unsigned int offset)
@@ -1319,34 +1480,6 @@ static void node_tag_clear(struct radix_tree_root *root,
root_tag_clear(root, tag);
}
-static void node_tag_set(struct radix_tree_root *root,
- struct radix_tree_node *node,
- unsigned int tag, unsigned int offset)
-{
- while (node) {
- if (tag_get(node, tag, offset))
- return;
- tag_set(node, tag, offset);
- offset = node->offset;
- node = node->parent;
- }
-
- if (!root_tag_get(root, tag))
- root_tag_set(root, tag);
-}
-
-/**
- * radix_tree_iter_tag_set - set a tag on the current iterator entry
- * @root: radix tree root
- * @iter: iterator state
- * @tag: tag to set
- */
-void radix_tree_iter_tag_set(struct radix_tree_root *root,
- const struct radix_tree_iter *iter, unsigned int tag)
-{
- node_tag_set(root, iter->node, tag, iter_offset(iter));
-}
-
/**
* radix_tree_tag_clear - clear a tag on a radix tree node
* @root: radix tree root
@@ -1387,6 +1520,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
EXPORT_SYMBOL(radix_tree_tag_clear);
/**
+ * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
+ * @root: radix tree root
+ * @iter: iterator state
+ * @tag: tag to clear
+ */
+void radix_tree_iter_tag_clear(struct radix_tree_root *root,
+ const struct radix_tree_iter *iter, unsigned int tag)
+{
+ node_tag_clear(root, iter->node, tag, iter_offset(iter));
+}
+
+/**
* radix_tree_tag_get - get a tag on a radix tree node
* @root: radix tree root
* @index: index key
@@ -1401,7 +1546,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
* the RCU lock is held, unless tag modification and node deletion are excluded
* from concurrency.
*/
-int radix_tree_tag_get(struct radix_tree_root *root,
+int radix_tree_tag_get(const struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_node *node, *parent;
@@ -1413,8 +1558,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
return 0;
- if (node == NULL)
- return 0;
while (radix_tree_is_internal_node(node)) {
unsigned offset;
@@ -1422,8 +1565,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
- if (!node)
- return 0;
if (!tag_get(parent, tag, offset))
return 0;
if (node == RADIX_TREE_RETRY)
@@ -1450,6 +1591,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
unsigned tag_long = offset / BITS_PER_LONG;
unsigned tag_bit = offset % BITS_PER_LONG;
+ if (!node) {
+ iter->tags = 1;
+ return;
+ }
+
iter->tags = node->tags[tag][tag_long] >> tag_bit;
/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
@@ -1464,8 +1610,8 @@ static void set_iter_tags(struct radix_tree_iter *iter,
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
-static void **skip_siblings(struct radix_tree_node **nodep,
- void **slot, struct radix_tree_iter *iter)
+static void __rcu **skip_siblings(struct radix_tree_node **nodep,
+ void __rcu **slot, struct radix_tree_iter *iter)
{
void *sib = node_to_entry(slot - 1);
@@ -1482,8 +1628,8 @@ static void **skip_siblings(struct radix_tree_node **nodep,
return NULL;
}
-void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
- unsigned flags)
+void __rcu **__radix_tree_next_slot(void __rcu **slot,
+ struct radix_tree_iter *iter, unsigned flags)
{
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
struct radix_tree_node *node = rcu_dereference_raw(*slot);
@@ -1536,20 +1682,20 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
-static void **skip_siblings(struct radix_tree_node **nodep,
- void **slot, struct radix_tree_iter *iter)
+static void __rcu **skip_siblings(struct radix_tree_node **nodep,
+ void __rcu **slot, struct radix_tree_iter *iter)
{
return slot;
}
#endif
-void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
+void __rcu **radix_tree_iter_resume(void __rcu **slot,
+ struct radix_tree_iter *iter)
{
struct radix_tree_node *node;
slot++;
iter->index = __radix_tree_iter_add(iter, 1);
- node = rcu_dereference_raw(*slot);
skip_siblings(&node, slot, iter);
iter->next_index = iter->index;
iter->tags = 0;
@@ -1565,7 +1711,7 @@ EXPORT_SYMBOL(radix_tree_iter_resume);
* @flags: RADIX_TREE_ITER_* flags and tag index
* Returns: pointer to chunk first slot, or NULL if iteration is over
*/
-void **radix_tree_next_chunk(struct radix_tree_root *root,
+void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags)
{
unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
@@ -1602,7 +1748,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
iter->tags = 1;
iter->node = NULL;
__set_iter_shift(iter, 0);
- return (void **)&root->rnode;
+ return (void __rcu **)&root->rnode;
}
do {
@@ -1620,7 +1766,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
offset + 1);
else
while (++offset < RADIX_TREE_MAP_SIZE) {
- void *slot = node->slots[offset];
+ void *slot = rcu_dereference_raw(
+ node->slots[offset]);
if (is_sibling_entry(node, slot))
continue;
if (slot)
@@ -1676,11 +1823,11 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
* stored in 'results'.
*/
unsigned int
-radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
@@ -1721,12 +1868,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
* protection, radix_tree_deref_slot may fail requiring a retry.
*/
unsigned int
-radix_tree_gang_lookup_slot(struct radix_tree_root *root,
- void ***results, unsigned long *indices,
+radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
+ void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
@@ -1758,12 +1905,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
* returns the number of items which were placed at *@results.
*/
unsigned int
-radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
unsigned int tag)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
@@ -1799,12 +1946,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
* returns the number of slots which were placed at *@results.
*/
unsigned int
-radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
- unsigned long first_index, unsigned int max_items,
- unsigned int tag)
+radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
+ void __rcu ***results, unsigned long first_index,
+ unsigned int max_items, unsigned int tag)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
@@ -1824,70 +1971,98 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
* __radix_tree_delete_node - try to free node after clearing a slot
* @root: radix tree root
* @node: node containing @index
+ * @update_node: callback for changing leaf nodes
+ * @private: private data to pass to @update_node
*
* After clearing the slot at @index in @node from radix tree
* rooted at @root, call this function to attempt freeing the
* node and shrinking the tree.
*/
void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node)
+ struct radix_tree_node *node,
+ radix_tree_update_node_t update_node,
+ void *private)
{
- delete_node(root, node, NULL, NULL);
+ delete_node(root, node, update_node, private);
+}
+
+static bool __radix_tree_delete(struct radix_tree_root *root,
+ struct radix_tree_node *node, void __rcu **slot)
+{
+ void *old = rcu_dereference_raw(*slot);
+ int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
+ unsigned offset = get_slot_offset(node, slot);
+ int tag;
+
+ if (is_idr(root))
+ node_tag_set(root, node, IDR_FREE, offset);
+ else
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+ node_tag_clear(root, node, tag, offset);
+
+ replace_slot(slot, NULL, node, -1, exceptional);
+ return node && delete_node(root, node, NULL, NULL);
}
/**
- * radix_tree_delete_item - delete an item from a radix tree
- * @root: radix tree root
- * @index: index key
- * @item: expected item
+ * radix_tree_iter_delete - delete the entry at this iterator position
+ * @root: radix tree root
+ * @iter: iterator state
+ * @slot: pointer to slot
*
- * Remove @item at @index from the radix tree rooted at @root.
+ * Delete the entry at the position currently pointed to by the iterator.
+ * This may result in the current node being freed; if it is, the iterator
+ * is advanced so that it will not reference the freed memory. This
+ * function may be called without any locking if there are no other threads
+ * which can access this tree.
+ */
+void radix_tree_iter_delete(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, void __rcu **slot)
+{
+ if (__radix_tree_delete(root, iter->node, slot))
+ iter->index = iter->next_index;
+}
+
+/**
+ * radix_tree_delete_item - delete an item from a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @item: expected item
+ *
+ * Remove @item at @index from the radix tree rooted at @root.
*
- * Returns the address of the deleted item, or NULL if it was not present
- * or the entry at the given @index was not @item.
+ * Return: the deleted entry, or %NULL if it was not present
+ * or the entry at the given @index was not @item.
*/
void *radix_tree_delete_item(struct radix_tree_root *root,
unsigned long index, void *item)
{
- struct radix_tree_node *node;
- unsigned int offset;
- void **slot;
+ struct radix_tree_node *node = NULL;
+ void __rcu **slot;
void *entry;
- int tag;
entry = __radix_tree_lookup(root, index, &node, &slot);
- if (!entry)
+ if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
+ get_slot_offset(node, slot))))
return NULL;
if (item && entry != item)
return NULL;
- if (!node) {
- root_tag_clear_all(root);
- root->rnode = NULL;
- return entry;
- }
-
- offset = get_slot_offset(node, slot);
-
- /* Clear all tags associated with the item to be deleted. */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- node_tag_clear(root, node, tag, offset);
-
- __radix_tree_replace(root, node, slot, NULL, NULL, NULL);
+ __radix_tree_delete(root, node, slot);
return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
/**
- * radix_tree_delete - delete an item from a radix tree
- * @root: radix tree root
- * @index: index key
+ * radix_tree_delete - delete an entry from a radix tree
+ * @root: radix tree root
+ * @index: index key
*
- * Remove the item at @index from the radix tree rooted at @root.
+ * Remove the entry at @index from the radix tree rooted at @root.
*
- * Returns the address of the deleted item, or NULL if it was not present.
+ * Return: The deleted entry, or %NULL if it was not present.
*/
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
@@ -1897,15 +2072,14 @@ EXPORT_SYMBOL(radix_tree_delete);
void radix_tree_clear_tags(struct radix_tree_root *root,
struct radix_tree_node *node,
- void **slot)
+ void __rcu **slot)
{
if (node) {
unsigned int tag, offset = get_slot_offset(node, slot);
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
node_tag_clear(root, node, tag, offset);
} else {
- /* Clear root node tags */
- root->gfp_mask &= __GFP_BITS_MASK;
+ root_tag_clear_all(root);
}
}
@@ -1914,12 +2088,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root,
* @root: radix tree root
* @tag: tag to test
*/
-int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
+int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
+/**
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
+ *
+ * Preallocate memory to use for the next call to idr_alloc(). This function
+ * returns with preemption disabled. It will be enabled by idr_preload_end().
+ */
+void idr_preload(gfp_t gfp_mask)
+{
+ __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
+}
+EXPORT_SYMBOL(idr_preload);
+
+/**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida: ida handle
+ * @gfp: memory allocation flags
+ *
+ * This function should be called before calling ida_get_new_above(). If it
+ * is unable to allocate memory, it will return %0. On success, it returns %1.
+ */
+int ida_pre_get(struct ida *ida, gfp_t gfp)
+{
+ __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
+ /*
+ * The IDA API has no preload_end() equivalent. Instead,
+ * ida_get_new() can return -EAGAIN, prompting the caller
+ * to return to the ida_pre_get() step.
+ */
+ preempt_enable();
+
+ if (!this_cpu_read(ida_bitmap)) {
+ struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+ if (!bitmap)
+ return 0;
+ bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
+ kfree(bitmap);
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(ida_pre_get);
+
+void __rcu **idr_get_free(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, gfp_t gfp, int end)
+{
+ struct radix_tree_node *node = NULL, *child;
+ void __rcu **slot = (void __rcu **)&root->rnode;
+ unsigned long maxindex, start = iter->next_index;
+ unsigned long max = end > 0 ? end - 1 : INT_MAX;
+ unsigned int shift, offset = 0;
+
+ grow:
+ shift = radix_tree_load_root(root, &child, &maxindex);
+ if (!radix_tree_tagged(root, IDR_FREE))
+ start = max(start, maxindex + 1);
+ if (start > max)
+ return ERR_PTR(-ENOSPC);
+
+ if (start > maxindex) {
+ int error = radix_tree_extend(root, gfp, start, shift);
+ if (error < 0)
+ return ERR_PTR(error);
+ shift = error;
+ child = rcu_dereference_raw(root->rnode);
+ }
+
+ while (shift) {
+ shift -= RADIX_TREE_MAP_SHIFT;
+ if (child == NULL) {
+ /* Have to add a child node. */
+ child = radix_tree_node_alloc(gfp, node, root, shift,
+ offset, 0, 0);
+ if (!child)
+ return ERR_PTR(-ENOMEM);
+ all_tag_set(child, IDR_FREE);
+ rcu_assign_pointer(*slot, node_to_entry(child));
+ if (node)
+ node->count++;
+ } else if (!radix_tree_is_internal_node(child))
+ break;
+
+ node = entry_to_node(child);
+ offset = radix_tree_descend(node, &child, start);
+ if (!tag_get(node, IDR_FREE, offset)) {
+ offset = radix_tree_find_next_bit(node, IDR_FREE,
+ offset + 1);
+ start = next_index(start, node, offset);
+ if (start > max)
+ return ERR_PTR(-ENOSPC);
+ while (offset == RADIX_TREE_MAP_SIZE) {
+ offset = node->offset + 1;
+ node = node->parent;
+ if (!node)
+ goto grow;
+ shift = node->shift;
+ }
+ child = rcu_dereference_raw(node->slots[offset]);
+ }
+ slot = &node->slots[offset];
+ }
+
+ iter->index = start;
+ if (node)
+ iter->next_index = 1 + min(max, (start | node_maxindex(node)));
+ else
+ iter->next_index = 1;
+ iter->node = node;
+ __set_iter_shift(iter, shift);
+ set_iter_tags(iter, node, offset, IDR_FREE);
+
+ return slot;
+}
+
+/**
+ * idr_destroy - release all internal memory from an IDR
+ * @idr: idr handle
+ *
+ * After this function is called, the IDR is empty, and may be reused or
+ * the data structure containing it may be freed.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free the memory used to keep track of those objects.
+ */
+void idr_destroy(struct idr *idr)
+{
+ struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
+ if (radix_tree_is_internal_node(node))
+ radix_tree_free_nodes(node);
+ idr->idr_rt.rnode = NULL;
+ root_tag_set(&idr->idr_rt, IDR_FREE);
+}
+EXPORT_SYMBOL(idr_destroy);
+
static void
radix_tree_node_ctor(void *arg)
{
@@ -1963,10 +2272,12 @@ static int radix_tree_cpu_dead(unsigned int cpu)
rtp = &per_cpu(radix_tree_preloads, cpu);
while (rtp->nr) {
node = rtp->nodes;
- rtp->nodes = node->private_data;
+ rtp->nodes = node->parent;
kmem_cache_free(radix_tree_node_cachep, node);
rtp->nr--;
}
+ kfree(per_cpu(ida_bitmap, cpu));
+ per_cpu(ida_bitmap, cpu) = NULL;
return 0;
}
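The idr_preload()/idr_get_free()/idr_destroy() additions above back the usual
preload-then-allocate pattern: idr_preload() may sleep to fill the per-cpu node
cache and returns with preemption disabled, which idr_preload_end() re-enables.
A minimal sketch of a typical caller follows; the function, lock and object
names are made up, and idr_alloc() is the standard IDR allocation entry point
rather than part of this diff.

/* Sketch only: "example_assign_id", "lock" and "obj" are hypothetical. */
static int example_assign_id(struct idr *idr, spinlock_t *lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);			/* may sleep; preallocates nodes */
	spin_lock(lock);
	id = idr_alloc(idr, obj, 0, 0, GFP_NOWAIT);	/* must not sleep under the lock */
	spin_unlock(lock);
	idr_preload_end();

	return id;	/* new ID on success, -ENOMEM or -ENOSPC on failure */
}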
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1f8b112a7c35..4ba2828a67c0 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -427,7 +427,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
- dummy_propagate, dummy_copy, dummy_rotate
+ .propagate = dummy_propagate,
+ .copy = dummy_copy,
+ .rotate = dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
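The dummy_callbacks hunk above only switches to designated initializers; an
augmented-rbtree user would now typically spell out its own callback table the
same way. A sketch, where my_propagate/my_copy/my_rotate are hypothetical
callbacks supplied by the user:

static const struct rb_augment_callbacks my_callbacks = {
	.propagate	= my_propagate,
	.copy		= my_copy,
	.rotate		= my_rotate,
};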
diff --git a/lib/refcount.c b/lib/refcount.c
new file mode 100644
index 000000000000..1d33366189d1
--- /dev/null
+++ b/lib/refcount.c
@@ -0,0 +1,267 @@
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire; for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc; this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/refcount.h>
+#include <linux/bug.h>
+
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (!val)
+ return false;
+
+ if (unlikely(val == UINT_MAX))
+ return true;
+
+ new = val + i;
+ if (new < val)
+ new = UINT_MAX;
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(refcount_add_not_zero);
+
+void refcount_add(unsigned int i, refcount_t *r)
+{
+ WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+}
+EXPORT_SYMBOL_GPL(refcount_add);
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+bool refcount_inc_not_zero(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ new = val + 1;
+
+ if (!val)
+ return false;
+
+ if (unlikely(!new))
+ return true;
+
+ old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+void refcount_inc(refcount_t *r)
+{
+ WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+EXPORT_SYMBOL_GPL(refcount_inc);
+
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return false;
+
+ new = val - i;
+ if (new > val) {
+ WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return false;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return !new;
+}
+EXPORT_SYMBOL_GPL(refcount_sub_and_test);
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+bool refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_sub_and_test(1, r);
+}
+EXPORT_SYMBOL_GPL(refcount_dec_and_test);
+
+/*
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
+
+void refcount_dec(refcount_t *r)
+{
+ WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+}
+EXPORT_SYMBOL_GPL(refcount_dec);
+
+/*
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * rather than a generic cmpxchg, because that would allow implementing unsafe
+ * operations.
+ */
+bool refcount_dec_if_one(refcount_t *r)
+{
+ return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_if_one);
+
+/*
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ */
+bool refcount_dec_not_one(refcount_t *r)
+{
+ unsigned int old, new, val = atomic_read(&r->refs);
+
+ for (;;) {
+ if (unlikely(val == UINT_MAX))
+ return true;
+
+ if (val == 1)
+ return false;
+
+ new = val - 1;
+ if (new > val) {
+ WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+ return true;
+ }
+
+ old = atomic_cmpxchg_release(&r->refs, val, new);
+ if (old == val)
+ break;
+
+ val = old;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_not_one);
+
+/*
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ mutex_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ mutex_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
+
+/*
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
+
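Taken together, the header comment of the new file describes a get/put
discipline: relaxed increments taken only while the lookup structure keeps the
object alive, and release-ordered decrements whose final 1->0 transition frees
it. A minimal sketch of that pattern, assuming a hypothetical object type:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	/* ... payload ... */
};

/* Lookup path: only succeeds while at least one reference is still held. */
static struct foo *foo_get(struct foo *f)
{
	return refcount_inc_not_zero(&f->refs) ? f : NULL;
}

/* Release-ordered put; the thread that hits zero is the only one to free. */
static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))
		kfree(f);
}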
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 32d0ad058380..c5b9b9351cec 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -32,6 +32,11 @@
#define HASH_MIN_SIZE 4U
#define BUCKET_LOCKS_PER_CPU 32UL
+union nested_table {
+ union nested_table __rcu *table;
+ struct rhash_head __rcu *bucket;
+};
+
static u32 head_hashfn(struct rhashtable *ht,
const struct bucket_table *tbl,
const struct rhash_head *he)
@@ -76,6 +81,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
/* Never allocate more than 0.5 locks per bucket */
size = min_t(unsigned int, size, tbl->size >> 1);
+ if (tbl->nest)
+ size = min(size, 1U << tbl->nest);
+
if (sizeof(spinlock_t) != 0) {
tbl->locks = NULL;
#ifdef CONFIG_NUMA
@@ -99,11 +107,46 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
return 0;
}
+static void nested_table_free(union nested_table *ntbl, unsigned int size)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ const unsigned int len = 1 << shift;
+ unsigned int i;
+
+ ntbl = rcu_dereference_raw(ntbl->table);
+ if (!ntbl)
+ return;
+
+ if (size > len) {
+ size >>= shift;
+ for (i = 0; i < len; i++)
+ nested_table_free(ntbl + i, size);
+ }
+
+ kfree(ntbl);
+}
+
+static void nested_bucket_table_free(const struct bucket_table *tbl)
+{
+ unsigned int size = tbl->size >> tbl->nest;
+ unsigned int len = 1 << tbl->nest;
+ union nested_table *ntbl;
+ unsigned int i;
+
+ ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+
+ for (i = 0; i < len; i++)
+ nested_table_free(ntbl + i, size);
+
+ kfree(ntbl);
+}
+
static void bucket_table_free(const struct bucket_table *tbl)
{
- if (tbl)
- kvfree(tbl->locks);
+ if (tbl->nest)
+ nested_bucket_table_free(tbl);
+ kvfree(tbl->locks);
kvfree(tbl);
}
@@ -112,6 +155,59 @@ static void bucket_table_free_rcu(struct rcu_head *head)
bucket_table_free(container_of(head, struct bucket_table, rcu));
}
+static union nested_table *nested_table_alloc(struct rhashtable *ht,
+ union nested_table __rcu **prev,
+ unsigned int shifted,
+ unsigned int nhash)
+{
+ union nested_table *ntbl;
+ int i;
+
+ ntbl = rcu_dereference(*prev);
+ if (ntbl)
+ return ntbl;
+
+ ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+
+ if (ntbl && shifted) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
+ (i << shifted) | nhash);
+ }
+
+ rcu_assign_pointer(*prev, ntbl);
+
+ return ntbl;
+}
+
+static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
+ size_t nbuckets,
+ gfp_t gfp)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ struct bucket_table *tbl;
+ size_t size;
+
+ if (nbuckets < (1 << (shift + 1)))
+ return NULL;
+
+ size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
+
+ tbl = kzalloc(size, gfp);
+ if (!tbl)
+ return NULL;
+
+ if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
+ 0, 0)) {
+ kfree(tbl);
+ return NULL;
+ }
+
+ tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
+
+ return tbl;
+}
+
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
size_t nbuckets,
gfp_t gfp)
@@ -126,10 +222,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
if (tbl == NULL && gfp == GFP_KERNEL)
tbl = vzalloc(size);
+
+ size = nbuckets;
+
+ if (tbl == NULL && gfp != GFP_KERNEL) {
+ tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
+ nbuckets = 0;
+ }
if (tbl == NULL)
return NULL;
- tbl->size = nbuckets;
+ tbl->size = size;
if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
bucket_table_free(tbl);
@@ -164,12 +267,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
struct bucket_table *new_tbl = rhashtable_last_table(ht,
rht_dereference_rcu(old_tbl->future_tbl, ht));
- struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
- int err = -ENOENT;
+ struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
+ int err = -EAGAIN;
struct rhash_head *head, *next, *entry;
spinlock_t *new_bucket_lock;
unsigned int new_hash;
+ if (new_tbl->nest)
+ goto out;
+
+ err = -ENOENT;
+
rht_for_each(entry, old_tbl, old_hash) {
err = 0;
next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -202,19 +310,26 @@ out:
return err;
}
-static void rhashtable_rehash_chain(struct rhashtable *ht,
+static int rhashtable_rehash_chain(struct rhashtable *ht,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
spinlock_t *old_bucket_lock;
+ int err;
old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
spin_lock_bh(old_bucket_lock);
- while (!rhashtable_rehash_one(ht, old_hash))
+ while (!(err = rhashtable_rehash_one(ht, old_hash)))
;
- old_tbl->rehash++;
+
+ if (err == -ENOENT) {
+ old_tbl->rehash++;
+ err = 0;
+ }
spin_unlock_bh(old_bucket_lock);
+
+ return err;
}
static int rhashtable_rehash_attach(struct rhashtable *ht,
@@ -246,13 +361,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
struct bucket_table *new_tbl;
struct rhashtable_walker *walker;
unsigned int old_hash;
+ int err;
new_tbl = rht_dereference(old_tbl->future_tbl, ht);
if (!new_tbl)
return 0;
- for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
- rhashtable_rehash_chain(ht, old_hash);
+ for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+ err = rhashtable_rehash_chain(ht, old_hash);
+ if (err)
+ return err;
+ }
/* Publish the new table pointer. */
rcu_assign_pointer(ht->tbl, new_tbl);
@@ -271,31 +390,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
-/**
- * rhashtable_expand - Expand hash table while allowing concurrent lookups
- * @ht: the hash table to expand
- *
- * A secondary bucket array is allocated and the hash entries are migrated.
- *
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
- *
- * The caller must ensure that no concurrent resizing occurs by holding
- * ht->mutex.
- *
- * It is valid to have concurrent insertions and deletions protected by per
- * bucket locks or concurrent RCU protected lookups and traversals.
- */
-static int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_rehash_alloc(struct rhashtable *ht,
+ struct bucket_table *old_tbl,
+ unsigned int size)
{
- struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl;
int err;
ASSERT_RHT_MUTEX(ht);
- old_tbl = rhashtable_last_table(ht, old_tbl);
-
- new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
+ new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
if (new_tbl == NULL)
return -ENOMEM;
@@ -324,12 +428,9 @@ static int rhashtable_expand(struct rhashtable *ht)
*/
static int rhashtable_shrink(struct rhashtable *ht)
{
- struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
unsigned int nelems = atomic_read(&ht->nelems);
unsigned int size = 0;
- int err;
-
- ASSERT_RHT_MUTEX(ht);
if (nelems)
size = roundup_pow_of_two(nelems * 3 / 2);
@@ -342,15 +443,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
if (rht_dereference(old_tbl->future_tbl, ht))
return -EEXIST;
- new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
- if (new_tbl == NULL)
- return -ENOMEM;
-
- err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
- if (err)
- bucket_table_free(new_tbl);
-
- return err;
+ return rhashtable_rehash_alloc(ht, old_tbl, size);
}
static void rht_deferred_worker(struct work_struct *work)
@@ -366,11 +459,14 @@ static void rht_deferred_worker(struct work_struct *work)
tbl = rhashtable_last_table(ht, tbl);
if (rht_grow_above_75(ht, tbl))
- rhashtable_expand(ht);
+ err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
- rhashtable_shrink(ht);
+ err = rhashtable_shrink(ht);
+ else if (tbl->nest)
+ err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
- err = rhashtable_rehash_table(ht);
+ if (!err)
+ err = rhashtable_rehash_table(ht);
mutex_unlock(&ht->mutex);
@@ -439,8 +535,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
int elasticity;
elasticity = ht->elasticity;
- pprev = &tbl->buckets[hash];
- rht_for_each(head, tbl, hash) {
+ pprev = rht_bucket_var(tbl, hash);
+ rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *list;
struct rhlist_head *plist;
@@ -477,6 +573,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
struct rhash_head *obj,
void *data)
{
+ struct rhash_head __rcu **pprev;
struct bucket_table *new_tbl;
struct rhash_head *head;
@@ -499,7 +596,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
if (unlikely(rht_grow_above_100(ht, tbl)))
return ERR_PTR(-EAGAIN);
- head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+ pprev = rht_bucket_insert(ht, tbl, hash);
+ if (!pprev)
+ return ERR_PTR(-ENOMEM);
+
+ head = rht_dereference_bucket(*pprev, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (ht->rhlist) {
@@ -509,7 +610,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
RCU_INIT_POINTER(list->next, NULL);
}
- rcu_assign_pointer(tbl->buckets[hash], obj);
+ rcu_assign_pointer(*pprev, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))
@@ -975,7 +1076,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg)
{
- const struct bucket_table *tbl;
+ struct bucket_table *tbl;
unsigned int i;
cancel_work_sync(&ht->run_work);
@@ -986,7 +1087,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
- for (pos = rht_dereference(tbl->buckets[i], ht),
+ for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
!rht_is_a_nulls(pos);
@@ -1007,3 +1108,71 @@ void rhashtable_destroy(struct rhashtable *ht)
return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ static struct rhash_head __rcu *rhnull =
+ (struct rhash_head __rcu *)NULLS_MARKER(0);
+ unsigned int index = hash & ((1 << tbl->nest) - 1);
+ unsigned int size = tbl->size >> tbl->nest;
+ unsigned int subhash = hash;
+ union nested_table *ntbl;
+
+ ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
+ subhash >>= tbl->nest;
+
+ while (ntbl && size > (1 << shift)) {
+ index = subhash & ((1 << shift) - 1);
+ ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
+ tbl, hash);
+ size >>= shift;
+ subhash >>= shift;
+ }
+
+ if (!ntbl)
+ return &rhnull;
+
+ return &ntbl[subhash].bucket;
+
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested);
+
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ unsigned int index = hash & ((1 << tbl->nest) - 1);
+ unsigned int size = tbl->size >> tbl->nest;
+ union nested_table *ntbl;
+ unsigned int shifted;
+ unsigned int nhash;
+
+ ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ hash >>= tbl->nest;
+ nhash = index;
+ shifted = tbl->nest;
+ ntbl = nested_table_alloc(ht, &ntbl[index].table,
+ size <= (1 << shift) ? shifted : 0, nhash);
+
+ while (ntbl && size > (1 << shift)) {
+ index = hash & ((1 << shift) - 1);
+ size >>= shift;
+ hash >>= shift;
+ nhash |= index << shifted;
+ shifted += shift;
+ ntbl = nested_table_alloc(ht, &ntbl[index].table,
+ size <= (1 << shift) ? shifted : 0,
+ nhash);
+ }
+
+ if (!ntbl)
+ return NULL;
+
+ return &ntbl[hash].bucket;
+
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
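The bucket addressing in rht_bucket_nested() above is easiest to follow with
concrete numbers. The standalone userspace sketch below reproduces the same
index arithmetic, assuming 4K pages and 8-byte pointers (so each nested page
holds 512 pointers); the table geometry and hash value are made up.

#include <stdio.h>

int main(void)
{
	const unsigned int shift = 12 - 3;	/* PAGE_SHIFT - ilog2(sizeof(void *)) = 9 */
	unsigned int nest = 4;			/* tbl->nest: 16 top-level slots */
	unsigned int size = 16384 >> nest;	/* tbl->size >> tbl->nest */
	unsigned int hash = 0x2a5f;		/* any value below tbl->size */
	unsigned int index = hash & ((1 << nest) - 1);
	unsigned int subhash = hash >> nest;

	printf("top-level slot %u\n", index);
	while (size > (1U << shift)) {
		index = subhash & ((1 << shift) - 1);
		printf("intermediate slot %u\n", index);
		size >>= shift;
		subhash >>= shift;
	}
	printf("bucket %u within the leaf page\n", subhash);
	return 0;
}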
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2cecf05c82fd..55e11c4b2f3b 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -17,6 +17,7 @@
#include <linux/random.h>
#include <linux/sbitmap.h>
+#include <linux/seq_file.h>
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
gfp_t flags, int node)
@@ -180,6 +181,62 @@ unsigned int sbitmap_weight(const struct sbitmap *sb)
}
EXPORT_SYMBOL_GPL(sbitmap_weight);
+void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
+{
+ seq_printf(m, "depth=%u\n", sb->depth);
+ seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
+ seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
+ seq_printf(m, "map_nr=%u\n", sb->map_nr);
+}
+EXPORT_SYMBOL_GPL(sbitmap_show);
+
+static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
+{
+ if ((offset & 0xf) == 0) {
+ if (offset != 0)
+ seq_putc(m, '\n');
+ seq_printf(m, "%08x:", offset);
+ }
+ if ((offset & 0x1) == 0)
+ seq_putc(m, ' ');
+ seq_printf(m, "%02x", byte);
+}
+
+void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
+{
+ u8 byte = 0;
+ unsigned int byte_bits = 0;
+ unsigned int offset = 0;
+ int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ unsigned long word = READ_ONCE(sb->map[i].word);
+ unsigned int word_bits = READ_ONCE(sb->map[i].depth);
+
+ while (word_bits > 0) {
+ unsigned int bits = min(8 - byte_bits, word_bits);
+
+ byte |= (word & (BIT(bits) - 1)) << byte_bits;
+ byte_bits += bits;
+ if (byte_bits == 8) {
+ emit_byte(m, offset, byte);
+ byte = 0;
+ byte_bits = 0;
+ offset++;
+ }
+ word >>= bits;
+ word_bits -= bits;
+ }
+ }
+ if (byte_bits) {
+ emit_byte(m, offset, byte);
+ offset++;
+ }
+ if (offset)
+ seq_putc(m, '\n');
+}
+EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
+
static unsigned int sbq_calc_wake_batch(unsigned int depth)
{
unsigned int wake_batch;
@@ -239,7 +296,19 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
- sbq->wake_batch = sbq_calc_wake_batch(depth);
+ unsigned int wake_batch = sbq_calc_wake_batch(depth);
+ int i;
+
+ if (sbq->wake_batch != wake_batch) {
+ WRITE_ONCE(sbq->wake_batch, wake_batch);
+ /*
+ * Pairs with the memory barrier in sbq_wake_up() to ensure that
+ * the batch size is updated before the wait counts.
+ */
+ smp_mb__before_atomic();
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++)
+ atomic_set(&sbq->ws[i].wait_cnt, 1);
+ }
sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
@@ -297,20 +366,39 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
static void sbq_wake_up(struct sbitmap_queue *sbq)
{
struct sbq_wait_state *ws;
+ unsigned int wake_batch;
int wait_cnt;
- /* Ensure that the wait list checks occur after clear_bit(). */
- smp_mb();
+ /*
+ * Pairs with the memory barrier in set_current_state() to ensure the
+ * proper ordering of clear_bit()/waitqueue_active() in the waker and
+ * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See
+ * the comment on waitqueue_active(). This is __after_atomic because we
+ * just did clear_bit() in the caller.
+ */
+ smp_mb__after_atomic();
ws = sbq_wake_ptr(sbq);
if (!ws)
return;
wait_cnt = atomic_dec_return(&ws->wait_cnt);
- if (unlikely(wait_cnt < 0))
- wait_cnt = atomic_inc_return(&ws->wait_cnt);
- if (wait_cnt == 0) {
- atomic_add(sbq->wake_batch, &ws->wait_cnt);
+ if (wait_cnt <= 0) {
+ wake_batch = READ_ONCE(sbq->wake_batch);
+ /*
+ * Pairs with the memory barrier in sbitmap_queue_resize() to
+ * ensure that we see the batch size update before the wait
+ * count is reset.
+ */
+ smp_mb__before_atomic();
+ /*
+ * If there are concurrent callers to sbq_wake_up(), the last
+ * one to decrement the wait count below zero will bump it back
+ * up. If there is a concurrent resize, the count reset will
+ * either cause the cmpxchg to fail or overwrite after the
+ * cmpxchg.
+ */
+ atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
sbq_index_atomic_inc(&sbq->wake_index);
wake_up(&ws->wait);
}
@@ -331,7 +419,8 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
int i, wake_index;
/*
- * Make sure all changes prior to this are visible from other CPUs.
+ * Pairs with the memory barrier in set_current_state() like in
+ * sbq_wake_up().
*/
smp_mb();
wake_index = atomic_read(&sbq->wake_index);
@@ -345,3 +434,37 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
+
+void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
+{
+ bool first;
+ int i;
+
+ sbitmap_show(&sbq->sb, m);
+
+ seq_puts(m, "alloc_hint={");
+ first = true;
+ for_each_possible_cpu(i) {
+ if (!first)
+ seq_puts(m, ", ");
+ first = false;
+ seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
+ }
+ seq_puts(m, "}\n");
+
+ seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
+ seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+
+ seq_puts(m, "ws={\n");
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[i];
+
+ seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
+ atomic_read(&ws->wait_cnt),
+ waitqueue_active(&ws->wait) ? "active" : "inactive");
+ }
+ seq_puts(m, "}\n");
+
+ seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_show);
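Since the new sbitmap_queue_show() only needs a seq_file to print into, it
slots naturally behind a debugfs attribute. A hedged sketch follows; the
"my_sbq_*" identifiers, the file name and the registration call are
hypothetical, only the seq_file/debugfs helpers themselves are standard.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_sbq_show(struct seq_file *m, void *unused)
{
	struct sbitmap_queue *sbq = m->private;	/* set via single_open() below */

	sbitmap_queue_show(sbq, m);
	return 0;
}

static int my_sbq_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_sbq_show, inode->i_private);
}

static const struct file_operations my_sbq_fops = {
	.owner		= THIS_MODULE,
	.open		= my_sbq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Registration, e.g.: debugfs_create_file("sbq", 0400, parent, sbq, &my_sbq_fops); */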
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 004fc70fc56a..c6cf82242d65 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -651,7 +651,6 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
- unsigned long flags;
unsigned int sg_flags = SG_MITER_ATOMIC;
if (to_buffer)
@@ -664,9 +663,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
if (!sg_miter_skip(&miter, skip))
return false;
- local_irq_save(flags);
-
- while (sg_miter_next(&miter) && offset < buflen) {
+ while ((offset < buflen) && sg_miter_next(&miter)) {
unsigned int len;
len = min(miter.length, buflen - offset);
@@ -681,7 +678,6 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
sg_miter_stop(&miter);
- local_irq_restore(flags);
return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
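With the explicit IRQ save/restore gone, sg_copy_buffer() still walks the list
with an SG_MITER_ATOMIC mapping iterator, and callers keep using the existing
wrappers unchanged. A trivial hedged sketch of such a caller (the names are
made up; sg_copy_to_buffer() is the long-standing wrapper around this path):

/* Copy the first hdr_len bytes of a scatterlist into a linear buffer. */
static size_t example_read_header(struct scatterlist *sgl, unsigned int nents,
				  void *hdr, size_t hdr_len)
{
	return sg_copy_to_buffer(sgl, nents, hdr, hdr_len);
}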
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 1feed6a2b12a..0beaa1d899aa 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,13 +9,13 @@
#include <linux/quicklist.h>
#include <linux/cma.h>
-void show_mem(unsigned int filter)
+void show_mem(unsigned int filter, nodemask_t *nodemask)
{
pg_data_t *pgdat;
unsigned long total = 0, reserved = 0, highmem = 0;
printk("Mem-Info:\n");
- show_free_areas(filter);
+ show_free_areas(filter, nodemask);
for_each_online_pgdat(pgdat) {
unsigned long flags;
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644
index 000000000000..3ae58b4edad6
--- /dev/null
+++ b/lib/siphash.c
@@ -0,0 +1,551 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND \
+ do { \
+ v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+ v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+ v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+ v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+ } while (0)
+
+#define PREAMBLE(len) \
+ u64 v0 = 0x736f6d6570736575ULL; \
+ u64 v1 = 0x646f72616e646f6dULL; \
+ u64 v2 = 0x6c7967656e657261ULL; \
+ u64 v3 = 0x7465646279746573ULL; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ PREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = le64_to_cpup(data);
+ v3 ^= m;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= le32_to_cpup(data); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+#endif
+ POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ PREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = get_unaligned_le64(data);
+ v3 ^= m;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= get_unaligned_le32(end); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+#endif
+ POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+#endif
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+ PREAMBLE(8)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+ const siphash_key_t *key)
+{
+ PREAMBLE(24)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= third;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: fourth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+ const u64 forth, const siphash_key_t *key)
+{
+ PREAMBLE(32)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= third;
+ v3 ^= forth;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= forth;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+ PREAMBLE(4)
+ b |= first;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+ const siphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ PREAMBLE(12)
+ v3 ^= combined;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= combined;
+ b |= third;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+ v3 ^= b; \
+ HSIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ HSIPROUND; \
+ HSIPROUND; \
+ HSIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = le64_to_cpup(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= le32_to_cpup(data); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+#endif
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = get_unaligned_le64(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= get_unaligned_le32(end); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+#endif
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+ HPREAMBLE(4)
+ b |= first;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(8)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+ const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(12)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ b |= third;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ const u32 forth, const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(16)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ combined = (u64)forth << 32 | third;
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND \
+ do { \
+ v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+ v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+ v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+ v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+ } while (0)
+
+#define HPREAMBLE(len) \
+ u32 v0 = 0; \
+ u32 v1 = 0; \
+ u32 v2 = 0x6c796765U; \
+ u32 v3 = 0x74656462U; \
+ u32 b = ((u32)(len)) << 24; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+ v3 ^= b; \
+ HSIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ HSIPROUND; \
+ HSIPROUND; \
+ HSIPROUND; \
+ return v1 ^ v3;
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u32));
+ const u8 left = len & (sizeof(u32) - 1);
+ u32 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u32)) {
+ m = le32_to_cpup(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+ switch (left) {
+ case 3: b |= ((u32)end[2]) << 16;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u32));
+ const u8 left = len & (sizeof(u32) - 1);
+ u32 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u32)) {
+ m = get_unaligned_le32(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+ switch (left) {
+ case 3: b |= ((u32)end[2]) << 16;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+ HPREAMBLE(4)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+ HPREAMBLE(8)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+ const hsiphash_key_t *key)
+{
+ HPREAMBLE(12)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ HSIPROUND;
+ v0 ^= third;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ const u32 forth, const hsiphash_key_t *key)
+{
+ HPREAMBLE(16)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ HSIPROUND;
+ v0 ^= third;
+ v3 ^= forth;
+ HSIPROUND;
+ v0 ^= forth;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
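
For context, a minimal caller-side sketch (not part of this diff) of how the fixed-width helpers above are meant to be used; the key object and field names are illustrative assumptions:

	#include <linux/siphash.h>
	#include <linux/random.h>

	static hsiphash_key_t flow_key;		/* keyed once, e.g. at init time */

	static void flow_key_init(void)
	{
		get_random_bytes(&flow_key, sizeof(flow_key));
	}

	/* hsiphash is only suitable for hashtable bucketing, not as a secure PRF */
	static u32 flow_hash(u32 saddr, u32 daddr, u32 ports)
	{
		return hsiphash_3u32(saddr, daddr, ports, &flow_key);
	}

Where a cryptographically strong 64-bit PRF is required, the siphash_*u32()/siphash_*u64() helpers with a siphash_key_t are used instead.
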
diff --git a/lib/sort.c b/lib/sort.c
index fc20df42aa6f..975c6ef6fec7 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -4,6 +4,8 @@
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
@@ -101,42 +103,3 @@ void sort(void *base, size_t num, size_t size,
}
EXPORT_SYMBOL(sort);
-
-#if 0
-#include <linux/slab.h>
-/* a simple boot-time regression test */
-
-int cmpint(const void *a, const void *b)
-{
- return *(int *)a - *(int *)b;
-}
-
-static int sort_test(void)
-{
- int *a, i, r = 1;
-
- a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
- BUG_ON(!a);
-
- printk("testing sort()\n");
-
- for (i = 0; i < 1000; i++) {
- r = (r * 725861) % 6599;
- a[i] = r;
- }
-
- sort(a, 1000, sizeof(int), cmpint, NULL);
-
- for (i = 0; i < 999; i++)
- if (a[i] > a[i+1]) {
- printk("sort() failed!\n");
- break;
- }
-
- kfree(a);
-
- return 0;
-}
-
-module_init(sort_test);
-#endif
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cb1b54ee8527..a8d74a733a38 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -53,7 +53,7 @@
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-int swiotlb_force;
+enum swiotlb_force swiotlb_force;
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -83,6 +83,12 @@ static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
+/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
@@ -106,8 +112,12 @@ setup_io_tlb_npages(char *str)
}
if (*str == ',')
++str;
- if (!strcmp(str, "force"))
- swiotlb_force = 1;
+ if (!strcmp(str, "force")) {
+ swiotlb_force = SWIOTLB_FORCE;
+ } else if (!strcmp(str, "noforce")) {
+ swiotlb_force = SWIOTLB_NO_FORCE;
+ io_tlb_nslabs = 1;
+ }
return 0;
}
@@ -120,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+unsigned int swiotlb_max_segment(void)
+{
+ return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+ if (swiotlb_force == SWIOTLB_FORCE)
+ max_segment = 1;
+ else
+ max_segment = rounddown(val, PAGE_SIZE);
+}
+
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
@@ -201,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
if (verbose)
swiotlb_print_info();
+ swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
return 0;
}
@@ -279,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
if (rc)
free_pages((unsigned long)vstart, order);
+
return rc;
}
@@ -333,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
late_alloc = 1;
+ swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
return 0;
cleanup4:
@@ -347,6 +375,7 @@ cleanup2:
io_tlb_end = 0;
io_tlb_start = 0;
io_tlb_nslabs = 0;
+ max_segment = 0;
return -ENOMEM;
}
@@ -375,6 +404,7 @@ void __init swiotlb_free(void)
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
+ max_segment = 0;
}
int is_swiotlb_buffer(phys_addr_t paddr)
@@ -453,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
+ * For mappings greater than or equal to a page, we limit the stride
+ * (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
+ if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
@@ -543,8 +573,15 @@ static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+ dma_addr_t start_dma_addr;
+ if (swiotlb_force == SWIOTLB_NO_FORCE) {
+ dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
+ &phys);
+ return SWIOTLB_MAP_ERROR;
+ }
+
+ start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
dir, attrs);
}
@@ -721,6 +758,9 @@ static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
int do_panic)
{
+ if (swiotlb_force == SWIOTLB_NO_FORCE)
+ return;
+
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly.
@@ -763,7 +803,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
+ if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
return dev_addr;
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -904,7 +944,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
phys_addr_t paddr = sg_phys(sg);
dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
- if (swiotlb_force ||
+ if (swiotlb_force == SWIOTLB_FORCE ||
!dma_capable(hwdev, dev_addr, sg->length)) {
phys_addr_t map = map_single(hwdev, sg_phys(sg),
sg->length, dir, attrs);
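
As a usage sketch (not part of this diff), a driver that builds scatterlists can consult the new accessor so that no single segment exceeds what the bounce buffer can map; the helper below is an illustrative assumption modelled on that pattern:

	#include <linux/swiotlb.h>
	#include <linux/kernel.h>
	#include <linux/mm.h>

	/* largest scatterlist segment length this driver should create */
	static unsigned int my_driver_max_segment(void)
	{
		unsigned int max = swiotlb_max_segment();

		/* 0 means swiotlb is not in use, so no extra limit applies */
		if (!max)
			max = rounddown(UINT_MAX, PAGE_SIZE);
		return max;
	}

With swiotlb=force on the command line, swiotlb_set_max_segment() stores 1, effectively limiting such callers to the smallest possible segments, since every mapping will be bounced anyway.
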
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index a3e8ec3fb1c5..09371b0a9baf 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -42,12 +42,6 @@ static const struct file_operations test_fw_fops = {
.read = test_fw_misc_read,
};
-static struct miscdevice test_fw_misc_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "test_firmware",
- .fops = &test_fw_fops,
-};
-
static ssize_t trigger_request_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -132,39 +126,81 @@ out:
}
static DEVICE_ATTR_WO(trigger_async_request);
-static int __init test_firmware_init(void)
+static ssize_t trigger_custom_fallback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int rc;
+ char *name;
- rc = misc_register(&test_fw_misc_device);
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOSPC;
+
+ pr_info("loading '%s' using custom fallback mechanism\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ test_firmware = NULL;
+ rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
+ dev, GFP_KERNEL, NULL,
+ trigger_async_request_cb);
if (rc) {
- pr_err("could not register misc device: %d\n", rc);
- return rc;
+ pr_info("async load of '%s' failed: %d\n", name, rc);
+ kfree(name);
+ goto out;
}
- rc = device_create_file(test_fw_misc_device.this_device,
- &dev_attr_trigger_request);
- if (rc) {
- pr_err("could not create sysfs interface: %d\n", rc);
- goto dereg;
+ /* Free 'name' ASAP, to test for race conditions */
+ kfree(name);
+
+ wait_for_completion(&async_fw_done);
+
+ if (test_firmware) {
+ pr_info("loaded: %zu\n", test_firmware->size);
+ rc = count;
+ } else {
+ pr_err("failed to async load firmware\n");
+ rc = -ENODEV;
}
- rc = device_create_file(test_fw_misc_device.this_device,
- &dev_attr_trigger_async_request);
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_custom_fallback);
+
+#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+ TEST_FW_DEV_ATTR(trigger_request),
+ TEST_FW_DEV_ATTR(trigger_async_request),
+ TEST_FW_DEV_ATTR(trigger_custom_fallback),
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static struct miscdevice test_fw_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "test_firmware",
+ .fops = &test_fw_fops,
+ .groups = test_dev_groups,
+};
+
+static int __init test_firmware_init(void)
+{
+ int rc;
+
+ rc = misc_register(&test_fw_misc_device);
if (rc) {
- pr_err("could not create async sysfs interface: %d\n", rc);
- goto remove_file;
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
}
pr_warn("interface ready\n");
return 0;
-
-remove_file:
- device_remove_file(test_fw_misc_device.this_device,
- &dev_attr_trigger_async_request);
-dereg:
- misc_deregister(&test_fw_misc_device);
- return rc;
}
module_init(test_firmware_init);
@@ -172,10 +208,6 @@ module_init(test_firmware_init);
static void __exit test_firmware_exit(void)
{
release_firmware(test_firmware);
- device_remove_file(test_fw_misc_device.this_device,
- &dev_attr_trigger_async_request);
- device_remove_file(test_fw_misc_device.this_device,
- &dev_attr_trigger_request);
misc_deregister(&test_fw_misc_device);
pr_warn("removed interface\n");
}
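
A condensed sketch of the sysfs pattern this change switches to (the names below are illustrative, not taken from the diff): attaching an attribute group to the miscdevice lets misc_register() create and remove the files itself, so the manual device_create_file()/device_remove_file() pairs and their error unwinding can be dropped:

	#include <linux/kernel.h>
	#include <linux/device.h>
	#include <linux/miscdevice.h>
	#include <linux/sysfs.h>

	static ssize_t status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "ok\n");
	}
	static DEVICE_ATTR_RO(status);

	static struct attribute *demo_attrs[] = {
		&dev_attr_status.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(demo);		/* generates demo_groups[] */

	static struct miscdevice demo_misc = {
		.minor	= MISC_DYNAMIC_MINOR,
		.name	= "demo",
		.groups	= demo_groups,
		/* .fops omitted in this sketch */
	};
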
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index fbdf87920093..0b1d3140fbb8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
@@ -331,6 +332,38 @@ static noinline void __init kmem_cache_oob(void)
kmem_cache_destroy(cache);
}
+static noinline void __init memcg_accounted_kmem_cache(void)
+{
+ int i;
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
+ if (!cache) {
+ pr_err("Cache allocation failed\n");
+ return;
+ }
+
+ pr_info("allocate memcg accounted object\n");
+ /*
+ * Several allocations with a delay to allow for lazy per memcg kmem
+ * cache creation.
+ */
+ for (i = 0; i < 5; i++) {
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ pr_err("Allocation failed\n");
+ goto free_cache;
+ }
+ kmem_cache_free(cache, p);
+ msleep(100);
+ }
+
+free_cache:
+ kmem_cache_destroy(cache);
+}
+
static char global_array[10];
static noinline void __init kasan_global_oob(void)
@@ -460,6 +493,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_uaf_memset();
kmalloc_uaf2();
kmem_cache_oob();
+ memcg_accounted_kmem_cache();
kasan_stack_oob();
kasan_global_oob();
ksize_unpoisons_memory();
diff --git a/lib/test_parman.c b/lib/test_parman.c
new file mode 100644
index 000000000000..35e32243693c
--- /dev/null
+++ b/lib/test_parman.c
@@ -0,0 +1,395 @@
+/*
+ * lib/test_parman.c - Test module for parman
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/parman.h>
+
+#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */
+#define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT)
+#define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1)
+
+#define TEST_PARMAN_ITEM_SHIFT 13 /* defines a total number
+ * of items for testing
+ */
+#define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT)
+#define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1)
+
+#define TEST_PARMAN_BASE_SHIFT 8
+#define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT)
+#define TEST_PARMAN_RESIZE_STEP_SHIFT 7
+#define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT)
+
+#define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT)
+#define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT)
+#define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1)
+
+#define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256)
+
+struct test_parman_prio {
+ struct parman_prio parman_prio;
+ unsigned long priority;
+};
+
+struct test_parman_item {
+ struct parman_item parman_item;
+ struct test_parman_prio *prio;
+ bool used;
+};
+
+struct test_parman {
+ struct parman *parman;
+ struct test_parman_item **prio_array;
+ unsigned long prio_array_limit;
+ struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT];
+ struct test_parman_item items[TEST_PARMAN_ITEM_COUNT];
+ struct rnd_state rnd;
+ unsigned long run_budget;
+ unsigned long bulk_budget;
+ bool bulk_noop;
+ unsigned int used_items;
+};
+
+#define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count))
+
+static int test_parman_resize(void *priv, unsigned long new_count)
+{
+ struct test_parman *test_parman = priv;
+ struct test_parman_item **prio_array;
+ unsigned long old_count;
+
+ prio_array = krealloc(test_parman->prio_array,
+ ITEM_PTRS_SIZE(new_count), GFP_KERNEL);
+ if (new_count == 0)
+ return 0;
+ if (!prio_array)
+ return -ENOMEM;
+ old_count = test_parman->prio_array_limit;
+ if (new_count > old_count)
+ memset(&prio_array[old_count], 0,
+ ITEM_PTRS_SIZE(new_count - old_count));
+ test_parman->prio_array = prio_array;
+ test_parman->prio_array_limit = new_count;
+ return 0;
+}
+
+static void test_parman_move(void *priv, unsigned long from_index,
+ unsigned long to_index, unsigned long count)
+{
+ struct test_parman *test_parman = priv;
+ struct test_parman_item **prio_array = test_parman->prio_array;
+
+ memmove(&prio_array[to_index], &prio_array[from_index],
+ ITEM_PTRS_SIZE(count));
+ memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count));
+}
+
+static const struct parman_ops test_parman_lsort_ops = {
+ .base_count = TEST_PARMAN_BASE_COUNT,
+ .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT,
+ .resize = test_parman_resize,
+ .move = test_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static void test_parman_rnd_init(struct test_parman *test_parman)
+{
+ prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL);
+}
+
+static u32 test_parman_rnd_get(struct test_parman *test_parman)
+{
+ return prandom_u32_state(&test_parman->rnd);
+}
+
+static unsigned long test_parman_priority_gen(struct test_parman *test_parman)
+{
+ unsigned long priority;
+ int i;
+
+again:
+ priority = test_parman_rnd_get(test_parman);
+ if (priority == 0)
+ goto again;
+
+ for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+ struct test_parman_prio *prio = &test_parman->prios[i];
+
+ if (prio->priority == 0)
+ break;
+ if (prio->priority == priority)
+ goto again;
+ }
+ return priority;
+}
+
+static void test_parman_prios_init(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+ struct test_parman_prio *prio = &test_parman->prios[i];
+
+ /* Assign a random unique priority to each prio structure */
+ prio->priority = test_parman_priority_gen(test_parman);
+ parman_prio_init(test_parman->parman, &prio->parman_prio,
+ prio->priority);
+ }
+}
+
+static void test_parman_prios_fini(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+ struct test_parman_prio *prio = &test_parman->prios[i];
+
+ parman_prio_fini(&prio->parman_prio);
+ }
+}
+
+static void test_parman_items_init(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
+ struct test_parman_item *item = &test_parman->items[i];
+ unsigned int prio_index = test_parman_rnd_get(test_parman) &
+ TEST_PARMAN_PRIO_MASK;
+
+ /* Assign random prio to each item structure */
+ item->prio = &test_parman->prios[prio_index];
+ }
+}
+
+static void test_parman_items_fini(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
+ struct test_parman_item *item = &test_parman->items[i];
+
+ if (!item->used)
+ continue;
+ parman_item_remove(test_parman->parman,
+ &item->prio->parman_prio,
+ &item->parman_item);
+ }
+}
+
+static struct test_parman *test_parman_create(const struct parman_ops *ops)
+{
+ struct test_parman *test_parman;
+ int err;
+
+ test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL);
+ if (!test_parman)
+ return ERR_PTR(-ENOMEM);
+ err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT);
+ if (err)
+ goto err_resize;
+ test_parman->parman = parman_create(ops, test_parman);
+ if (!test_parman->parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ test_parman_rnd_init(test_parman);
+ test_parman_prios_init(test_parman);
+ test_parman_items_init(test_parman);
+ test_parman->run_budget = TEST_PARMAN_RUN_BUDGET;
+ return test_parman;
+
+err_parman_create:
+ test_parman_resize(test_parman, 0);
+err_resize:
+ kfree(test_parman);
+ return ERR_PTR(err);
+}
+
+static void test_parman_destroy(struct test_parman *test_parman)
+{
+ test_parman_items_fini(test_parman);
+ test_parman_prios_fini(test_parman);
+ parman_destroy(test_parman->parman);
+ test_parman_resize(test_parman, 0);
+ kfree(test_parman);
+}
+
+static bool test_parman_run_check_budgets(struct test_parman *test_parman)
+{
+ if (test_parman->run_budget-- == 0)
+ return false;
+ if (test_parman->bulk_budget-- != 0)
+ return true;
+
+ test_parman->bulk_budget = test_parman_rnd_get(test_parman) &
+ TEST_PARMAN_BULK_MAX_MASK;
+ test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1;
+ return true;
+}
+
+static int test_parman_run(struct test_parman *test_parman)
+{
+ unsigned int i = test_parman_rnd_get(test_parman);
+ int err;
+
+ while (test_parman_run_check_budgets(test_parman)) {
+ unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK;
+ struct test_parman_item *item = &test_parman->items[item_index];
+
+ if (test_parman->bulk_noop)
+ continue;
+
+ if (!item->used) {
+ err = parman_item_add(test_parman->parman,
+ &item->prio->parman_prio,
+ &item->parman_item);
+ if (err)
+ return err;
+ test_parman->prio_array[item->parman_item.index] = item;
+ test_parman->used_items++;
+ } else {
+ test_parman->prio_array[item->parman_item.index] = NULL;
+ parman_item_remove(test_parman->parman,
+ &item->prio->parman_prio,
+ &item->parman_item);
+ test_parman->used_items--;
+ }
+ item->used = !item->used;
+ }
+ return 0;
+}
+
+static int test_parman_check_array(struct test_parman *test_parman,
+ bool gaps_allowed)
+{
+ unsigned int last_unused_items = 0;
+ unsigned long last_priority = 0;
+ unsigned int used_items = 0;
+ int i;
+
+ if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) {
+ pr_err("Array limit is lower than the base count (%lu < %lu)\n",
+ test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < test_parman->prio_array_limit; i++) {
+ struct test_parman_item *item = test_parman->prio_array[i];
+
+ if (!item) {
+ last_unused_items++;
+ continue;
+ }
+ if (last_unused_items && !gaps_allowed) {
+ pr_err("Gap found in array even though they are forbidden\n");
+ return -EINVAL;
+ }
+
+ last_unused_items = 0;
+ used_items++;
+
+ if (item->prio->priority < last_priority) {
+ pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n",
+ item->prio->priority, last_priority);
+ return -EINVAL;
+ }
+ last_priority = item->prio->priority;
+
+ if (item->parman_item.index != i) {
+ pr_err("Item has different index in compare to where it actually is (%lu != %d)\n",
+ item->parman_item.index, i);
+ return -EINVAL;
+ }
+ }
+
+ if (used_items != test_parman->used_items) {
+ pr_err("Number of used items in array does not match (%u != %u)\n",
+ used_items, test_parman->used_items);
+ return -EINVAL;
+ }
+
+ if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) {
+ pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n",
+ last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT);
+ return -EINVAL;
+ }
+
+ pr_info("Priority array check successful\n");
+
+ return 0;
+}
+
+static int test_parman_lsort(void)
+{
+ struct test_parman *test_parman;
+ int err;
+
+ test_parman = test_parman_create(&test_parman_lsort_ops);
+ if (IS_ERR(test_parman))
+ return PTR_ERR(test_parman);
+
+ err = test_parman_run(test_parman);
+ if (err)
+ goto out;
+
+ err = test_parman_check_array(test_parman, false);
+ if (err)
+ goto out;
+out:
+ test_parman_destroy(test_parman);
+ return err;
+}
+
+static int __init test_parman_init(void)
+{
+ return test_parman_lsort();
+}
+
+static void __exit test_parman_exit(void)
+{
+}
+
+module_init(test_parman_init);
+module_exit(test_parman_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for parman");
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
new file mode 100644
index 000000000000..a6d854d933bf
--- /dev/null
+++ b/lib/test_siphash.c
@@ -0,0 +1,223 @@
+/* Test cases for siphash.c
+ *
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ * https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+ 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+ 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+ 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+ 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+ 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+ 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+ 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+ 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+ 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+ 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+ 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+ 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+ 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+ 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+ 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+ 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+ 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+ 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+ 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+ 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+ 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+ 0x958a324ceb064572ULL
+};
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+ 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+ 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+ 0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+ 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+ 0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+ 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+ 0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+ 0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+ 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+ 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+ 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+ 0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+ 0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+ 0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+ 0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+ 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+ 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+ 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+ 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+ 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+ 0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+ 0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+ 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+ 0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+ 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+ 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+ 0x06712339U, 0x522aca67U, 0x911bb605U,
+ 0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+ 0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+ 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+ 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+ 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+ 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+ 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+ 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+ 0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+ 0x007783ebU, 0x95766243U, 0xab639262U,
+ 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+ 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+ 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+ 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+ 0x87178304U
+};
+#endif
+
+static int __init siphash_test_init(void)
+{
+ u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+ u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+ u8 i;
+ int ret = 0;
+
+ for (i = 0; i < 64; ++i) {
+ in[i] = i;
+ in_unaligned[i + 1] = i;
+ if (siphash(in, i, &test_key_siphash) !=
+ test_vectors_siphash[i]) {
+ pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
+ test_vectors_siphash[i]) {
+ pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ if (hsiphash(in, i, &test_key_hsiphash) !=
+ test_vectors_hsiphash[i]) {
+ pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
+ test_vectors_hsiphash[i]) {
+ pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ }
+ if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
+ test_vectors_siphash[8]) {
+ pr_info("siphash self-test 1u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ &test_key_siphash) != test_vectors_siphash[16]) {
+ pr_info("siphash self-test 2u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, &test_key_siphash) !=
+ test_vectors_siphash[24]) {
+ pr_info("siphash self-test 3u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+ &test_key_siphash) != test_vectors_siphash[32]) {
+ pr_info("siphash self-test 4u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_1u32(0x03020100U, &test_key_siphash) !=
+ test_vectors_siphash[4]) {
+ pr_info("siphash self-test 1u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
+ test_vectors_siphash[8]) {
+ pr_info("siphash self-test 2u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_siphash) !=
+ test_vectors_siphash[12]) {
+ pr_info("siphash self-test 3u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
+ test_vectors_siphash[16]) {
+ pr_info("siphash self-test 4u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
+ test_vectors_hsiphash[4]) {
+ pr_info("hsiphash self-test 1u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
+ test_vectors_hsiphash[8]) {
+ pr_info("hsiphash self-test 2u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_hsiphash) !=
+ test_vectors_hsiphash[12]) {
+ pr_info("hsiphash self-test 3u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
+ test_vectors_hsiphash[16]) {
+ pr_info("hsiphash self-test 4u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (!ret)
+ pr_info("self-tests: pass\n");
+ return ret;
+}
+
+static void __exit siphash_test_exit(void)
+{
+}
+
+module_init(siphash_test_init);
+module_exit(siphash_test_exit);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_sort.c b/lib/test_sort.c
new file mode 100644
index 000000000000..4db3911db50a
--- /dev/null
+++ b/lib/test_sort.c
@@ -0,0 +1,44 @@
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+/*
+ * A simple boot-time regression test
+ * License: GPL
+ */
+
+#define TEST_LEN 1000
+
+static int __init cmpint(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int __init test_sort_init(void)
+{
+ int *a, i, r = 1, err = -ENOMEM;
+
+ a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return err;
+
+ for (i = 0; i < TEST_LEN; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
+
+ err = -EINVAL;
+ for (i = 0; i < TEST_LEN-1; i++)
+ if (a[i] > a[i+1]) {
+ pr_err("test has failed\n");
+ goto exit;
+ }
+ err = 0;
+ pr_info("test passed\n");
+exit:
+ kfree(a);
+ return err;
+}
+subsys_initcall(test_sort_init);
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 0ecef3e4690e..1a8d71a68531 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -25,6 +25,24 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+/*
+ * Several 32-bit architectures support 64-bit {get,put}_user() calls.
+ * As there doesn't appear to be anything that can safely determine
+ * their capability at compile-time, we just have to opt out certain archs.
+ */
+#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
+ !defined(CONFIG_AVR32) && \
+ !defined(CONFIG_BLACKFIN) && \
+ !defined(CONFIG_M32R) && \
+ !defined(CONFIG_M68K) && \
+ !defined(CONFIG_MICROBLAZE) && \
+ !defined(CONFIG_MN10300) && \
+ !defined(CONFIG_NIOS2) && \
+ !defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SUPERH))
+# define TEST_U64
+#endif
+
#define test(condition, msg) \
({ \
int cond = (condition); \
@@ -40,7 +58,12 @@ static int __init test_user_copy_init(void)
char __user *usermem;
char *bad_usermem;
unsigned long user_addr;
- unsigned long value = 0x5A;
+ u8 val_u8;
+ u16 val_u16;
+ u32 val_u32;
+#ifdef TEST_U64
+ u64 val_u64;
+#endif
kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
if (!kmem)
@@ -58,33 +81,100 @@ static int __init test_user_copy_init(void)
usermem = (char __user *)user_addr;
bad_usermem = (char *)user_addr;
- /* Legitimate usage: none of these should fail. */
- ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
- "legitimate copy_from_user failed");
+ /*
+ * Legitimate usage: none of these copies should fail.
+ */
+ memset(kmem, 0x3a, PAGE_SIZE * 2);
ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
"legitimate copy_to_user failed");
- ret |= test(get_user(value, (unsigned long __user *)usermem),
- "legitimate get_user failed");
- ret |= test(put_user(value, (unsigned long __user *)usermem),
- "legitimate put_user failed");
-
- /* Invalid usage: none of these should succeed. */
+ memset(kmem, 0x0, PAGE_SIZE);
+ ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
+ "legitimate copy_from_user failed");
+ ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
+ "legitimate usercopy failed to copy data");
+
+#define test_legit(size, check) \
+ do { \
+ val_##size = check; \
+ ret |= test(put_user(val_##size, (size __user *)usermem), \
+ "legitimate put_user (" #size ") failed"); \
+ val_##size = 0; \
+ ret |= test(get_user(val_##size, (size __user *)usermem), \
+ "legitimate get_user (" #size ") failed"); \
+ ret |= test(val_##size != check, \
+ "legitimate get_user (" #size ") failed to do copy"); \
+ if (val_##size != check) { \
+ pr_info("0x%llx != 0x%llx\n", \
+ (unsigned long long)val_##size, \
+ (unsigned long long)check); \
+ } \
+ } while (0)
+
+ test_legit(u8, 0x5a);
+ test_legit(u16, 0x5a5b);
+ test_legit(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_legit(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_legit
+
+ /*
+ * Invalid usage: none of these copies should succeed.
+ */
+
+ /* Prepare kernel memory with check values. */
+ memset(kmem, 0x5a, PAGE_SIZE);
+ memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
PAGE_SIZE),
"illegal all-kernel copy_from_user passed");
+
+ /* Destination half of buffer should have been zeroed. */
+ ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
+ "zeroing failure for illegal all-kernel copy_from_user");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
PAGE_SIZE),
"illegal reversed copy_from_user passed");
+#endif
ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
PAGE_SIZE),
"illegal all-kernel copy_to_user passed");
ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
PAGE_SIZE),
"illegal reversed copy_to_user passed");
- ret |= test(!get_user(value, (unsigned long __user *)kmem),
- "illegal get_user passed");
- ret |= test(!put_user(value, (unsigned long __user *)kmem),
- "illegal put_user passed");
+
+#define test_illegal(size, check) \
+ do { \
+ val_##size = (check); \
+ ret |= test(!get_user(val_##size, (size __user *)kmem), \
+ "illegal get_user (" #size ") passed"); \
+ ret |= test(val_##size != (size)0, \
+ "zeroing failure for illegal get_user (" #size ")"); \
+ if (val_##size != (size)0) { \
+ pr_info("0x%llx != 0\n", \
+ (unsigned long long)val_##size); \
+ } \
+ ret |= test(!put_user(val_##size, (size __user *)kmem), \
+ "illegal put_user (" #size ") passed"); \
+ } while (0)
+
+ test_illegal(u8, 0x5a);
+ test_illegal(u16, 0x5a5b);
+ test_illegal(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_illegal
vm_munmap(user_addr, PAGE_SIZE * 2);
kfree(kmem);
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index adc6ee0a5126..4a720ed4fdaf 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -80,8 +80,7 @@ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
if (head->next == node) {
struct rb_node *rbn = rb_next(&node->node);
- head->next = rbn ?
- rb_entry(rbn, struct timerqueue_node, node) : NULL;
+ head->next = rb_entry_safe(rbn, struct timerqueue_node, node);
}
rb_erase(&node->node, &head->head);
RB_CLEAR_NODE(&node->node);
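
For reference, rb_entry_safe() is the NULL-propagating form of rb_entry() from <linux/rbtree.h>, roughly:

	#define rb_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
		})

so the open-coded ternary above collapses into a single helper call with no change in behaviour.
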
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0967771d8f7f..e3bf4e0f10b5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1739,6 +1739,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
* 'h', 'l', or 'L' for integer fields
* 'z' support added 23/7/1999 S.H.
* 'z' changed to 'Z' --davidm 1/25/99
+ * 'Z' changed to 'z' --adobriyan 2017-01-25
* 't' added for ptrdiff_t
*
* @fmt: the format string
@@ -1838,7 +1839,7 @@ qualifier:
/* get the conversion qualifier */
qualifier = 0;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
- _tolower(*fmt) == 'z' || *fmt == 't') {
+ *fmt == 'z' || *fmt == 't') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'l') {
@@ -1907,7 +1908,7 @@ qualifier:
else if (qualifier == 'l') {
BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
- } else if (_tolower(qualifier) == 'z') {
+ } else if (qualifier == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
@@ -2657,7 +2658,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
/* get conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
- _tolower(*fmt) == 'z') {
+ *fmt == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
@@ -2851,7 +2852,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
else
*va_arg(args, unsigned long long *) = val.u;
break;
- case 'Z':
case 'z':
*va_arg(args, size_t *) = val.u;
break;